// file: elemwise_binary_scalar_op.h
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <utility>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"
namespace mxnet {
namespace op {
class BinaryScalarOp : public UnaryOp {
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
const double alpha = nnvm::get<double>(attrs.parsed);
CHECK_EQ(output.shape(), input.shape());
const int64_t row_count = output.shape()[0];
const int64_t items_per_row = output.shape().Size() / row_count;
const DType result_for_zero = OP::Map(DType(0), DType(alpha));
mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
if (sparse_row_count != row_count) {
mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
int64_t input_iter = 0;
int64_t output_row = 0;
IType next_input_row = 0;
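// Walk the output rows in order, alternating between runs of rows that are
// absent from the sparse input (filled with OP(0, alpha), i.e. result_for_zero)
// and runs of contiguous stored rows, each handled with a single kernel launch.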
while (output_row < row_count) {
next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter])
: row_count;
// Split up into blocks of contiguous data and do those together
// Do contiguous dense blocks
const int64_t dense_block_count = next_input_row - output_row;
if (dense_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, cpu>::Launch(
stream,
items_per_row * dense_block_count,
output_data.dptr_ + items_per_row * output_row,
result_for_zero);
});
output_row += dense_block_count;
continue;
}
// Do contiguous sparse blocks
int64_t next_non_contiguous_sparse = input_iter;
while (next_non_contiguous_sparse < sparse_row_count - 1) {
if (row_indexes[next_non_contiguous_sparse + 1]
!= row_indexes[next_non_contiguous_sparse] + 1) {
break;
}
++next_non_contiguous_sparse;
}
const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
if (sparse_block_count > 0) {
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * sparse_block_count,
&output_data.dptr_[items_per_row * output_row],
&input_data.dptr_[items_per_row * input_iter],
DType(alpha));
});
output_row += sparse_block_count;
input_iter += sparse_block_count;
continue;
}
}
} else {
// All rows are present, so we can launch a single kernel over the whole tensor
// (no row-index lookups are needed, which also simplifies an eventual GPU kernel)
MXNET_ASSIGN_REQ_SWITCH(req, Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
stream,
items_per_row * row_count,
output_data.dptr_,
input_data.dptr_,
DType(alpha));
});
}
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType>
static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
CHECK_EQ(output.shape(), input.shape());
const double alpha = nnvm::get<double>(attrs.parsed);
const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
const TBlob column_indexes = input.aux_data(csr::kIdx);
const size_t item_count = column_indexes.Size();
// Pre-fill dense with 0-input/output value
FillDense<DType>(stream, output.shape().Size(), dense_fill_val,
req, output.data().dptr<DType>());
mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
if (item_count) {
const DType *in = input.data().dptr<DType>();
const IType *column_indexes_ptr = column_indexes.dptr<IType>();
const auto row_count = static_cast<size_t>(input.shape()[0]);
const TBlob row_starts = input.aux_data(csr::kIndPtr);
const CType *row_starts_ptr = row_starts.dptr<CType>();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(row_count); ++i) {
const bool last_row = i == static_cast<int>(row_count) - 1;
// Split up into blocks of contiguous data and do those together
const size_t row_item_start_iter = row_starts_ptr[i];
const size_t input_items_this_row = !last_row
? static_cast<size_t>(row_starts_ptr[i + 1])
- row_item_start_iter
: item_count - row_item_start_iter;
if (input_items_this_row) {
const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
const DType *row_data_start = in + row_item_start_iter;
DType *output_this_row = out[i].dptr_;
// More overhead to use OMP for small loops, so don't
if (input_items_this_row > 1000) {
#pragma omp parallel for
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
} else {
for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
const IType col = this_row_column_indexes[j];
const DType val = row_data_start[j];
output_this_row[col] = OP::Map(val, DType(alpha));
}
}
}
}
}
}
/*! \brief Tensor operation against a scalar with a dense result */
template<typename OP, typename DType, typename IType, typename CType>
static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
LOG(FATAL) << "NOT IMPLEMENTED";
}
template<typename xpu, typename OP, typename DType, typename IType>
static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &input,
const OpReqType req,
const NDArray &output) {
mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
CHECK_EQ(output.storage_type(), kDefaultStorage);
switch (input.storage_type()) {
case kRowSparseStorage: {
ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
break;
}
case kCSRStorage: {
MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
});
break;
}
default:
CHECK(false) << "Unsupported sparse storage type";
break;
}
}
public:
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
s, inputs[0].Size(), outputs[0].dptr<bool>(), inputs[0].dptr<DType>(), DType(alpha));
});
});
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else if (out_stype == kDefaultStorage &&
(in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
});
});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename OP>
static void LogicComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
DCHECK_EQ(inputs.size(), 1);
DCHECK_EQ(outputs.size(), 1);
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (req[0] == kNullOp) {
return;
}
if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
(in_stype == kCSRStorage && out_stype == kCSRStorage)) {
// csr -> csr, or rsp -> rsp
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename OP>
static void Backward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
Stream<xpu> *s = ctx.get_stream<xpu>();
const double alpha = nnvm::get<double>(attrs.parsed);
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
mxnet::op::mxnet_op::Kernel<mxnet::op::mxnet_op::op_with_req<
mxnet::op::mxnet_op::backward_grad_tuned<OP>, Req>, xpu>::
Launch(s, inputs[0].Size(), outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>(),
DType(alpha));
});
});
}
};
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr_parser([](NodeAttrs* attrs) { \
attrs->parsed = std::stod(attrs->dict["scalar"]); \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<1, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}}; \
}) \
.add_argument("data", "NDArray-or-Symbol", "source input") \
.add_argument("scalar", "float", "scalar input")
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
// file: GB_unop__identity_uint64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_fc64)
// op(A') function: GB (_unop_tran__identity_uint64_fc64)
// C type: uint64_t
// A type: GxB_FC64_t
// cast: uint64_t cij = GB_cast_to_uint64_t (creal (aij))
// unaryop: cij = aij
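// For example, assuming the usual truncating real-to-integer cast semantics of
// GB_cast_to_uint64_t: aij = 3.7 + 2.0i gives creal (aij) = 3.7, so cij = 3.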
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = GB_cast_to_uint64_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = GB_cast_to_uint64_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_fc64)
(
uint64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
uint64_t z = GB_cast_to_uint64_t (creal (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
uint64_t z = GB_cast_to_uint64_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
// file: gstats.h
/**
* Usage goal:
* gstats_all_data::create_stat(name, optional aggregation_and_print_function)
* returns a STAT_ID, which I then #define
 * also inserts (name -> gstats_stat_id) and (gstats_stat_id -> name) into dictionaries
 * also saves (gstats_stat_id -> aggregation_and_print_function) in a dict if one is given
* gstats_all_data::add_stat(STAT_ID, value)
* gstats_all_data::print_stat(STAT_ID, aggregation_and_print_function)
* gstats_all_data::print_all_stats()
* uses any aggregation_and_print_function specifications made in create_stat
*
* aggregation_and_print_functions:
* use them with lambdas to curry config args
* (could efficiently combine them in one loop over the data by making the agg_
* functions only contain the body of a loop over the data, then we can have
* a single aggregate function with an agg_ function argument that performs
* a loop, and runs all registered agg_ functions for a stat, one after another.
* printing will happen with separate print_ functions after the loop.
* declarations are still an issue, though, unless they're stored in a dict
* on the stack.)
*/
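/**
 * A minimal usage sketch of the API defined below (hypothetical stat name and
 * values; see create_stat / add_stat / print_stat in this header):
 *
 * gstats_t stats(num_threads);
 * gstats_stat_id sid = stats.create_stat(LONG_LONG, "num_updates", 1,
 * {gstats_output_item(PRINT_RAW, SUM, TOTAL)});
 * stats.add_stat<long long>(tid, sid, 1, 0); // thread tid adds 1 at index 0
 * gstats_output_item item(PRINT_RAW, SUM, TOTAL);
 * stats.print_stat<long long>(sid, item); // prints sum_num_updates_total=...
 */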
// TODO: add more output options, such as graphs!
/**
* TODO: change stats so that handle_stats no longer takes gstats_output_items,
* and you can instead get stat aggregations using a function get(expr).
* the ultimate goal is to be able to provide complex expressions using
* the following grammar:
*
* START = EXPR
* EXPR = FLOAT | STAT_NAME
* EXPR = [EXPR OPERATOR EXPR]
* EXPR = (AGGR GRAN EXPR)
* OPERATOR = + | - | * | / | %
* AGGR = SUM | AVG | MIN | MAX | COUNT | VARIANCE | STDEV | HIST_LOG | HIST_LIN
* GRAN = BY_INDEX | BY_THREAD | ALL
*
* This will allow invocations such as the following:
* get("3.0") -> returns array with a[0] = 3.0
* get("num_updates") -> returns array containing all values stored for num_updates (probably per-thread values)
* get("(SUM ALL num_updates)") -> returns array with a[0] = the sum of num_updates over all threads&indices
* get("(SUM BY_THREAD num_updates)") -> returns array containing per-thread sums of num_updates
* get("[(SUM ALL num_updates) / 3.0]") -> returns array with a[0] = the sum of num_updates over all threads&indices, all divided by 3.0
* get("[(SUM ALL num_updates) / " + str(elapsed_millis/1000.) + "]") -> returns array with a[0] = throughput, which is computed as the sum of num_updates over all threads&indices, all divided by the number of seconds elapsed (assuming elapsed_millis is a local variable containing a number of milliseconds elapsed since some earlier time)
* get("[(SUM BY_THREAD num_updates) / " + str(elapsed_millis/1000.) + "]") -> returns array containing per-thread throughputs
* get("[(STDEV BY_THREAD num_updates) / " + str(elapsed_millis/1000.) + "]") -> returns array containing per-thread standard deviations for throughputs
* get("[[(STDEV BY_THREAD num_updates) / " + str(elapsed_millis/1000.) + "] / [(SUM BY_THREAD num_updates) / " + str(elapsed_millis/1000.) + "]]") -> returns array containing per-thread standard deviations for throughputs, expressed as a percentage of the thread's throughput
* get("(HIST_LOG ALL range_query_sizes)") -> returns array representing a logarithmic histogram of the values in range_query_size over all threads&indices
* get("(HIST_LOG BY_THREAD range_query_sizes)") -> returns array containing per-thread arrays, each of which represents a logarithmic histogram of the values in range_query_size over all indices
*/
/**
* Another attempt at a grammar for expressions:
* START = EXPR
* EXPR = FLOAT | STAT_NAME -- floating point number or stat name token
* EXPR = [EXPR OPERATOR EXPR] -- basic operator math (applies pairwise to array elements)
* EXPR = (REDUCE_OP GRANULARITY EXPR) -- aggregating expression (reduce)
* OPERATOR = + | - | * | / | %
* REDUCE_OP = OPERATOR | min | max | avg | count | variance | stdev
* REDUCE_OP = hist_log | hist_lin
* GRANULARITY = by_index | by_thread | all
*/
// todo: later, expand this to work with tables of data, not just arrays indexed by thread and an arbitrary index (could use that arbitrary index to handle tables by concatenating table keys?)
/**
* I've since realized that the evolution of ideas above basically mirrors the development of SQL databases.
* I think on the output side, if I want an output module with fancier computational powers, I really just want to harness an in-memory SQL database like sqlite.
*/
#ifndef GSTATS_H
#define GSTATS_H
#include <thread>
#include <string>
#include <vector>
#include <map>
#include <cassert>
#include <cmath>
#include <iostream>
#include <fstream>
#include <sstream>
#include <algorithm>
#include "errors.h"
#include "locks_impl.h"
#include "plaf.h"
#ifndef VERBOSE
#define VERBOSE if(0)
#endif
#define GSTATS_COMMA ,
#define GSTATS_THREAD_PADDING_BYTES 256
#define GSTATS_MAX_NUM_STATS 128
#ifndef GSTATS_MAX_THREAD_BUF_SIZE
# define GSTATS_MAX_THREAD_BUF_SIZE (1<<22)
#endif
#define GSTATS_DATA_SIZE_BYTES 8
#define GSTATS_BITS_IN_BYTE 8
#define GSTATS_DEFAULT_HISTOGRAM_LIN_NUM_BUCKETS 10
#define GSTATS_DEFAULT_HISTOGRAM_LOG_NUM_BUCKETS (GSTATS_DATA_SIZE_BYTES * GSTATS_BITS_IN_BYTE)
#define GSTATS_SQ(x) ((x)*(x))
#define GSTATS_USE_TEMPLATE(id, func, args) (this->data_types[id] == LONG_LONG ? func<long long>(args) : func<double>(args))
typedef int gstats_stat_id;
enum gstats_enum_data_type {
LONG_LONG,
DOUBLE
};
enum gstats_enum_output_method {
PRINT_RAW,
PRINT_HISTOGRAM_LOG,
PRINT_HISTOGRAM_LIN,
PRINT_TO_FILE
};
enum gstats_enum_aggregation_function {
NONE,
FIRST,
COUNT,
MIN,
MAX,
SUM,
AVERAGE,
VARIANCE,
STDEV
};
enum gstats_enum_aggregation_granularity {
FULL_DATA,
TOTAL,
BY_INDEX,
BY_THREAD
};
class gstats_output_item {
public:
gstats_enum_output_method method;
gstats_enum_aggregation_function func;
gstats_enum_aggregation_granularity granularity;
const char * const output_filename;
int num_buckets_if_histogram_lin;
gstats_output_item(gstats_enum_output_method method
, gstats_enum_aggregation_function func
, gstats_enum_aggregation_granularity granularity
, const char * const output_filename = NULL
, const int num_buckets_if_histogram_lin = GSTATS_DEFAULT_HISTOGRAM_LIN_NUM_BUCKETS)
: method(method)
, func(func)
, granularity(granularity)
, output_filename(output_filename)
, num_buckets_if_histogram_lin(num_buckets_if_histogram_lin) {
if (granularity == TOTAL && (method == PRINT_HISTOGRAM_LOG || method == PRINT_HISTOGRAM_LIN)) {
setbench_error("cannot use granularity TOTAL with HISTOGRAM methods");
}
if ((method == PRINT_TO_FILE) && (func != NONE || granularity != FULL_DATA)) {
setbench_error("PRINT_TO_FILE can only be used with aggregation function NONE, and granularity FULL_DATA");
}
if (output_filename && (method != PRINT_TO_FILE || func != NONE || granularity != FULL_DATA)) {
setbench_error("output_filename can only be used with PRINT_TO_FILE, NONE, FULL_DATA");
}
}
};
class gstats_t {
private:
class gstats_thread_data {
public:
PAD;
char data[GSTATS_MAX_THREAD_BUF_SIZE]; // STORES ONLY LONG LONGS AND DOUBLES (8 bytes each)
int offset[GSTATS_MAX_NUM_STATS];
int capacity[GSTATS_MAX_NUM_STATS];
int size[GSTATS_MAX_NUM_STATS];
PAD;
template <typename T>
inline T * get_ptr(gstats_stat_id id) {
return (T *) (data + offset[id]);
}
};
public:
template <typename T>
struct stat_metrics {
T first;
T cnt;
T min;
T max;
T sum;
T avg;
T variance;
T stdev;
T none; // only used for histograms -- we simply process all entries into the histogram (without aggregating), rather than plotting a histogram of aggregated data
stat_metrics() {
first = 0;
cnt = 0;
min = std::numeric_limits<T>::max();
max = std::numeric_limits<T>::min();
sum = 0;
avg = 0;
variance = 0;
stdev = 0;
none = 0;
}
};
private:
#define GSTATS_FIELD_FIRST first
#define GSTATS_FIELD_COUNT cnt
#define GSTATS_FIELD_MIN min
#define GSTATS_FIELD_MAX max
#define GSTATS_FIELD_SUM sum
#define GSTATS_FIELD_AVERAGE avg
#define GSTATS_FIELD_VARIANCE variance
#define GSTATS_FIELD_STDEV stdev
#define GSTATS_FIELD_NONE none
#define GSTATS_TYPE_TO_FIELD(type) GSTATS_FIELD_##type
#define GSTATS_PRINT_LOWER(str) { std::string __s = str; transform(__s.begin(), __s.end(), __s.begin(), ::tolower); std::cout<<__s; }
#define GSTATS_PRINT_AGG(agg_granularity_str, type, sid, metrics, num_metrics) { \
GSTATS_PRINT_LOWER(#type); \
std::cout<<"_"<<id_to_name[sid]<<agg_granularity_str<<"="; \
if (metrics == NULL) { \
std::cout<<std::endl; \
for (int __tid=0;__tid<NUM_PROCESSES;++__tid) { \
if (thread_data[__tid].size[sid]) { \
std::cout<<"thread "<<__tid; \
for (int __ix=0;__ix<thread_data[__tid].size[sid];++__ix) { \
if (get_stat<T>(__tid, sid, __ix) == std::numeric_limits<T>::max() || get_stat<T>(__tid, sid, __ix) == std::numeric_limits<T>::min()) { \
std::cout<<" "<<"0"; \
} else { \
std::cout<<" "<<get_stat<T>(__tid, sid, __ix); \
} \
} \
std::cout<<std::endl; \
} \
} \
} else { \
/* get last "empty" field so we can avoid printing lots of trailing zeros */ \
int __lastEmpty = num_metrics; \
for (int __i=num_metrics-1;__i>=0;--__i) { \
if (metrics[__i].GSTATS_TYPE_TO_FIELD(type) == std::numeric_limits<T>::max() || metrics[__i].GSTATS_TYPE_TO_FIELD(type) == std::numeric_limits<T>::min() || metrics[__i].GSTATS_TYPE_TO_FIELD(type) == 0) { \
__lastEmpty = __i; \
} else { \
break; \
} \
} \
/* print the fields (metrics) */ \
for (int __i=0;__i<__lastEmpty;++__i) { \
if (metrics[__i].GSTATS_TYPE_TO_FIELD(type) == std::numeric_limits<T>::max() || metrics[__i].GSTATS_TYPE_TO_FIELD(type) == std::numeric_limits<T>::min()) { \
std::cout<<(__i?" ":"")<<"0"; \
} else { \
std::cout<<(__i?" ":"")<<metrics[__i].GSTATS_TYPE_TO_FIELD(type); \
} \
} \
std::cout<<std::endl; \
} \
}
#define GSTATS_PRINT_HISTOGRAM_LOG(sid, agg_granularity_str, type, metrics, num_metrics) { \
stat_metrics<long long> * __histogram = get_histogram_log<T>((sid), metrics, num_metrics); \
int __first_nonzero = -1; \
int __last_nonzero = 0; \
for (int __i=0;__i<=GSTATS_DEFAULT_HISTOGRAM_LOG_NUM_BUCKETS;++__i) { \
if (__histogram[__i].GSTATS_TYPE_TO_FIELD(type) > 0) { \
__last_nonzero = __i; \
if (__first_nonzero == -1) __first_nonzero = __i; \
} \
} \
if (__first_nonzero == -1) __first_nonzero = 0; \
std::cout<<std::endl<<"log_histogram_of_"; \
GSTATS_PRINT_LOWER(#type); \
std::cout<<"_"<<id_to_name[sid]<<agg_granularity_str<<"="; \
for (int __i=0;__i<=__last_nonzero;++__i) std::cout<<(__i?" ":"")<<(1LL<<__i)<<":"<<__histogram[__i].GSTATS_TYPE_TO_FIELD(type); \
std::cout<<std::endl; \
for (int __i=__first_nonzero;__i<=__last_nonzero;++__i) { \
std::cout<<" "<<(__i?"(":"[")<<"2^"<<twoDigits(__i)<<", 2^"<<twoDigits(__i+1)<<"]: "<<__histogram[__i].GSTATS_TYPE_TO_FIELD(type)<<std::endl; \
} \
}
#define GSTATS_PASTE2(a,b) a##b
#define GSTATS_PASTE(a,b) GSTATS_PASTE2(a,b)
#define GSTATS_PASTE_MIN(arg) GSTATS_PASTE(arg,_min)
#define GSTATS_PASTE_MAX(arg) GSTATS_PASTE(arg,_max)
#define GSTATS_PASTE_BUCKET_SIZE(arg) GSTATS_PASTE(arg,_bucket_size)
#define GSTATS_PRINT_HISTOGRAM_LIN(sid, agg_granularity_str, type, metrics, num_metrics, num_buckets) { \
std::pair<stat_metrics<long long> *, histogram_lin_dims> __p = get_histogram_lin<T>((sid), (num_buckets), metrics, num_metrics); \
stat_metrics<long long> * __histogram = __p.first; \
histogram_lin_dims __dims = __p.second; \
auto __num_buckets = (__dims.GSTATS_PASTE_BUCKET_SIZE(GSTATS_TYPE_TO_FIELD(type)) < 1e-6) ? 0 : (num_buckets) - 1; \
std::cout<<std::endl<<"linear_histogram_of_"; \
GSTATS_PRINT_LOWER(#type); \
std::cout<<"_"<<id_to_name[sid]<<agg_granularity_str<<"="; \
for (int __i=0;__i<=__num_buckets;++__i) { \
if (__histogram[__i].GSTATS_TYPE_TO_FIELD(type)) { \
std::cout<<(__i?" ":"")<<(__dims.GSTATS_PASTE_MIN(GSTATS_TYPE_TO_FIELD(type)) + (1+__i)*__dims.GSTATS_PASTE_BUCKET_SIZE(GSTATS_TYPE_TO_FIELD(type)))<<":"<<__histogram[__i].GSTATS_TYPE_TO_FIELD(type); \
} \
} \
std::cout<<std::endl; \
for (int __i=0;__i<=__num_buckets;++__i) { \
if (__histogram[__i].GSTATS_TYPE_TO_FIELD(type)) { \
printf(" %s%12.2f, %12.2f]: %lld\n", (__i?"(":"["), (__dims.GSTATS_PASTE_MIN(GSTATS_TYPE_TO_FIELD(type)) + __i*__dims.GSTATS_PASTE_BUCKET_SIZE(GSTATS_TYPE_TO_FIELD(type))), (__dims.GSTATS_PASTE_MIN(GSTATS_TYPE_TO_FIELD(type)) + (1+__i)*__dims.GSTATS_PASTE_BUCKET_SIZE(GSTATS_TYPE_TO_FIELD(type))), __histogram[__i].GSTATS_TYPE_TO_FIELD(type)); \
} \
} \
}
PAD;
const int NUM_PROCESSES;
std::map<gstats_stat_id, std::string> id_to_name;
std::map<std::string, gstats_stat_id> name_to_id;
gstats_thread_data * thread_data;
gstats_stat_id num_stats;
std::multimap<gstats_stat_id, gstats_output_item> output_config;
// PAD;
std::vector<stat_metrics<double> *> arrays_to_delete; // to simplify deletion of heap allocated arrays
// PAD;
volatile int arrays_to_delete_lock = 0;
// PAD;
gstats_enum_data_type data_types[GSTATS_MAX_NUM_STATS];
// PAD;
stat_metrics<double> * computed_gstats_total[GSTATS_MAX_NUM_STATS]; // computed_gstats_total[gstats_stat_id][0] = stat_metrics
stat_metrics<double> * computed_gstats_by_index[GSTATS_MAX_NUM_STATS]; // computed_gstats_by_index[gstats_stat_id][index] = stat_metrics
stat_metrics<double> * computed_gstats_by_thread[GSTATS_MAX_NUM_STATS]; // computed_gstats_by_thread[gstats_stat_id][thread_id] = stat_metrics
// PAD;
int num_indices[GSTATS_MAX_NUM_STATS]; // num_indices[gstats_stat_id] = number of indices that have stat_metrics structs in computed_gstats_by_index[gstats_stat_id]
// PAD;
bool already_computed_stats;
PAD;
public:
gstats_t(const int num_processes)
: NUM_PROCESSES(num_processes)
, thread_data(new gstats_thread_data[num_processes])
, num_stats(0)
{
assert(sizeof(double) == GSTATS_DATA_SIZE_BYTES);
already_computed_stats = false;
memset(computed_gstats_total, 0, GSTATS_MAX_NUM_STATS*sizeof(stat_metrics<double> *));
memset(computed_gstats_by_index, 0, GSTATS_MAX_NUM_STATS*sizeof(stat_metrics<double> *));
memset(computed_gstats_by_thread, 0, GSTATS_MAX_NUM_STATS*sizeof(stat_metrics<double> *));
#pragma omp parallel for
for (int i=0;i<num_processes;++i) {
memset(&thread_data[i], 0, sizeof(gstats_thread_data));
}
// // parallel initialization
// VERBOSE std::cout<<"parallel stat initialization: spawning "<<std::thread::hardware_concurrency()<<" threads"<<std::endl;
// volatile bool start = false;
// std::thread threads[num_processes];
// for (int tid=0;tid<num_processes;++tid) {
// threads[tid] = std::thread([tid,&start,num_processes,this]() {
// // while (!start) { __sync_synchronize(); }
// //cout<<"thread "<<tid<<" initializing thread_data entry of size "<<sizeof(gstats_thread_data)<<std::endl;
// memset(&this->thread_data[tid], 0, sizeof(gstats_thread_data));
// });
// }
// start = true;
// __sync_synchronize();
// VERBOSE std::cout<<"parallel stat initialization: joining "<<num_processes<<" threads"<<std::endl;
// for (int tid=0;tid<num_processes;++tid) {
// threads[tid].join();
// }
// VERBOSE std::cout<<"parallel stat initialization: joined all."<<std::endl;
}
~gstats_t() {
#if !defined NO_CLEANUP_AFTER_WORKLOAD
acquireLock(&arrays_to_delete_lock);
for (auto it = arrays_to_delete.begin(); it != arrays_to_delete.end(); it++) {
delete[] *it;
}
releaseLock(&arrays_to_delete_lock);
delete[] thread_data;
#endif
}
template <typename T>
void clear_to_value(gstats_stat_id id, T value) {
for (int tid=0;tid<NUM_PROCESSES;++tid) {
for (int i=0;i<thread_data[tid].capacity[id];++i) {
set_stat(tid, id, value, i);
}
}
}
void clear_all() {
#pragma omp parallel for
for (int tid=0;tid<NUM_PROCESSES;++tid) {
for (gstats_stat_id id=0;id<num_stats;++id) {
memset(thread_data[tid].data + thread_data[tid].offset[id], 0, GSTATS_DATA_SIZE_BYTES*thread_data[tid].size[id]);
}
memset(thread_data[tid].size, 0, sizeof(int)*GSTATS_MAX_NUM_STATS);
}
for (gstats_stat_id id=0;id<num_stats;++id) {
computed_gstats_total[id] = NULL;
computed_gstats_by_index[id] = NULL;
computed_gstats_by_thread[id] = NULL;
// computed_histograms_log[id] = NULL;
// computed_histograms_lin[id] = NULL;
num_indices[id] = 0;
}
already_computed_stats = false;
}
gstats_stat_id create_stat(gstats_enum_data_type datatype, std::string name, const int capacity, std::initializer_list<gstats_output_item> gstats_output_items) {
if (num_stats >= GSTATS_MAX_NUM_STATS) {
std::cout<<"ERROR: added too many stats. either eliminate some stats or increase GSTATS_MAX_NUM_STATS in common/gstats.h."<<std::endl;
exit(1);
}
gstats_stat_id id = num_stats;
data_types[id] = datatype;
id_to_name[id] = name;
name_to_id[name] = id;
// save desired output items
for (auto output_item : gstats_output_items) {
output_config.insert(std::pair<gstats_stat_id, gstats_output_item>(id, output_item));
}
// initialize stat for all threads
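// Layout: each thread packs its stats back-to-back in its fixed-size data
// buffer, so stat id begins where stat id-1 ends (offset + capacity slots of
// GSTATS_DATA_SIZE_BYTES each).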
for (int tid=0;tid<NUM_PROCESSES;++tid) {
thread_data[tid].offset[id] = (id == 0) ? 0 : thread_data[tid].offset[id-1] + thread_data[tid].capacity[id-1]*GSTATS_DATA_SIZE_BYTES;
thread_data[tid].capacity[id] = capacity;
auto endSize = thread_data[tid].offset[id] + thread_data[tid].capacity[id]*GSTATS_DATA_SIZE_BYTES;
if (endSize > GSTATS_MAX_THREAD_BUF_SIZE) {
std::cout<<"ERROR: stat w/id "<<id<<" ends at offset "<<endSize<<" in thread_data[tid].data, which runs off the end of the array. Either shrink your stats increase GSTATS_MAX_THREAD_BUF_SIZE in common/gstats.h."<<std::endl;
exit(1);
}
thread_data[tid].size[id] = 0;
//if (tid == 0) std::cout<<"stat id="<<id<<" name="<<name<<" tid="<<tid<<" offset="<<thread_data[tid].offset[id]<<" capacity="<<thread_data[tid].capacity[id]<<" size="<<thread_data[tid].size[id]<<" stat_ptr_addr="<<(long long) thread_data[tid].get_ptr<void>(id)<<std::endl;
//if (tid == 0) std::cout<<"stat "<<id<<": "<<name<<" offset="<<thread_data[tid].offset[id]<<" capacity="<<thread_data[tid].capacity[id]<<std::endl;
}
num_stats++;
return id;
}
template <typename T>
inline T add_stat(const int tid, const gstats_stat_id id, T value, const int index) {
if (index < 0 || index >= thread_data[tid].capacity[id]) {
//error("index="<<index<<" >= capacity="<<thread_data[tid].capacity[id]<<" for tid="<<tid<<" sid="<<id<<" stat="<<id_to_name[id]);
return -1;
}
assert(index >= 0 && index < thread_data[tid].capacity[id]);
T * ptr = thread_data[tid].get_ptr<T>(id);
T retval = (ptr[index] += value);
//cout<<"adding to id="<<id<<" index="<<index<<" value="<<value<<" result="<<ptr[index]<<std::endl;
if (index >= thread_data[tid].size[id]) {
thread_data[tid].size[id] = index+1;
}
return retval;
}
template <typename T>
inline T set_stat(const int tid, const gstats_stat_id id, T value, const int index) {
T * ptr = thread_data[tid].get_ptr<T>(id);
if (index >= thread_data[tid].capacity[id]) {
//error("index="<<index<<" >= capacity="<<thread_data[tid].capacity[id]<<" for tid="<<tid<<" sid="<<id<<" stat="<<id_to_name[id]);
return -1;
}
assert(index < thread_data[tid].capacity[id]);
ptr[index] = value;
//cout<<"adding to id="<<id<<" index="<<index<<" value="<<value<<" result="<<ptr[index]<<std::endl;
if (index >= thread_data[tid].size[id]) {
thread_data[tid].size[id] = index+1;
}
return value;
}
template <typename T>
inline T append_stat(const int tid, const gstats_stat_id id, T value) {
int index = thread_data[tid].size[id];
if (index >= thread_data[tid].capacity[id]) {
//error("index="<<index<<" >= capacity="<<thread_data[tid].capacity[id]<<" for tid="<<tid<<" sid="<<id<<" stat="<<id_to_name[id]);
return -1;
}
T * ptr = thread_data[tid].get_ptr<T>(id);
ptr[index] = value;
// std::cout<<"appending to id="<<id<<" index="<<index<<" value="<<value<<" at index="<<index<<" result="<<ptr[index]<<std::endl;
++thread_data[tid].size[id];
return value;
}
template <typename T>
inline T get_stat(const int tid, const gstats_stat_id id, const int index) {
if (index >= thread_data[tid].capacity[id]) {
setbench_error("index="<<index<<" >= capacity="<<thread_data[tid].capacity[id]<<" for tid="<<tid<<" sid="<<id<<" stat="<<id_to_name[id]);
}
T * ptr = thread_data[tid].get_ptr<T>(id);
return ptr[index];
}
private:
std::string twoDigits(int x) {
std::stringstream ss;
if (x >= 0 && x < 10) {
ss<<"0";
}
ss<<x;
return ss.str();
}
// TODO: implement more efficient memoization of each individual stat_metrics computation, below, so there are no duplicate computations, and no unnecessary computations
// TODO: use a supplied predicate to ignore certain values, or make a distinction between initialized and uninitialized fields (currently it's just ignoring 0)
template <typename T>
stat_metrics<T> * compute_stat_metrics_total(const gstats_stat_id id) {
stat_metrics<T> * results = new stat_metrics<T>[1];
acquireLock(&arrays_to_delete_lock);
arrays_to_delete.push_back((stat_metrics<double> *) results);
releaseLock(&arrays_to_delete_lock);
// first, cnt, min, max, sum
results[0].first = (NUM_PROCESSES <= 0 || thread_data[0].size[id] <= 0) ? 0 : thread_data[0].get_ptr<T>(id)[0];
for (int tid=0;tid<NUM_PROCESSES;++tid) {
int size = thread_data[tid].size[id];
auto data = thread_data[tid].get_ptr<T>(id);
for (int ix=0;ix<size;++ix) {
if (data[ix] == 0) continue;
++results[0].cnt;
results[0].min = std::min(results[0].min, data[ix]);
results[0].max = std::max(results[0].max, data[ix]);
results[0].sum += data[ix];
}
}
// avg
if (results[0].cnt > 0) results[0].avg = results[0].sum / results[0].cnt;
// variance
for (int tid=0;tid<NUM_PROCESSES;++tid) {
int size = thread_data[tid].size[id];
auto data = thread_data[tid].get_ptr<T>(id);
for (int ix=0;ix<size;++ix) {
if (data[ix] == 0) continue;
results[0].variance += GSTATS_SQ(data[ix] - results[0].avg);
}
}
// unbiased sample stdev (the variance field accumulates the sum of squared deviations)
if (results[0].cnt > 1) results[0].stdev = sqrt(results[0].variance / (results[0].cnt - 1));
return results;
}
template <typename T>
stat_metrics<T> * compute_stat_metrics_by_index(const gstats_stat_id id, int * num_indices) {
// figure out how many indices there are,
// so we know how many stat_metrics structs to allocate
int cnt = 0;
for (int tid=0;tid<NUM_PROCESSES;++tid) {
int size = thread_data[tid].size[id];
cnt = std::max(cnt, size);
}
stat_metrics<T> * results = new stat_metrics<T>[cnt];
acquireLock(&arrays_to_delete_lock);
arrays_to_delete.push_back((stat_metrics<double> *) results);
releaseLock(&arrays_to_delete_lock);
// first, cnt, min, max, sum, avg
for (int ix=0;ix<cnt;++ix) {
results[ix].first = (NUM_PROCESSES > 0 && ix < thread_data[0].size[id]) ? thread_data[0].get_ptr<T>(id)[ix] : 0;
for (int tid=0;tid<NUM_PROCESSES;++tid) {
int size = thread_data[tid].size[id];
if (ix >= size) continue;
auto data = thread_data[tid].get_ptr<T>(id);
if (data[ix] == 0) continue;
++results[ix].cnt;
results[ix].min = std::min(results[ix].min, data[ix]);
results[ix].max = std::max(results[ix].max, data[ix]);
results[ix].sum += data[ix];
}
if (results[ix].cnt > 0) results[ix].avg = results[ix].sum / results[ix].cnt;
}
// variance, stdev
for (int ix=0;ix<cnt;++ix) {
for (int tid=0;tid<NUM_PROCESSES;++tid) {
int size = thread_data[tid].size[id];
if (ix >= size) continue;
auto data = thread_data[tid].get_ptr<T>(id);
if (data[ix] == 0) continue;
results[ix].variance += GSTATS_SQ(data[ix] - results[ix].avg);
}
if (results[ix].cnt > 1) results[ix].stdev = sqrt(results[ix].variance / (results[ix].cnt - 1));
}
*num_indices = cnt;
return results;
}
template <typename T>
stat_metrics<T> * compute_stat_metrics_by_thread(const gstats_stat_id id) {
stat_metrics<T> * results = new stat_metrics<T>[NUM_PROCESSES];
acquireLock(&arrays_to_delete_lock);
arrays_to_delete.push_back((stat_metrics<double> *) results);
releaseLock(&arrays_to_delete_lock);
// first, cnt, min, max, sum, avg
for (int tid=0;tid<NUM_PROCESSES;++tid) {
auto data = thread_data[tid].get_ptr<T>(id);
results[tid].first = (thread_data[tid].size[id] > 0) ? data[0] : 0;
for (int ix=0;ix<thread_data[tid].size[id];++ix) {
if (data[ix] == 0) continue;
++results[tid].cnt;
results[tid].min = std::min(results[tid].min, data[ix]);
results[tid].max = std::max(results[tid].max, data[ix]);
results[tid].sum += data[ix];
}
if (results[tid].cnt > 0) results[tid].avg = results[tid].sum / results[tid].cnt;
}
// variance, stdev
for (int tid=0;tid<NUM_PROCESSES;++tid) {
auto data = thread_data[tid].get_ptr<T>(id);
for (int ix=0;ix<thread_data[tid].size[id];++ix) {
if (data[ix] == 0) continue;
results[tid].variance += GSTATS_SQ(data[ix] - results[tid].avg);
}
if (results[tid].cnt > 1) results[tid].stdev = sqrt(results[tid].variance / (results[tid].cnt - 1));
}
return results;
}
template <typename T>
int log2_capped(T x) {
if (x <= 0) return -1;
if (x <= 1) return 0;
T log2_x = log2(x);
int log2_x_i = (int) log2_x;
//if (log2_x_i < 0 || log2_x_i > GSTATS_DEFAULT_HISTOGRAM_LOG_NUM_BUCKETS) std::cout<<"x="<<x<<" log2_x_i="<<log2_x_i<<std::endl;
#ifndef NDEBUG
if (log2_x_i < 0) std::cout<<"log2_x_i="<<log2_x_i<<" log2_x="<<log2_x<<" x="<<x<<std::endl;
#endif
assert(log2_x_i >= 0);
return log2_x_i > GSTATS_DEFAULT_HISTOGRAM_LOG_NUM_BUCKETS
? GSTATS_DEFAULT_HISTOGRAM_LOG_NUM_BUCKETS
: log2_x_i;
}
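// e.g., log2_capped(1) == 0 and log2_capped(1024) == 10; non-positive inputs
// return -1, which lands in the hidden bucket at __histogram[0] below, and
// results are capped at GSTATS_DEFAULT_HISTOGRAM_LOG_NUM_BUCKETS.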
template <typename T>
stat_metrics<long long> * get_histogram_log(const gstats_stat_id id, stat_metrics<T> * const metrics, const int num_metrics) {
// compute histogram
constexpr const int num_buckets = GSTATS_DEFAULT_HISTOGRAM_LOG_NUM_BUCKETS+1;
stat_metrics<long long> * __histogram = new stat_metrics<long long>[num_buckets+1];
acquireLock(&arrays_to_delete_lock);
arrays_to_delete.push_back((stat_metrics<double> *) __histogram);
releaseLock(&arrays_to_delete_lock);
for (int i=0;i<num_buckets+1;++i) {
memset(&__histogram[i], 0, sizeof(stat_metrics<long long>));
}
stat_metrics<long long> * histogram = &__histogram[1]; // handle -1s returned by log2_capped
// (MOSTLY) SEQUENTIAL
if (num_indices[id] <= (1<<16)) {
if (metrics) {
/**
* if metrics is non-null, compute histogram from metrics
*/
#pragma omp parallel
{
#pragma omp single
{
#pragma omp task
{ for (int ix=0;ix<num_indices[id];++ix) { ++histogram[log2_capped(metrics[ix].first)].first; } }
#pragma omp task
{ for (int ix=0;ix<num_indices[id];++ix) { ++histogram[log2_capped(metrics[ix].cnt)].cnt; } }
#pragma omp task
{ for (int ix=0;ix<num_indices[id];++ix) { ++histogram[log2_capped(metrics[ix].min)].min; } }
#pragma omp task
{ for (int ix=0;ix<num_indices[id];++ix) { ++histogram[log2_capped(metrics[ix].max)].max; } }
#pragma omp task
{ for (int ix=0;ix<num_indices[id];++ix) { ++histogram[log2_capped(metrics[ix].sum)].sum; } }
#pragma omp task
{ for (int ix=0;ix<num_indices[id];++ix) { ++histogram[log2_capped(metrics[ix].avg)].avg; } }
#pragma omp task
{ for (int ix=0;ix<num_indices[id];++ix) { ++histogram[log2_capped(metrics[ix].variance)].variance; } }
#pragma omp task
{ for (int ix=0;ix<num_indices[id];++ix) { ++histogram[log2_capped(metrics[ix].stdev)].stdev; } }
}
}
} else {
/**
* if metrics is null, compute histogram from the full data
*/
for (int ix=0;ix<num_indices[id];++ix) {
for (int tid=0;tid<NUM_PROCESSES;++tid) {
++histogram[log2_capped(get_stat<T>(tid, id, ix))].none;
}
}
}
// aggressively parallel (for very large)
} else {
// parallel histogram construction
VERBOSE std::cout<<"parallel compute histogram: spawning "<<std::thread::hardware_concurrency()<<" threads"<<std::endl;
std::thread threads[std::thread::hardware_concurrency()];
volatile bool start = false;
for (int thread_id=0;thread_id<(int) std::thread::hardware_concurrency();++thread_id) {
//cout<<" parallel compute histogram: spawning thread "<<thread_id<<std::endl;
threads[thread_id] = std::thread([thread_id, histogram, id, &start, num_buckets, metrics, this]() {
// while (!start) { __sync_synchronize(); }
// compute our slice of the indices
int slice_size = this->num_indices[id] / std::thread::hardware_concurrency();
int start_ix = slice_size * thread_id;
int end_ix = (thread_id == (int) std::thread::hardware_concurrency()-1) ? this->num_indices[id] : slice_size * (thread_id+1);
stat_metrics<long long> * __thread_histogram = new stat_metrics<long long>[num_buckets+1];
for (int i=0;i<num_buckets+1;++i) {
memset(&__thread_histogram[i], 0, sizeof(stat_metrics<long long>));
}
stat_metrics<long long> * thread_histogram = &__thread_histogram[1]; // handle -1s returned by log2_capped
// compute a thread-local histogram for only our slice of the indices
if (metrics) {
/**
* if metrics is non-null, compute histogram from metrics
*/
for (int ix=start_ix;ix<end_ix;++ix) {
++thread_histogram[log2_capped(metrics[ix].first)].first;
++thread_histogram[log2_capped(metrics[ix].cnt)].cnt;
++thread_histogram[log2_capped(metrics[ix].min)].min;
++thread_histogram[log2_capped(metrics[ix].max)].max;
++thread_histogram[log2_capped(metrics[ix].sum)].sum;
++thread_histogram[log2_capped(metrics[ix].avg)].avg;
++thread_histogram[log2_capped(metrics[ix].variance)].variance;
++thread_histogram[log2_capped(metrics[ix].stdev)].stdev;
}
// use atomic primitives to add our histogram amounts to the shared array
for (int bucket=0;bucket<num_buckets;++bucket) {
__sync_fetch_and_add(&histogram[bucket].first, thread_histogram[bucket].first);
__sync_fetch_and_add(&histogram[bucket].cnt, thread_histogram[bucket].cnt);
__sync_fetch_and_add(&histogram[bucket].min, thread_histogram[bucket].min);
__sync_fetch_and_add(&histogram[bucket].max, thread_histogram[bucket].max);
__sync_fetch_and_add(&histogram[bucket].sum, thread_histogram[bucket].sum);
__sync_fetch_and_add(&histogram[bucket].avg, thread_histogram[bucket].avg);
__sync_fetch_and_add(&histogram[bucket].variance, thread_histogram[bucket].variance);
__sync_fetch_and_add(&histogram[bucket].stdev, thread_histogram[bucket].stdev);
}
} else {
/**
* if metrics is null, compute histogram from the full data
*/
for (int ix=start_ix;ix<end_ix;++ix) {
for (int tid=0;tid<NUM_PROCESSES;++tid) {
++thread_histogram[log2_capped(get_stat<T>(tid, id, ix))].none;
}
}
// use atomic primitives to add our histogram amounts to the shared array
for (int bucket=0;bucket<num_buckets;++bucket) {
__sync_fetch_and_add(&histogram[bucket].none, thread_histogram[bucket].none);
}
}
delete[] __thread_histogram;
//cout<<" parallel compute histogram: thread "<<thread_id<<" terminated"<<std::endl;
});
//cout<<" parallel compute histogram: spawned thread "<<thread_id<<std::endl;
}
__sync_synchronize();
start = true;
__sync_synchronize();
VERBOSE std::cout<<"parallel compute histogram: joining "<<std::thread::hardware_concurrency()<<" threads"<<std::endl;
for (int i=0;i<(int) std::thread::hardware_concurrency();++i) {
//cout<<" parallel compute histogram: joining thread "<<i<<std::endl;
threads[i].join();
//cout<<" parallel compute histogram: joined thread "<<i<<std::endl;
}
VERBOSE std::cout<<"parallel compute histogram: joined all."<<std::endl;
}
return histogram;
}
struct histogram_lin_dims {
double first_min;
double first_max;
double first_bucket_size;
double cnt_min;
double cnt_max;
double cnt_bucket_size;
double min_min;
double min_max;
double min_bucket_size;
double max_min;
double max_max;
double max_bucket_size;
double sum_min;
double sum_max;
double sum_bucket_size;
double avg_min;
double avg_max;
double avg_bucket_size;
double variance_min;
double variance_max;
double variance_bucket_size;
double stdev_min;
double stdev_max;
double stdev_bucket_size;
double none_min;
double none_max;
double none_bucket_size;
};
template <typename T>
std::pair<stat_metrics<long long> *, histogram_lin_dims> get_histogram_lin(const gstats_stat_id id, const int num_buckets, stat_metrics<T> * const metrics, const int num_metrics) {
// compute histogram
stat_metrics<long long> * histogram = new stat_metrics<long long>[num_buckets];
acquireLock(&arrays_to_delete_lock);
arrays_to_delete.push_back((stat_metrics<double> *) histogram);
releaseLock(&arrays_to_delete_lock);
for (int i=0;i<num_buckets;++i) {
memset(&histogram[i], 0, sizeof(stat_metrics<long long>));
}
histogram_lin_dims dims;
if (metrics) {
/**
* if metrics is non-null, compute histogram from metrics
*/
dims.first_min = std::numeric_limits<double>::max();
dims.first_max = std::numeric_limits<double>::min();
dims.cnt_min = std::numeric_limits<double>::max();
dims.cnt_max = std::numeric_limits<double>::min();
dims.min_min = std::numeric_limits<double>::max();
dims.min_max = std::numeric_limits<double>::min();
dims.max_min = std::numeric_limits<double>::max();
dims.max_max = std::numeric_limits<double>::min();
dims.sum_min = std::numeric_limits<double>::max();
dims.sum_max = std::numeric_limits<double>::min();
dims.avg_min = std::numeric_limits<double>::max();
dims.avg_max = std::numeric_limits<double>::min();
dims.variance_min = std::numeric_limits<double>::max();
dims.variance_max = std::numeric_limits<double>::min();
dims.stdev_min = std::numeric_limits<double>::max();
dims.stdev_max = std::numeric_limits<double>::min();
for (int ix=0;ix<num_indices[id];++ix) {
dims.first_min = std::min(dims.first_min, (double) metrics[ix].first);
dims.first_max = std::max(dims.first_max, (double) metrics[ix].first);
dims.cnt_min = std::min(dims.cnt_min, (double) metrics[ix].cnt);
dims.cnt_max = std::max(dims.cnt_max, (double) metrics[ix].cnt);
dims.min_min = std::min(dims.min_min, (double) metrics[ix].min);
dims.min_max = std::max(dims.min_max, (double) metrics[ix].min);
dims.max_min = std::min(dims.max_min, (double) metrics[ix].max);
dims.max_max = std::max(dims.max_max, (double) metrics[ix].max);
dims.sum_min = std::min(dims.sum_min, (double) metrics[ix].sum);
dims.sum_max = std::max(dims.sum_max, (double) metrics[ix].sum);
dims.avg_min = std::min(dims.avg_min, (double) metrics[ix].avg);
dims.avg_max = std::max(dims.avg_max, (double) metrics[ix].avg);
dims.variance_min = std::min(dims.variance_min, (double) metrics[ix].variance);
dims.variance_max = std::max(dims.variance_max, (double) metrics[ix].variance);
dims.stdev_min = std::min(dims.stdev_min, (double) metrics[ix].stdev);
dims.stdev_max = std::max(dims.stdev_max, (double) metrics[ix].stdev);
}
dims.first_bucket_size = ((dims.first_max - dims.first_min) / num_buckets);
dims.cnt_bucket_size = ((dims.cnt_max - dims.cnt_min) / num_buckets);
dims.min_bucket_size = ((dims.min_max - dims.min_min) / num_buckets);
dims.max_bucket_size = ((dims.max_max - dims.max_min) / num_buckets);
dims.sum_bucket_size = ((dims.sum_max - dims.sum_min) / num_buckets);
dims.avg_bucket_size = ((dims.avg_max - dims.avg_min) / num_buckets);
dims.variance_bucket_size = ((dims.variance_max - dims.variance_min) / num_buckets);
dims.stdev_bucket_size = ((dims.stdev_max - dims.stdev_min) / num_buckets);
dims.none_bucket_size = 0; // the none field is only populated in the full-data branch below
// bucket index = (value - field_min) / bucket_size, clamped so that
// value == field_max does not index one past the end of the histogram
#define GSTATS_LIN_BUCKET(x, mn, sz) (std::min((int) (((x) - (mn)) / (sz)), (num_buckets) - 1))
for (int ix=0;ix<num_indices[id];++ix) {
if (dims.first_bucket_size) ++histogram[GSTATS_LIN_BUCKET(metrics[ix].first, dims.first_min, dims.first_bucket_size)].first;
if (dims.cnt_bucket_size) ++histogram[GSTATS_LIN_BUCKET(metrics[ix].cnt, dims.cnt_min, dims.cnt_bucket_size)].cnt;
if (dims.min_bucket_size) ++histogram[GSTATS_LIN_BUCKET(metrics[ix].min, dims.min_min, dims.min_bucket_size)].min;
if (dims.max_bucket_size) ++histogram[GSTATS_LIN_BUCKET(metrics[ix].max, dims.max_min, dims.max_bucket_size)].max;
if (dims.sum_bucket_size) ++histogram[GSTATS_LIN_BUCKET(metrics[ix].sum, dims.sum_min, dims.sum_bucket_size)].sum;
if (dims.avg_bucket_size) ++histogram[GSTATS_LIN_BUCKET(metrics[ix].avg, dims.avg_min, dims.avg_bucket_size)].avg;
if (dims.variance_bucket_size) ++histogram[GSTATS_LIN_BUCKET(metrics[ix].variance, dims.variance_min, dims.variance_bucket_size)].variance;
if (dims.stdev_bucket_size) ++histogram[GSTATS_LIN_BUCKET(metrics[ix].stdev, dims.stdev_min, dims.stdev_bucket_size)].stdev;
}
#undef GSTATS_LIN_BUCKET
} else {
/**
* if metrics is null, compute histogram from the full data
*/
dims.none_min = std::numeric_limits<double>::max();
dims.none_max = std::numeric_limits<double>::min();
for (int tid=0;tid<NUM_PROCESSES;++tid) {
for (int ix=0;ix<num_indices[id];++ix) {
auto s = (double) get_stat<T>(tid, id, ix);
if (s == 0) continue;
dims.none_min = std::min(dims.none_min, s);
dims.none_max = std::max(dims.none_max, s);
}
}
dims.none_bucket_size = ((dims.none_max - dims.none_min) / num_buckets);
for (int tid=0;tid<NUM_PROCESSES;++tid) {
for (int ix=0;ix<num_indices[id];++ix) {
auto s = (double) get_stat<T>(tid, id, ix);
if (s == 0) continue;
if (dims.none_bucket_size > 1e-6) {
// clamp so that s == none_max does not index one past the end of the histogram
++histogram[std::min((int) ((s - dims.none_min) / dims.none_bucket_size), num_buckets - 1)].none;
} else {
++histogram[0].none;
}
}
}
}
return std::pair<stat_metrics<long long> *, histogram_lin_dims>(histogram, dims);
}
void compute_before_printing() {
if (already_computed_stats) return;
// std::cout<<"start compute_before_printing()..."<<std::endl;
#pragma omp parallel
{
#pragma omp single nowait
{
for (int id=0;id<num_stats;++id) {
if (this->data_types[id] == LONG_LONG) {
#pragma omp task
{ this->computed_gstats_total[id] = (stat_metrics<double> *) compute_stat_metrics_total<long long>(id); }
// WARNING: compute_stat_metrics_INDEX SETS this->num_indices[id], WHICH IS NEEDED FOR GET_HISTOGRAM_... CALLS AND print_all!
#pragma omp task
{ this->computed_gstats_by_index[id] = (stat_metrics<double> *) compute_stat_metrics_by_index<long long>(id, &this->num_indices[id]); }
#pragma omp task
{ this->computed_gstats_by_thread[id] = (stat_metrics<double> *) compute_stat_metrics_by_thread<long long>(id); }
} else {
#pragma omp task
{ this->computed_gstats_total[id] = compute_stat_metrics_total<double>(id); }
// WARNING: compute_stat_metrics_INDEX SETS this->num_indices[id], WHICH IS NEEDED FOR GET_HISTOGRAM_... CALLS AND print_all!
#pragma omp task
{ this->computed_gstats_by_index[id] = compute_stat_metrics_by_index<double>(id, &this->num_indices[id]); }
#pragma omp task
{ this->computed_gstats_by_thread[id] = compute_stat_metrics_by_thread<double>(id); }
}
}
}
}
already_computed_stats = true;
// // parallel statistics computation
// VERBOSE std::cout<<"parallel stats compute before printing: spawning "<<(3*num_stats)<<" threads"<<std::endl;
// std::thread threads[3*num_stats];
// for (int id=0;id<num_stats;++id) {
// if (this->data_types[id] == LONG_LONG) {
// threads[3*id+0] = std::thread([this,id]() { this->computed_gstats_total[id] = (stat_metrics<double> *) compute_stat_metrics_total<long long>(id); });
// // WARNING: compute_stat_metrics_INDEX SETS this->num_indices[id], WHICH IS NEEDED FOR GET_HISTOGRAM_... CALLS AND print_all!
// threads[3*id+1] = std::thread([this,id]() { this->computed_gstats_by_index[id] = (stat_metrics<double> *) compute_stat_metrics_by_index<long long>(id, &this->num_indices[id]); });
// threads[3*id+2] = std::thread([this,id]() { this->computed_gstats_by_thread[id] = (stat_metrics<double> *) compute_stat_metrics_by_thread<long long>(id); });
// } else {
// threads[3*id+0] = std::thread([this,id]() { this->computed_gstats_total[id] = compute_stat_metrics_total<double>(id); });
// // WARNING: compute_stat_metrics_INDEX SETS this->num_indices[id], WHICH IS NEEDED FOR GET_HISTOGRAM_... CALLS AND print_all!
// threads[3*id+1] = std::thread([this,id]() { this->computed_gstats_by_index[id] = compute_stat_metrics_by_index<double>(id, &this->num_indices[id]); });
// threads[3*id+2] = std::thread([this,id]() { this->computed_gstats_by_thread[id] = compute_stat_metrics_by_thread<double>(id); });
// }
// }
// VERBOSE std::cout<<"parallel stats compute before printing: joining "<<(3*num_stats)<<" threads"<<std::endl;
// for (int i=0;i<3*num_stats;++i) {
// threads[i].join();
// }
// VERBOSE std::cout<<"parallel stats compute before printing: joined all."<<std::endl;
// __sync_synchronize();
// already_computed_stats = true;
// std::cout<<"finished compute_before_printing()."<<std::endl;
}
public:
template <typename T>
T get_sum(const gstats_stat_id id) {
T sum = 0;
for (int tid=0;tid<NUM_PROCESSES;++tid) {
int size = thread_data[tid].size[id];
auto data = thread_data[tid].get_ptr<T>(id);
for (int ix=0;ix<size;++ix) {
if (data[ix] == 0) continue;
sum += data[ix];
}
}
return sum;
}
template <typename T>
stat_metrics<T> * compute_stat_metrics(const gstats_stat_id id, gstats_enum_aggregation_granularity granularity) {
switch (granularity) {
case TOTAL:
if (already_computed_stats) return (stat_metrics<T> *) computed_gstats_total[id];
setbench_error("functionality disabled because it is very heavyweight, and is easy to misuse, biasing results. run print_stat() before calling this, instead.");
// else if (this->data_types[id] == LONG_LONG) return compute_stat_metrics_total<long long>(id);
// return compute_stat_metrics_total<double>(id);
case BY_INDEX:
if (already_computed_stats) return (stat_metrics<T> *) computed_gstats_by_index[id];
setbench_error("functionality disabled because it is very heavyweight, and is easy to misuse, biasing results. run print_stat() before calling this, instead.");
// else if (this->data_types[id] == LONG_LONG) return compute_stat_metrics_by_index<long long>(id);
// return compute_stat_metrics_by_index<double>(id);
case BY_THREAD:
if (already_computed_stats) return (stat_metrics<T> *) computed_gstats_by_thread[id];
setbench_error("functionality disabled because it is very heavyweight, and is easy to misuse, biasing results. run print_stat() before calling this, instead.");
// else if (this->data_types[id] == LONG_LONG) return compute_stat_metrics_by_thread<long long>(id);
// return compute_stat_metrics_by_thread<double>(id);
default:
setbench_error("should not get here");
break;
}
return NULL; // unreachable when setbench_error() aborts; silences missing-return warnings
}
template <typename T>
void print_stat(const gstats_stat_id id, gstats_output_item& output_item) {
assert(id >= 0 && id < num_stats);
//cout<<"printing stat "<<id_to_name[id]<<" with id "<<id<<std::endl;
compute_before_printing();
std::string granularity_str;
stat_metrics<T> * metrics = NULL;
int num_metrics = 0;
switch (output_item.granularity) {
case FULL_DATA:
metrics = NULL;
num_metrics = -1;
granularity_str="_full_data";
break;
case TOTAL:
metrics = (stat_metrics<T> *) computed_gstats_total[id];
num_metrics = 1;
granularity_str="_total";
break;
case BY_INDEX:
metrics = (stat_metrics<T> *) computed_gstats_by_index[id];
num_metrics = num_indices[id];
granularity_str="_by_index";
break;
case BY_THREAD:
metrics = (stat_metrics<T> *) computed_gstats_by_thread[id];
num_metrics = NUM_PROCESSES;
granularity_str="_by_thread";
break;
}
if (output_item.func == NONE) {
if (output_item.granularity != FULL_DATA) setbench_error("must use aggregation granularity FULL_DATA when using aggregation function NONE");
}
if (output_item.granularity == FULL_DATA) {
if (output_item.func != NONE) setbench_error("must use aggregation function NONE when using aggregation granularity FULL_DATA");
}
switch (output_item.method) {
case PRINT_RAW:
{
switch (output_item.func) {
case FIRST: GSTATS_PRINT_AGG(granularity_str, FIRST, id, metrics, num_metrics); break;
case COUNT: GSTATS_PRINT_AGG(granularity_str, COUNT, id, metrics, num_metrics); break;
case MIN: GSTATS_PRINT_AGG(granularity_str, MIN, id, metrics, num_metrics); break;
case MAX: GSTATS_PRINT_AGG(granularity_str, MAX, id, metrics, num_metrics); break;
case SUM: GSTATS_PRINT_AGG(granularity_str, SUM, id, metrics, num_metrics); break;
case AVERAGE: GSTATS_PRINT_AGG(granularity_str, AVERAGE, id, metrics, num_metrics); break;
case VARIANCE: GSTATS_PRINT_AGG(granularity_str, VARIANCE, id, metrics, num_metrics); break;
case STDEV: GSTATS_PRINT_AGG(granularity_str, STDEV, id, metrics, num_metrics); break;
case NONE: GSTATS_PRINT_AGG(granularity_str, NONE, id, metrics, num_metrics); break;
default: setbench_error("should not reach here");
}
} break;
case PRINT_HISTOGRAM_LOG:
{
if (output_item.granularity == TOTAL) setbench_error("aggregation granularity TOTAL should not be used with HISTOGRAM output (since the histogram will simply plot a single point)");
switch (output_item.func) {
case FIRST: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, FIRST, metrics, num_metrics); break;
case COUNT: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, COUNT, metrics, num_metrics); break;
case MIN: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, MIN, metrics, num_metrics); break;
case MAX: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, MAX, metrics, num_metrics); break;
case SUM: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, SUM, metrics, num_metrics); break;
case AVERAGE: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, AVERAGE, metrics, num_metrics); break;
case VARIANCE: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, VARIANCE, metrics, num_metrics); break;
case STDEV: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, STDEV, metrics, num_metrics); break;
case NONE: GSTATS_PRINT_HISTOGRAM_LOG(id, granularity_str, NONE, metrics, num_metrics); break;
default: setbench_error("should not reach here"); break;
}
} break;
case PRINT_HISTOGRAM_LIN:
{
if (output_item.granularity == TOTAL) setbench_error("aggregation granularity TOTAL should not be used with HISTOGRAM output (since the histogram will simply plot a single point)");
switch (output_item.func) {
case FIRST: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, FIRST, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
case COUNT: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, COUNT, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
case MIN: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, MIN, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
case MAX: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, MAX, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
case SUM: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, SUM, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
case AVERAGE: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, AVERAGE, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
case VARIANCE: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, VARIANCE, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
case STDEV: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, STDEV, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
case NONE: GSTATS_PRINT_HISTOGRAM_LIN(id, granularity_str, NONE, metrics, num_metrics, output_item.num_buckets_if_histogram_lin); break;
default: setbench_error("should not reach here"); break;
}
} break;
case PRINT_TO_FILE:
{
assert(output_item.granularity == FULL_DATA);
assert(output_item.func == NONE);
// // is there any actual data to write?
// int rows_to_write = 0;
// for (int __tid=0;__tid<NUM_PROCESSES;++__tid) {
// if (thread_data[__tid].size[id]) {
// for (int __ix=0;__ix<thread_data[__tid].size[id];++__ix) {
// rows_to_write += (get_stat<T>(__tid, id, __ix) != std::numeric_limits<T>::max()
// && get_stat<T>(__tid, id, __ix) != std::numeric_limits<T>::min());
// }
// }
// }
// // if so, create an output file
// if (rows_to_write) {
std::ofstream ofile;
if (output_item.output_filename) {
ofile.open(output_item.output_filename);
} else {
std::stringstream ss;
ss<<id_to_name[id];
ss<<".txt";
ofile.open(ss.str());
}
for (int __tid=0;__tid<NUM_PROCESSES;++__tid) {
if (thread_data[__tid].size[id]) {
for (int __ix=0;__ix<thread_data[__tid].size[id];++__ix) {
ofile<<__tid;
ofile<<" "<<__ix;
if (get_stat<T>(__tid, id, __ix) == std::numeric_limits<T>::max() || get_stat<T>(__tid, id, __ix) == std::numeric_limits<T>::min()) {
ofile<<" "<<"0";
} else {
ofile<<" "<<get_stat<T>(__tid, id, __ix);
}
ofile<<std::endl;
}
}
}
ofile.close();
// }
} break;
default: setbench_error("should not reach here"); break;
}
}
void print_all() {
for (auto it = output_config.begin(); it != output_config.end(); it++) {
GSTATS_USE_TEMPLATE(it->first, print_stat, it->first GSTATS_COMMA it->second);
}
}
};
#endif /* GSTATS_H */
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
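/* Usage sketch for timeval_subtract() (mirrors the timing loop below):
 *   struct timeval t0, t1, dt;
 *   gettimeofday(&t0, 0);
 *   ... timed work ...
 *   gettimeofday(&t1, 0);
 *   timeval_subtract(&dt, &t1, &t0);
 *   double seconds = dt.tv_sec + dt.tv_usec * 1.0e-6;
 */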
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
} else {
fprintf(stderr, "usage: %s Nx Ny Nz [Nt]\n", argv[0]);
return 1;
}
Nt = (argc > 4) ? atoi(argv[4]) : 10; /* arbitrary default; previously Nt was read uninitialized when argc <= 4 */
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**)*Nz); /* allocate once; the original second malloc leaked the first */
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information; a trailing -1 sentinel marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 4;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0; /* the second buffer is read at t==0, so it must be initialized */
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 26 && Multiplication: 7 (order-4, 25-point stencil)
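/* Double buffering: A[t%2] holds the current time step and A[(t+1)%2] holds
 * the previous step, which is overwritten with the update. The loop bounds
 * (4 .. N-4) leave a 4-deep ghost layer for the radius-4 stencil. */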
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(A);
free(roc2);
free(tile_size);
return 0;
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
} else {
fprintf(stderr, "usage: %s Nx Ny Nz [Nt]\n", argv[0]);
return 1;
}
Nt = (argc > 4) ? atoi(argv[4]) : 10; /* arbitrary default; previously Nt was read uninitialized when argc <= 4 */
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information; a trailing -1 sentinel marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
A[1][i][j][k] = 0.0; /* ghost cells of the second buffer are read but never written */
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
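/* The update is a weighted 7-point Jacobi-style sweep: each interior point
 * becomes alpha * itself plus beta * the sum of its 6 face neighbors,
 * written into the opposite buffer A[(t+1)%2]. */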
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff); /* lowercase min() is not defined in this file */
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Freeing the allocated arrays is skipped here (it caused performance degradation in timing runs):
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
for-5.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
extern void bar(int);
void foo (int n)
{
int i;
#pragma omp for schedule(guided)
for (i = 0; i < n; ++i)
bar(i);
}
/* { dg-final { scan-tree-dump-times "GOMP_loop_guided_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_guided_next" 1 "ompexp" } } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
|
find.h | // -*- C++ -*-
// Copyright (C) 2007, 2008 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING. If not, write to
// the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
// MA 02111-1307, USA.
// As a special exception, you may use this file as part of a free
// software library without restriction. Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to
// produce an executable, this file does not by itself cause the
// resulting executable to be covered by the GNU General Public
// License. This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General
// Public License.
/** @file parallel/find.h
* @brief Parallel implementation base for std::find(), std::equal()
* and related functions.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze and Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_FIND_H
#define _GLIBCXX_PARALLEL_FIND_H 1
#include <bits/stl_algobase.h>
#include <parallel/features.h>
#include <parallel/parallel.h>
#include <parallel/compatibility.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
* @brief Parallel std::find, switch for different algorithms.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence. Must have same
* length as first sequence.
* @param pred Find predicate.
* @param selector Functionality (e.g. std::find_if(), std::equal(), ...)
* @return Place of finding in both sequences.
*/
template<typename RandomAccessIterator1,
typename RandomAccessIterator2,
typename Pred,
typename Selector>
inline std::pair<RandomAccessIterator1, RandomAccessIterator2>
find_template(RandomAccessIterator1 begin1, RandomAccessIterator1 end1,
RandomAccessIterator2 begin2, Pred pred, Selector selector)
{
switch (_Settings::get().find_algorithm)
{
case GROWING_BLOCKS:
return find_template(begin1, end1, begin2, pred, selector,
growing_blocks_tag());
case CONSTANT_SIZE_BLOCKS:
return find_template(begin1, end1, begin2, pred, selector,
constant_size_blocks_tag());
case EQUAL_SPLIT:
return find_template(begin1, end1, begin2, pred, selector,
equal_split_tag());
default:
_GLIBCXX_PARALLEL_ASSERT(false);
return std::make_pair(begin1, begin2);
}
}
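// Usage sketch (hedged; the public wrappers in this extension build calls of
// this shape, with selectors defined in parallel/find_selectors.h):
//   RandomAccessIterator1 res =
//     find_template(begin, end, begin, pred, find_if_selector()).first;
// The second sequence is unused by find_if-style selectors; the switch above
// picks one of the three strategies from _Settings::find_algorithm.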
#if _GLIBCXX_FIND_EQUAL_SPLIT
/**
* @brief Parallel std::find, equal splitting variant.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence. Second sequence
* must have same length as first sequence.
* @param pred Find predicate.
* @param selector Functionality (e.g. std::find_if(), std::equal(), ...)
* @return Place of finding in both sequences.
*/
template<typename RandomAccessIterator1,
typename RandomAccessIterator2,
typename Pred,
typename Selector>
std::pair<RandomAccessIterator1, RandomAccessIterator2>
find_template(RandomAccessIterator1 begin1,
RandomAccessIterator1 end1,
RandomAccessIterator2 begin2,
Pred pred,
Selector selector,
equal_split_tag)
{
_GLIBCXX_CALL(end1 - begin1)
typedef std::iterator_traits<RandomAccessIterator1> traits_type;
typedef typename traits_type::difference_type difference_type;
typedef typename traits_type::value_type value_type;
difference_type length = end1 - begin1;
difference_type result = length;
difference_type* borders;
omp_lock_t result_lock;
omp_init_lock(&result_lock);
thread_index_t num_threads = get_max_threads();
# pragma omp parallel num_threads(num_threads)
{
# pragma omp single
{
num_threads = omp_get_num_threads();
borders = new difference_type[num_threads + 1];
equally_split(length, num_threads, borders);
} //single
thread_index_t iam = omp_get_thread_num();
difference_type start = borders[iam], stop = borders[iam + 1];
RandomAccessIterator1 i1 = begin1 + start;
RandomAccessIterator2 i2 = begin2 + start;
for (difference_type pos = start; pos < stop; ++pos)
{
#pragma omp flush(result)
// Result has been set to something lower.
if (result < pos)
break;
if (selector(i1, i2, pred))
{
omp_set_lock(&result_lock);
if (pos < result)
result = pos;
omp_unset_lock(&result_lock);
break;
}
++i1;
++i2;
}
} //parallel
omp_destroy_lock(&result_lock);
delete[] borders;
return
std::pair<RandomAccessIterator1, RandomAccessIterator2>(begin1 + result,
begin2 + result);
}
#endif
#if _GLIBCXX_FIND_GROWING_BLOCKS
/**
* @brief Parallel std::find, growing block size variant.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence. Second sequence
* must have same length as first sequence.
* @param pred Find predicate.
* @param selector Functionality (e.g. std::find_if(), std::equal(), ...)
* @return Place of finding in both sequences.
* @see __gnu_parallel::_Settings::find_sequential_search_size
* @see __gnu_parallel::_Settings::find_initial_block_size
* @see __gnu_parallel::_Settings::find_maximum_block_size
* @see __gnu_parallel::_Settings::find_increasing_factor
*
* There are two main differences between the growing blocks and
* the constant-size blocks variants.
* 1. For GB, the block size grows; for CSB, the block size is fixed.
* 2. For GB, the blocks are allocated dynamically;
* for CSB, the blocks are allocated in a predetermined manner,
* namely spatial round-robin.
*/
template<typename RandomAccessIterator1,
typename RandomAccessIterator2,
typename Pred,
typename Selector>
std::pair<RandomAccessIterator1, RandomAccessIterator2>
find_template(RandomAccessIterator1 begin1, RandomAccessIterator1 end1,
RandomAccessIterator2 begin2, Pred pred, Selector selector,
growing_blocks_tag)
{
_GLIBCXX_CALL(end1 - begin1)
typedef std::iterator_traits<RandomAccessIterator1> traits_type;
typedef typename traits_type::difference_type difference_type;
typedef typename traits_type::value_type value_type;
const _Settings& __s = _Settings::get();
difference_type length = end1 - begin1;
difference_type sequential_search_size =
std::min<difference_type>(length, __s.find_sequential_search_size);
// Try it sequentially first.
std::pair<RandomAccessIterator1, RandomAccessIterator2> find_seq_result =
selector.sequential_algorithm(
begin1, begin1 + sequential_search_size, begin2, pred);
if (find_seq_result.first != (begin1 + sequential_search_size))
return find_seq_result;
// Index of beginning of next free block (after sequential find).
difference_type next_block_start = sequential_search_size;
difference_type result = length;
omp_lock_t result_lock;
omp_init_lock(&result_lock);
thread_index_t num_threads = get_max_threads();
# pragma omp parallel shared(result) num_threads(num_threads)
{
# pragma omp single
num_threads = omp_get_num_threads();
// Not within first k elements -> start parallel.
thread_index_t iam = omp_get_thread_num();
difference_type block_size = __s.find_initial_block_size;
difference_type start =
fetch_and_add<difference_type>(&next_block_start, block_size);
// Get new block, update pointer to next block.
difference_type stop =
std::min<difference_type>(length, start + block_size);
std::pair<RandomAccessIterator1, RandomAccessIterator2> local_result;
while (start < length)
{
# pragma omp flush(result)
// Get new value of result.
if (result < start)
{
// No chance to find first element.
break;
}
local_result = selector.sequential_algorithm(
begin1 + start, begin1 + stop, begin2 + start, pred);
if (local_result.first != (begin1 + stop))
{
omp_set_lock(&result_lock);
if ((local_result.first - begin1) < result)
{
result = local_result.first - begin1;
// Result cannot be in future blocks, stop algorithm.
fetch_and_add<difference_type>(&next_block_start, length);
}
omp_unset_lock(&result_lock);
}
block_size =
std::min<difference_type>(block_size * __s.find_increasing_factor,
__s.find_maximum_block_size);
// Get new block, update pointer to next block.
start =
fetch_and_add<difference_type>(&next_block_start, block_size);
stop = ((length < (start + block_size))
? length : (start + block_size));
}
} //parallel
omp_destroy_lock(&result_lock);
// Return iterator on found element.
return
std::pair<RandomAccessIterator1, RandomAccessIterator2>(begin1 + result,
begin2 + result);
}
#endif
#if _GLIBCXX_FIND_CONSTANT_SIZE_BLOCKS
/**
* @brief Parallel std::find, constant block size variant.
* @param begin1 Begin iterator of first sequence.
* @param end1 End iterator of first sequence.
* @param begin2 Begin iterator of second sequence. Second sequence
* must have same length as first sequence.
* @param pred Find predicate.
* @param selector Functionality (e.g. std::find_if(), std::equal(), ...)
* @return Place of finding in both sequences.
* @see __gnu_parallel::_Settings::find_sequential_search_size
* @see __gnu_parallel::_Settings::find_block_size
* There are two main differences between the growing blocks and the
* constant-size blocks variants.
* 1. For GB, the block size grows; for CSB, the block size is fixed.
* 2. For GB, the blocks are allocated dynamically; for CSB, the
* blocks are allocated in a predetermined manner, namely spatial
* round-robin.
*/
template<typename RandomAccessIterator1,
typename RandomAccessIterator2,
typename Pred,
typename Selector>
std::pair<RandomAccessIterator1, RandomAccessIterator2>
find_template(RandomAccessIterator1 begin1, RandomAccessIterator1 end1,
RandomAccessIterator2 begin2, Pred pred, Selector selector,
constant_size_blocks_tag)
{
_GLIBCXX_CALL(end1 - begin1)
typedef std::iterator_traits<RandomAccessIterator1> traits_type;
typedef typename traits_type::difference_type difference_type;
typedef typename traits_type::value_type value_type;
const _Settings& __s = _Settings::get();
difference_type length = end1 - begin1;
difference_type sequential_search_size = std::min<difference_type>(
length, __s.find_sequential_search_size);
// Try it sequentially first.
std::pair<RandomAccessIterator1, RandomAccessIterator2> find_seq_result =
selector.sequential_algorithm(begin1, begin1 + sequential_search_size,
begin2, pred);
if (find_seq_result.first != (begin1 + sequential_search_size))
return find_seq_result;
difference_type result = length;
omp_lock_t result_lock;
omp_init_lock(&result_lock);
// Not within first sequential_search_size elements -> start parallel.
thread_index_t num_threads = get_max_threads();
# pragma omp parallel shared(result) num_threads(num_threads)
{
# pragma omp single
num_threads = omp_get_num_threads();
thread_index_t iam = omp_get_thread_num();
difference_type block_size = __s.find_initial_block_size;
// First element of thread's current iteration.
difference_type iteration_start = sequential_search_size;
// Where to work (initialization).
difference_type start = iteration_start + iam * block_size;
difference_type stop =
std::min<difference_type>(length, start + block_size);
std::pair<RandomAccessIterator1, RandomAccessIterator2> local_result;
while (start < length)
{
// Get new value of result.
# pragma omp flush(result)
// No chance to find first element.
if (result < start)
break;
local_result = selector.sequential_algorithm(
begin1 + start, begin1 + stop,
begin2 + start, pred);
if (local_result.first != (begin1 + stop))
{
omp_set_lock(&result_lock);
if ((local_result.first - begin1) < result)
result = local_result.first - begin1;
omp_unset_lock(&result_lock);
// Will not find better value in its interval.
break;
}
iteration_start += num_threads * block_size;
// Where to work.
start = iteration_start + iam * block_size;
stop = std::min<difference_type>(length, start + block_size);
}
} //parallel
omp_destroy_lock(&result_lock);
// Return iterator on found element.
return
std::pair<RandomAccessIterator1, RandomAccessIterator2>(begin1 + result,
begin2 + result);
}
#endif
} // end namespace
#endif
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical with respect to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_specialization_constants_buffer,
kind_stream,
kind_last = kind_stream
};
public:
SYCLIntegrationHeader(Sema &S);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(StringRef MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(const FunctionDecl *SyclKernel, QualType KernelNameType,
SourceLocation Loc, bool IsESIMD, bool IsUnnamedKernel);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
/// Registers a specialization constant to emit info for it into the header.
void addSpecConstant(StringRef IDName, QualType IDType);
/// Update the names of a kernel description based on its SyclKernel.
void updateKernelNames(const FunctionDecl *SyclKernel, StringRef Name,
StringRef StableName) {
auto Itr = llvm::find_if(KernelDescs, [SyclKernel](const KernelDesc &KD) {
return KD.SyclKernel == SyclKernel;
});
assert(Itr != KernelDescs.end() && "Unknown kernel description");
Itr->updateKernelNames(Name, StableName);
}
/// Note which free functions (this_id, this_item, etc) are called within the
/// kernel
void setCallsThisId(bool B);
void setCallsThisItem(bool B);
void setCallsThisNDItem(bool B);
void setCallsThisGroup(bool B);
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_scalar or kind_struct, then
// denotes parameter size in bytes (includes padding for structs)
// If Kind is kind_accessor
// denotes access target; possible access targets are defined in
// access/access.hpp
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// there are four free functions the kernel may call (this_id, this_item,
// this_nd_item, this_group)
struct KernelCallsSYCLFreeFunction {
bool CallsThisId = false;
bool CallsThisItem = false;
bool CallsThisNDItem = false;
bool CallsThisGroup = false;
};
// Kernel invocation descriptor
struct KernelDesc {
/// sycl_kernel function associated with this kernel.
const FunctionDecl *SyclKernel;
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
SourceLocation KernelLocation;
/// Whether this kernel is an ESIMD one.
bool IsESIMDKernel;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
// Whether kernel calls any of the SYCL free functions (this_item(),
// this_id(), etc)
KernelCallsSYCLFreeFunction FreeFunctionCalls;
// If we are in unnamed kernel/lambda mode AND this is one that the user
// hasn't provided an explicit name for.
bool IsUnnamedKernel;
KernelDesc(const FunctionDecl *SyclKernel, QualType NameType,
SourceLocation KernelLoc, bool IsESIMD, bool IsUnnamedKernel)
: SyclKernel(SyclKernel), NameType(NameType), KernelLocation(KernelLoc),
IsESIMDKernel(IsESIMD), IsUnnamedKernel(IsUnnamedKernel) {}
void updateKernelNames(StringRef Name, StringRef StableName) {
this->Name = Name.str();
this->StableName = StableName.str();
}
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
using SpecConstID = std::pair<QualType, std::string>;
/// Keeps specialization constants met in the translation unit. Maps spec
/// constant's ID type to generated unique name. Duplicates are removed at
/// integration header emission time.
llvm::SmallVector<SpecConstID, 4> SpecConsts;
Sema &S;
};
class SYCLIntegrationFooter {
public:
SYCLIntegrationFooter(Sema &S) : S(S) {}
bool emit(StringRef MainSrc);
void addVarDecl(const VarDecl *VD);
private:
bool emit(raw_ostream &O);
Sema &S;
llvm::SmallVector<const VarDecl *> SpecConstants;
void emitSpecIDName(raw_ostream &O, const VarDecl *VD);
};
/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token; all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Handles e.g. BaseType{ .D = Tok...
void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
const Designation &D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
///
/// The callback should also emit signature help as a side-effect, but only
/// if the completion point has been reached.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Get the expected type associated with this location, if any.
///
/// If the location is a function argument, determining the expected type
/// involves considering all function overloads and the arguments so far.
/// In this case, signature help for these function overloads will be reported
/// as a side-effect (only if the completion point has been reached).
QualType get(SourceLocation Tok) const {
if (!Enabled || Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
bool Enabled;
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we would otherwise have to do, since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 32;
static const uint64_t MaximumAlignment = 1ull << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
// `Native` represents default align mode, which may vary based on the
// platform.
enum Mode : unsigned char { Native, Natural, Packed, Mac68k };
// #pragma pack info constructor
AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
: PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
assert(Num == PackNumber && "The pack number has been truncated.");
}
// #pragma align info constructor
AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
: PackAttr(false), AlignMode(M),
PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}
explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}
AlignPackInfo() : AlignPackInfo(Native, false) {}
// When an AlignPackInfo itself cannot be used, this returns a 32-bit
// integer encoding for it. This should only be passed to
// AlignPackInfo::getFromRawEncoding, it should not be inspected directly.
static uint32_t getRawEncoding(const AlignPackInfo &Info) {
std::uint32_t Encoding{};
if (Info.IsXLStack())
Encoding |= IsXLMask;
Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
if (Info.IsPackAttr())
Encoding |= PackAttrMask;
Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
return Encoding;
}
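    // Bit layout produced above (derived from the masks declared below):
    //   bit 0      IsXL flag
    //   bits 1-2   AlignMode (Native/Natural/Packed/Mac68k)
    //   bit 3      PackAttr flag
    //   bits 4-8   PackNumber
    // Worked example: AlignPackInfo(Packed, /*Num=*/8, /*IsXL=*/false)
    // encodes to (2 << 1) | 0x8 | (8 << 4) == 0x8C.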
static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
bool IsXL = static_cast<bool>(Encoding & IsXLMask);
AlignPackInfo::Mode M =
static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
int PackNumber = (Encoding & PackNumMask) >> 4;
if (Encoding & PackAttrMask)
return AlignPackInfo(M, PackNumber, IsXL);
return AlignPackInfo(M, IsXL);
}
bool IsPackAttr() const { return PackAttr; }
bool IsAlignAttr() const { return !PackAttr; }
Mode getAlignMode() const { return AlignMode; }
unsigned getPackNumber() const { return PackNumber; }
bool IsPackSet() const {
// #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
// attribute on a decl.
return PackNumber != UninitPackVal && PackNumber != 0;
}
bool IsXLStack() const { return XLStack; }
bool operator==(const AlignPackInfo &Info) const {
return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
Info.XLStack);
}
bool operator!=(const AlignPackInfo &Info) const {
return !(*this == Info);
}
private:
/// \brief True if this is a pragma pack attribute,
/// not a pragma align attribute.
bool PackAttr;
/// \brief The alignment mode that is in effect.
Mode AlignMode;
/// \brief The pack number of the stack.
unsigned char PackNumber;
/// \brief True if it is a XL #pragma align/pack stack.
bool XLStack;
/// \brief Uninitialized pack value.
static constexpr unsigned char UninitPackVal = -1;
// Masks to encode and decode an AlignPackInfo.
static constexpr uint32_t IsXLMask{0x0000'0001};
static constexpr uint32_t AlignModeMask{0x0000'0006};
static constexpr uint32_t PackAttrMask{0x0000'0008};
static constexpr uint32_t PackNumMask{0x0000'01F0};
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
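    // Example mapping (a sketch; the pragma handlers construct these calls,
    // and Value is ignored for actions without PSK_Set):
    //   #pragma pack(push, r1, 4)  ->  Act(Loc, PSK_Push_Set, "r1", 4)
    //   #pragma pack(pop, r1)      ->  Act(Loc, PSK_Pop, "r1", ...)
    //   #pragma pack()             ->  Act(Loc, PSK_Reset, "", ...)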
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FpPragmaStack.CurrentValue;
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when, in C++2a, an 'auto' is
/// encountered in a function declaration parameter type specifier in order
/// to invent a corresponding template parameter in the enclosing abbreviated
/// function template. This information is also present in LambdaScopeInfo,
/// stored in the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This
/// allows us to associate a raw vector type with one of the ext_vector type
/// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list contains class members and the locations of delete-expressions
/// for which we could not prove whether they mismatch with the
/// new-expression used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and for which we must warn if they remain unused. Only contains the
/// first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
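// Illustrative sketch (hypothetical parser hooks, not part of this header):
// the parser registers itself so Sema can call back into it for
// late-parsed templates:
//
//   static void LateParse(void *P, LateParsedTemplate &LPT) { /* ... */ }
//   static void LateCleanup(void *P) { /* ... */ }
//   S.SetLateTemplateParser(LateParse, LateCleanup, &TheParser);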
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
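// Illustrative sketch (hypothetical caller, not part of this header):
// collect diagnostics into a local pool, then decide what to do with them:
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   auto State = S.DelayedDiagnostics.push(Pool);
//   // ... access/deprecation diagnostics land in Pool instead of firing ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);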
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
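// Illustrative sketch (hypothetical helper, not part of this header):
//
//   void workInsideClass(Sema &S, CXXRecordDecl *RD) {
//     Sema::ContextRAII SavedContext(S, RD); // CurContext becomes RD
//     // ... lookups and 'this' adjustments now see RD ...
//   } // prior context, diagnostic pool, and scope indices restored here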
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
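// Illustrative sketch (hypothetical helper, not part of this header):
//
//   void defineImplicitMember(Sema &S, CXXMethodDecl *MD,
//                             SourceLocation UseLoc) {
//     Sema::SynthesizedFunctionScope Scope(S, MD);
//     Scope.addContextNote(UseLoc); // notes where the synthesis was required
//     // ... build and attach the body of MD ...
//   }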
/// WeakUndeclaredIdentifiers - Identifiers contained in a
/// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system
/// headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The namespace where coroutine components are defined. The standard
/// defines them in the std namespace; the previous implementation defined
/// them in the std::experimental namespace.
NamespaceDecl *CoroTraitsNamespaceCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold 'respondsToSelector:'.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// In addition to being constant evaluated, the current expression
/// occurs in an immediate function context - either a consteval function
/// or a 'consteval if' statement.
ImmediateFunctionContext,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
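// Illustrative sketch (hypothetical parse step, not part of this header):
// the operand of sizeof is handled inside an Unevaluated context, so no
// odr-use or code generation obligations arise from it:
//
//   S.PushExpressionEvaluationContext(
//       Sema::ExpressionEvaluationContext::Unevaluated);
//   ExprResult Operand = parseSizeofOperand(); // hypothetical
//   S.PopExpressionEvaluationContext();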
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
// A context can be nested in both a discarded statement context and
// an immediate function context, so they need to be tracked independently.
bool InDiscardedStatement;
bool InImmediateFunctionContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
InDiscardedStatement(false), InImmediateFunctionContext(false) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated ||
Context == ExpressionEvaluationContext::ImmediateFunctionContext;
}
bool isImmediateFunctionContext() const {
return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
(Context == ExpressionEvaluationContext::DiscardedStatement &&
InImmediateFunctionContext);
}
bool isDiscardedStatementContext() const {
return Context == ExpressionEvaluationContext::DiscardedStatement ||
(Context ==
ExpressionEvaluationContext::ImmediateFunctionContext &&
InDiscardedStatement);
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
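// Illustrative sketch (not part of this header): interpreting a cached
// overload result, e.g. from a copy-constructor lookup:
//
//   if (SMOR.getKind() == Sema::SpecialMemberOverloadResult::Success)
//     noteMethod(SMOR.getMethod()); // hypothetical consumer
//   else if (SMOR.getKind() == Sema::SpecialMemberOverloadResult::Ambiguous)
//     ; // report the ambiguity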
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
const TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of each unparsed default
// argument.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
class GlobalMethodPool {
public:
using Lists = std::pair<ObjCMethodList, ObjCMethodList>;
using iterator = llvm::DenseMap<Selector, Lists>::iterator;
iterator begin() { return Methods.begin(); }
iterator end() { return Methods.end(); }
iterator find(Selector Sel) { return Methods.find(Sel); }
std::pair<iterator, bool> insert(std::pair<Selector, Lists> &&Val) {
return Methods.insert(std::move(Val));
}
int count(Selector Sel) const { return Methods.count(Sel); }
bool empty() const { return Methods.empty(); }
private:
llvm::DenseMap<Selector, Lists> Methods;
};
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
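// Illustrative sketch (hypothetical caller, not part of this header): a
// compound statement is parsed under this RAII so any '#pragma float_control'
// inside it is undone at the closing brace:
//
//   {
//     Sema::FPFeaturesStateRAII SavedFPState(S);
//     // ... parse statements; pragmas may alter S.CurFPFeatures ...
//   } // CurFPFeatures and FpPragmaStack.CurrentValue restored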
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
/// Increment when we find a reference; decrement when we find an ignored
/// assignment. Ultimately the value is 0 if every reference is an ignored
/// assignment.
llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
/// This virtual key function only exists to limit the emission of debug info
/// describing the Sema class. GCC and Clang only emit debug info for a class
/// with a vtable when the vtable is emitted. Sema is final and not
/// polymorphic, but the debug info size savings are so significant that it is
/// worth adding a vtable just to take advantage of this optimization.
virtual void anchor();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
StringRef Platform);
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
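// Illustrative sketch (hypothetical worker, not part of this header):
//
//   S.runWithSufficientStackSpace(Loc, [&] {
//     instantiateDeeplyNestedTemplate(); // may recurse heavily
//   });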
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
// in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
~ImmediateDiagBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First clear the diagnostic
// builder itself so it won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
return *this;
}
};
/// Bitmask to contain the list of reasons a single diagnostic should be
/// emitted, based on its language. This permits multiple offload systems
/// to coexist in the same translation unit.
enum class DeviceDiagnosticReason {
/// Diagnostic doesn't apply to anything. Included for completeness, but
/// should make this a no-op.
None = 0,
/// OpenMP specific diagnostic.
OmpDevice = 1 << 0,
OmpHost = 1 << 1,
OmpAll = OmpDevice | OmpHost,
/// CUDA specific diagnostics.
CudaDevice = 1 << 2,
CudaHost = 1 << 3,
CudaAll = CudaDevice | CudaHost,
/// SYCL specific diagnostic.
Sycl = 1 << 4,
/// ESIMD specific diagnostic.
Esimd = 1 << 5,
/// A flag representing 'all'. This can be used to avoid the check
/// altogether and make this behave as it did before the
/// DiagnosticReason was added (that is, unconditionally emit).
/// Note: This needs to be updated if any flags above are added.
All = OmpAll | CudaAll | Sycl | Esimd,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/All)
};
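// Because of LLVM_MARK_AS_BITMASK_ENUM, reasons compose with the usual
// bitwise operators. Illustrative sketch (not part of this header):
//
//   auto R = Sema::DeviceDiagnosticReason::Sycl |
//            Sema::DeviceDiagnosticReason::Esimd;
//   if ((R & Sema::DeviceDiagnosticReason::Sycl) !=
//       Sema::DeviceDiagnosticReason::None)
//     ; // the diagnostic applies to SYCL device compilation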
private:
// A collection of a pair of undefined functions and their callers known
// to be reachable from a routine on the device (kernel or device function).
typedef std::pair<const FunctionDecl *, const FunctionDecl *> CallPair;
llvm::SmallVector<CallPair> UndefinedReachableFromSyclDevice;
public:
// Helper routine to add a Callee-Caller pair of FunctionDecl *
// to UndefinedReachableFromSyclDevice.
void addFDToReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
UndefinedReachableFromSyclDevice.push_back(std::make_pair(Callee, Caller));
}
// Helper routine to check if a pair of Callee-Caller FunctionDecl *
// is in UndefinedReachableFromSyclDevice.
bool isFDReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
return llvm::any_of(UndefinedReachableFromSyclDevice,
[Callee, Caller](const CallPair &P) {
return P.first == Callee && P.second == Caller;
});
}
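// Illustrative sketch (hypothetical call site, not part of this header):
// record an undefined callee the first time it is seen from a device caller:
//
//   if (!Callee->isDefined() &&
//       !S.isFDReachableFromSyclDevice(Callee, Caller))
//     S.addFDToReachableFromSyclDevice(Callee, Caller);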
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S, DeviceDiagnosticReason R);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second
<< std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second.AddFixItHint(
Hint);
}
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Is the last error-level diagnostic immediate. This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether deferrable diagnostics should be deferred.
bool DeferDiags = false;
/// RAII class to control scope of DeferDiags.
class DeferDiagsRAII {
Sema &S;
bool SavedDeferDiags = false;
public:
DeferDiagsRAII(Sema &S, bool DeferDiags)
: S(S), SavedDeferDiags(S.DeferDiags) {
S.DeferDiags = DeferDiags;
}
~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
};
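// Illustrative sketch (hypothetical caller, not part of this header): defer
// deferrable diagnostics while handling code that may never be codegen'ed:
//
//   {
//     Sema::DeferDiagsRAII DeferScope(S, /*DeferDiags=*/true);
//     // ... diagnostics raised here may be attached to the function ...
//   } // previous DeferDiags value restored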
/// Whether an uncompilable error has occurred. This includes errors that
/// happen in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// Retrieve the current function, if any, that should be analyzed for
/// potential availability violations.
sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
SYCLIntelFPGALoopCountAttr *
BuildSYCLIntelFPGALoopCountAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *
BuildSYCLIntelFPGAInitiationIntervalAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAMaxConcurrencyAttr *
BuildSYCLIntelFPGAMaxConcurrencyAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGAMaxInterleavingAttr *
BuildSYCLIntelFPGAMaxInterleavingAttr(const AttributeCommonInfo &CI, Expr *E);
SYCLIntelFPGASpeculatedIterationsAttr *
BuildSYCLIntelFPGASpeculatedIterationsAttr(const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGALoopCoalesceAttr *
BuildSYCLIntelFPGALoopCoalesceAttr(const AttributeCommonInfo &CI, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildBitIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
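// Illustrative sketch (hypothetical diagnostic ID, not part of this header):
// the variadic RequireCompleteType overload below wraps its extra arguments
// in a BoundTypeDiagnoser so they are replayed into the diagnostic on
// failure:
//
//   BoundTypeDiagnoser<DeclarationName> Diagnoser(SomeDiagID, Name);
//   S.RequireCompleteType(Loc, T, Diagnoser);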
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
/// Helper function to judge if we are in module purview.
/// Return false if we are not in a module.
bool isCurrentModulePurview() const {
return getCurrentModule() ? getCurrentModule()->isModulePurview() : false;
}
/// Enter the scope of the global module.
Module *PushGlobalModuleFragment(SourceLocation BeginLoc, bool IsImplicit);
/// Leave the scope of the global module.
void PopGlobalModuleFragment();
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH file, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
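// Usage sketch for the variadic overloads above (illustrative; the
// diagnostic ID below is a placeholder for any suitable incomplete-type
// diagnostic):
//   if (RequireCompleteType(D->getLocation(), T, diag::err_incomplete_type))
//     return true; // diagnostic already emitted
// Any extra Args... are streamed into the diagnostic by the bound diagnoser
// before the type itself.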
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
// Returns the underlying type of a decltype with the given expression.
QualType getDecltypeForExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
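// Rough dispatch sketch (assumed usage; mirrors the ActOnNameClassifiedAs*
// hooks declared below):
//   NameClassification C = ClassifyName(S, SS, Name, NameLoc, Next);
//   switch (C.getKind()) {
//   case NC_Type:    /* parse a declaration using C.getType() */ break;
//   case NC_NonType: /* ActOnNameClassifiedAsNonType(...) */ break;
//   case NC_Error:   /* error already diagnosed; bail out */ break;
//   // ... remaining kinds handled analogously.
//   }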
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
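// Illustrative scenario (not from the original header): for `fn<int>(x)`
// where `fn` names a plain variable rather than a template, the parser can
// use mightBeIntendedToBeTemplateName on the parsed `fn` to decide whether
// to recover via diagnoseExprIntendedAsTemplateName instead of treating `<`
// as a comparison.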
void warnOnReservedIdentifier(const NamedDecl *D);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
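// Illustrative example of the constexpr exception mentioned above: parsing
//   constexpr int f() { return 4; }
//   int arr[f()];
// needs f's body mid-expression to evaluate the array bound, so neither
// delaying nor skipping the body is possible here.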
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose when the parameters or return value of a function or
/// Objective-C method definition are passed by value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
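// Sketch of the index encoding above (assumed interpretation): a defaulted
// special member maps to its CXXSpecialMember value, while a defaulted
// comparison maps to an index past CXXInvalid, so both families can share a
// single %select in a diagnostic without colliding.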
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject it in case of a
/// structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a
/// particular DeclContext. Causes lookup within the scope to chain through
/// enclosing contexts in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise returns NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
/// Merge availability attributes for an implementation of
/// an optional protocol requirement.
AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
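// Worked example (illustrative): an attribute introduced via '#pragma clang
// attribute' and inferred from another platform has final priority
// AP_PragmaClangAttribute + AP_InferredFromOtherPlatform == 3, so an
// explicit attribute written on the declaration (priority 0) takes
// precedence over it.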
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
ErrorAttr *mergeErrorAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef NewUserDiagnostic);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
BTFDeclTagAttr *mergeBTFDeclTagAttr(Decl *D, const BTFDeclTagAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(QualType Param, QualType Arg);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
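// Classic cases this check distinguishes (illustrative):
//   const int &r = 42;            // OK: the temporary's lifetime is extended
//   const int &f() { return 42; } // diagnosed: the returned reference dangles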
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ExplicitBool, ///< Condition in an explicit(bool) specifier.
CCEK_Noexcept ///< Condition in a noexcept(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
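// Assumed usage pattern, based on the converter interface above: a caller
// needing an integral condition derives from ICEConvertDiagnoser, implements
// the diagnose*/note* hooks with its own diagnostics, and hands the converter
// to PerformContextualImplicitConversion, which either returns the converted
// expression or emits the appropriate diagnostic.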
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
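// Examples of each literal kind (illustrative):
//   @[a, b] -> LK_Array      @{k : v} -> LK_Dictionary   @42 -> LK_Numeric
//   @(expr) -> LK_Boxed      @"str"   -> LK_String       ^{} -> LK_Block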
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression E
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
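// A minimal sketch (hypothetical caller code, not part of this class) of
// driving overload resolution by hand: populate a candidate set from a set of
// found declarations, then pick the best viable function. 'S' (a Sema&),
// 'Fns' (an UnresolvedSet), 'Args' and 'Loc' are assumed inputs.
//
//   OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
//   S.AddFunctionCandidates(Fns, Args, CandidateSet);
//   OverloadCandidateSet::iterator Best;
//   if (CandidateSet.BestViableFunction(S, Loc, Best) == OR_Success) {
//     FunctionDecl *FD = Best->Function;  // the selected overload
//   }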
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
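// A minimal sketch (hypothetical usage, not part of this class): a
// redeclaration lookup that uses the context-appropriate RedeclarationKind.
// 'S' (a Sema&), 'Name', 'Loc' and 'CurScope' are assumed inputs.
//
//   LookupResult Previous(S, Name, Loc, Sema::LookupOrdinaryName,
//                         S.forRedeclarationInCurContext());
//   S.LookupName(Previous, CurScope);
//   if (!Previous.empty()) {
//     /* check the new declaration against the previous one */
//   }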
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
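// A minimal sketch (hypothetical usage, not part of this class): unqualified
// lookup of an ordinary name, handling only the unambiguous single-result
// case. 'S' (a Sema&), 'Name', 'Loc' and 'CurScope' are assumed inputs.
//
//   LookupResult R(S, Name, Loc, Sema::LookupOrdinaryName);
//   if (S.LookupName(R, CurScope) && R.isSingleResult()) {
//     NamedDecl *D = R.getFoundDecl();
//   }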
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
bool isKnownName(StringRef name);
/// Status of a function's emission, based on its CUDA/HIP/OpenMP host/device
/// attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
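// A minimal sketch (hypothetical usage, not part of this class): deciding how
// to diagnose based on whether a function will actually be emitted. 'S' (a
// Sema&) and 'FD' are assumed inputs.
//
//   switch (S.getEmissionStatus(FD)) {
//   case Sema::FunctionEmissionStatus::Emitted:
//     /* emit the diagnostic immediately */
//     break;
//   case Sema::FunctionEmissionStatus::Unknown:
//     /* defer the diagnostic until emission is known */
//     break;
//   default:
//     /* discarded; no diagnostic needed */
//     break;
//   }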
DeviceDiagnosticReason getEmissionReason(const FunctionDecl *Decl);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device
// check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected, or ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
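// A minimal sketch (hypothetical usage, not part of this class): correcting
// any delayed typos in a just-built expression, accepting only corrected
// results of a particular form via the filter. 'S' (a Sema&) and 'E' are
// assumed inputs.
//
//   ExprResult Corrected = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
//       [](Expr *Candidate) -> ExprResult {
//         return isa<CallExpr>(Candidate) ? ExprResult(Candidate)
//                                         : ExprError();
//       });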
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str,
const StringLiteral *Literal,
bool &HasDefault, bool &HasCommas,
SmallVectorImpl<StringRef> &Strings);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration exactly matches its interface declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and the property has a backing ivar, returns that ivar; otherwise, returns
/// null. On success it also returns the ivar's property.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property which has one user-declared setter or
/// getter must also have the other.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether the types of two methods
/// match, returning true if they do and false otherwise.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in
/// a category match those implemented in its primary class, warning each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method, or only one method, is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// Returns the method which best matches the given argument list, or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
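// A minimal sketch (hypothetical usage, not part of this class): the RAII
// form pairs ActOnStartOfCompoundStmt/ActOnFinishOfCompoundStmt even on
// early exits. 'S' (a Sema&) is assumed.
//
//   {
//     Sema::CompoundScopeRAII CompoundScope(S);
//     /* ... act on the statements of the compound statement ... */
//   } // ActOnFinishOfCompoundStmt() runs here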
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, IfStatementKind StatementKind,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
struct NamedReturnInfo {
const VarDecl *Candidate;
enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
Status S;
bool isMoveEligible() const { return S != None; }
bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
NamedReturnInfo getNamedReturnInfo(
Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
QualType ReturnType);
ExprResult
PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const NamedReturnInfo &NRInfo, Expr *Value,
bool SuppressSimplerImplicitMoves = false);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
NamedReturnInfo &NRInfo,
bool SuppressSimplerImplicitMoves);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// If VD, a parameter or a variable, is set but not otherwise used, diagnose.
void DiagnoseUnusedButSetDecl(const VarDecl *VD);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
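// A minimal sketch (hypothetical usage, not part of this class): evaluating
// an operand in an unevaluated context with a balanced push/pop. 'S' (a
// Sema&) is assumed; Clang also provides the EnterExpressionEvaluationContext
// RAII helper to pair these calls automatically.
//
//   S.PushExpressionEvaluationContext(
//       Sema::ExpressionEvaluationContext::Unevaluated);
//   /* ... build or check the unevaluated operand ... */
//   S.PopExpressionEvaluationContext();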
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
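// A minimal sketch (hypothetical usage, not part of this class): a dry-run
// capture check. With BuildAndDiagnose=false, the call neither performs the
// capture nor emits diagnostics; it only reports feasibility (true means an
// error, i.e. the variable cannot be captured). 'S', 'Var' and 'Loc' are
// assumed inputs.
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);
//   if (!CannotCapture) {
//     /* CaptureType and DeclRefType are now valid */
//   }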
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false,
ArrayRef<const Expr *> StopAt = None);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the statement's reachability
/// analysis.
///
/// \param Stmts If Stmts is non-empty, delay reporting the diagnostic until
/// the function body is parsed, and then do a basic reachability analysis to
/// determine if the statement is reachable. If it is unreachable, the
/// diagnostic will not be emitted.
bool DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
const PartialDiagnostic &PD);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified
/// statements are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
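// A minimal sketch (hypothetical usage, not part of this class): emitting a
// runtime-behavior warning only when the statement is reachable. 'S', 'Loc',
// 'Call' and 'DiagID' (a real warning ID in actual code) are assumed inputs.
//
//   S.DiagRuntimeBehavior(Loc, Call,
//                         S.PDiag(DiagID) << Call->getSourceRange());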
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
TypeSourceInfo *TSI);
ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen,
ParsedType ParsedTy);
ExprResult BuildSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
ExprResult ActOnSYCLUniqueStableIdExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
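// For reference: for the C11 expression '_Generic(x, int: a, default: b)',
// 'x' is the controlling expression, the association types ({int}) arrive
// via ArgTypes/Types, and the result candidates ({a, b}) via ArgExprs/Exprs;
// the 'default' association is conventionally represented by a null type in
// the corresponding position.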
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
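// Illustrative mapping (sketch): for the OpenMP iterator modifier
// 'iterator(it = 0:n:2)', DeclIdent names 'it', Type is the optional
// iterator type, Range carries the begin/end/step expressions, AssignLoc
// points at '=', ColonLoc at the first ':' and SecColonLoc at the second.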
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
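// For example, if 'p' has a class type with an overloaded operator->,
// 'p.field' may fail to find 'field'; these extra arguments let
// BuildMemberReferenceExpr reinvoke ActOnMemberAccess as if 'p->field'
// had been written, recovering from the error.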
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
MultiExprArg CallArgs);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension (e.g. 'x ?: y').
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
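// Sketch of the encoding: '__builtin_offsetof(S, a.b[3])' decomposes into
// the components '.a' and '.b' (isBrackets == false, U.IdentInfo set),
// followed by '[3]' (isBrackets == true, U.E set to the index expression).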
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
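// For example, in the Microsoft extension '__if_exists(T::member) { ... }',
// the check yields IER_Exists or IER_DoesNotExist once the name can be
// resolved, and IER_Dependent when 'T' involves a template parameter, since
// the answer may change from one instantiation to the next.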
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
NamespaceDecl *getCachedCoroNamespace() { return CoroTraitsNamespaceCache; }
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
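// For example, 'a <=> b' on built-in integers selects a builtin operator
// returning std::strong_ordering (OperatorInExpression), while
// 'auto operator<=>(const T &) const = default;' may also require the
// comparison category type for its implicit return value (DefaultedOperator).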
/// Look up the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc,
const LookupResult *R = nullptr,
const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation,
bool IsUsingIfExists);
NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc,
SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only available from C++11.
// throw() comes next.
// Then throw(<collected exceptions>).
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
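// Minimal usage sketch (hypothetical call site, not part of this header):
//
//   Sema::ImplicitExceptionSpecification Spec(SemaRef);
//   for (const CXXMethodDecl *Invoked : SpecialMembersInvoked) // assumed set
//     Spec.CalledDecl(Loc, Invoked);
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();
//
// 'SpecialMembersInvoked' is a placeholder for whatever base and member
// special members the implicit definition would call.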
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise, it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
// Checks whether the vector type should be initialized from a scalar
// by splatting the value rather than populating a single element.
// This is the case for AltiVecVector types, as well as for
// AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);
// Checks if the -faltivec-src-compat=gcc option is specified.
// If so, AltiVecVector, AltiVecBool and AltiVecPixel types are
// treated the same way they are by gcc when initializing such
// vectors (an error is emitted).
bool CheckAltivecInitFromScalar(SourceRange R, QualType VecTy,
QualType SrcTy);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
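// For reference: with an empty pack, '(args && ...)' folds to 'true',
// '(args || ...)' to 'false', and '(args, ...)' to a void expression;
// BuildEmptyCXXFoldExpr handles this empty-pack case, and any other
// operator is ill-formed for an empty expansion.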
/// ActOnCXXThis - Parse the 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
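// Usage sketch (hypothetical): temporarily allow 'this' while processing a
// declaration outside any member function body:
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, ClassDecl, Qualifiers());
//     // ... code here may parse/check expressions mentioning 'this' ...
//   } // the previous CXXThisTypeOverride is restored on scope exit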
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
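// For example, '::new T' considers only the global operator new
// (AFS_Global), while plain 'new T' for a class type looks for a
// class-scope operator new first and otherwise falls back to the global
// one (AFS_Both).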
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
// Complete an enum decl, maybe without a scope spec.
bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
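// Sketch: while parsing 'A::B::m', once the parser has consumed 'B'
// followed by '::', it fills a NestedNameSpecInfo with Identifier == 'B',
// IdentifierLoc at 'B' and CCLoc at the '::'; ObjectType is additionally
// set when the specifier appears within a member access expression.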
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, no error message is emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number the lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
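// For example, given the init-captures in '[n = compute(), &r = x] { ... }',
// the first is copy-initialized (LambdaCaptureInitKind::CopyInit, so
// DirectInit is false) and the second binds a reference (ByRef is true);
// the deduced type of each init-capture is returned.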
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the expression needed to produce the function pointer (the lambda's
/// static invoker), and IR generation actually generates the real body of
/// the conversion.
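///
/// A sketch of the source pattern this supports (illustrative):
/// \code
/// int (*fp)(int) = [](int x) { return x + 1; }; // uses the conversion
/// \endcode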
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
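///
/// For example (illustrative), the expression after 'requires' below is a
/// constraint expression:
/// \code
/// template<typename T> requires (sizeof(T) > 1) void f(T);
/// \endcode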
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2, but would
/// have been if a pair of the atomic constraints involved had been declared
/// in a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
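///
/// For instance (an illustrative sketch, where C1 and C2 stand for arbitrary
/// concepts), the constraints of
/// \code
/// template<typename T> requires C1<T> && C2<T> void f(T);
/// \endcode
/// are checked as the conjunction of C1<T> and C2<T>.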
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
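///
/// For example (illustrative):
/// \code
/// class __declspec(dllexport) Widget { void f(); };
/// \endcode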
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
///
/// \param ND The declaration that introduces the name std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
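///
/// For example (illustrative):
/// \code
/// struct [[clang::trivial_abi]] Handle {
///   Handle(const Handle &);
///   ~Handle();
///   int fd;
/// };
/// \endcode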
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check and build a base specifier that has just been
/// parsed (see also ActOnBaseSpecifier below).
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
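///
/// For example (illustrative), the following override is covariant:
/// \code
/// struct B { virtual B *clone(); };
/// struct D : B { D *clone() override; };
/// \endcode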
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
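///
/// For example (illustrative), the override below is ill-formed:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: overrides a 'final' function
/// \endcode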
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
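///
/// For example (illustrative), 'Box' names a deduction guide below:
/// \code
/// template<typename T> struct Box { Box(T); };
/// template<typename T> Box(T) -> Box<T>;
/// \endcode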
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool BuildTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc,
bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
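///
/// For example (illustrative):
/// \code
/// template<typename T> constexpr T zero = T(0);
/// double d = zero<double>; // specialization of the variable template
/// \endcode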
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument> &Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
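// An illustrative sketch of the three kinds:
//   template<typename T, unsigned N> void g(T (&)[N]);
//   int a[4];
//   g(a);      // T deduced (CTAK_Deduced); N deduced from the array
//              // bound (CTAK_DeducedFromArrayBound).
//   g<int>(a); // T specified in the code (CTAK_Specified).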
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
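///
/// For example (illustrative):
/// \code
/// template<typename T> void f() {
///   typename T::type x; // 'typename' names a dependent member type
/// }
/// \endcode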
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
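// An illustrative sketch of the requirement kinds handled above (assuming
// only the standard <concepts> and <cstddef> headers):
//   template<typename T> concept Container = requires(T t) {
//     t.begin();                                  // simple requirement
//     typename T::value_type;                     // type requirement
//     { t.size() } noexcept -> std::same_as<std::size_t>; // compound requirement
//     requires sizeof(T) > 0;                     // nested requirement
//   };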
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
/// A requires-clause.
UPPC_RequiresClause,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
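///
/// For example (illustrative), the member below leaves 'Ts' unexpanded:
/// \code
/// template<typename ...Ts> struct S {
///   Ts member; // error: unexpanded parameter pack 'Ts'
/// };
/// \endcode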
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
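///
/// For example (an illustrative sketch assuming std::tuple), 'Ts...' below
/// is a pack expansion used as a template argument:
/// \code
/// template<typename ...Ts> using Tup = std::tuple<Ts...>;
/// \endcode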
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
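///
/// For example (illustrative; 'P' is a placeholder class template), the
/// expansion below requires 'As' and 'Bs' to expand to the same length:
/// \code
/// template<typename ...As> struct Outer {
///   template<typename ...Bs> void f(P<As, Bs> ...ps);
/// };
/// \endcode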
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
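///
/// For example (illustrative):
/// \code
/// template<typename ...Ts>
/// constexpr std::size_t N = sizeof...(Ts); // no need to expand Ts
/// \endcode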
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
// Substitute auto in TypeWithAuto for a Dependent auto type
QualType SubstAutoTypeDependent(QualType TypeWithAuto);
// Substitute auto in TypeWithAuto for a Dependent auto type
TypeSourceInfo *
SubstAutoTypeSourceInfoDependent(TypeSourceInfo *TypeWithAuto);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
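/// Deduce the type of \c auto in \p AutoType from \p Initializer. For
/// example (illustrative):
/// \code
/// auto x = 42;        // 'auto' deduced as 'int'
/// const auto &r = x;  // 'auto' deduced as 'int'; 'r' is 'const int &'
/// \endcode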
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are normalizing a constraint expression.
ConstraintNormalization,
/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
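///
/// Typical (illustrative) usage, where \c SemaRef and \c ArgIdx stand in
/// for the enclosing Sema object and the selected pack argument index:
/// \code
/// ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, ArgIdx);
/// // ... substitute the pattern using the ArgIdx-th pack argument ...
/// \endcode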
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
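///
/// Typical (illustrative) usage:
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return; // instantiation depth exceeded or otherwise invalid
/// \endcode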
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
bool isImmediateFunctionContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isImmediateFunctionContext();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
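///
/// Typical (illustrative) usage:
/// \code
/// SFINAETrap Trap(*this);
/// // ... perform checks whose failures should be treated as SFINAE ...
/// if (Trap.hasErrorOccurred())
///   return ExprError();
/// \endcode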
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
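/// RAII scope that, when enabled, saves the pending-instantiation and
/// vtable-use queues on entry and restores them on exit; call \c perform()
/// to flush both queues first. Typical (illustrative) usage:
/// \code
/// GlobalEagerInstantiationScope GlobalInstantiations(*this, Enabled);
/// // ... code that may enqueue instantiations ...
/// GlobalInstantiations.perform();
/// \endcode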
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
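///
/// Typical (illustrative) usage, where \c EPI is a
/// \c FunctionProtoType::ExtProtoInfo being filled in:
/// \code
/// ExtParameterInfoBuilder InfoBuilder;
/// InfoBuilder.set(ParamIdx, Info);
/// EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);
/// \endcode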
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool SubstTypeConstraint(TemplateTypeParmDecl *Inst, const TypeConstraint *TC,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
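///
/// For example (illustrative):
/// \code
/// [super init];      // ObjCSuperMessage
/// [receiver count];  // ObjCInstanceMessage
/// [NSArray array];   // ObjCClassMessage
/// \endcode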
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
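/// For example (illustrative): '\#pragma pack(push, 8)' arrives here with a
/// push action and the alignment expression 8, while '\#pragma pack(pop)'
/// arrives with a pop action and no alignment expression.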
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
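/// For example (illustrative): '\#pragma comment(lib, "msvcrt.lib")' arrives
/// with the 'lib' comment kind and Arg == "msvcrt.lib".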
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well-formed '\#pragma clang __debug dump II'.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
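// Note (illustrative): -ffast-math, or pragmas such as
// '#pragma clang fp reassociate(on)', set one of the four FPFeatures bits
// tested above, so isPreciseFPEnabled() returns false under them.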
/// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control.
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility...
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
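/// For example (illustrative, hypothetical names): '\#pragma weak foo = bar'
/// arrives with WeakName 'foo' and AliasName 'bar', making 'foo' a weak
/// alias of 'bar'.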
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
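// Illustrative sketch: between
//   #pragma clang optimize off
// and a matching
//   #pragma clang optimize on
// the location above is valid, and AddRangeBasedOptnone() below attaches
// 'optnone' to function definitions seen in that range.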
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
template <typename AttrType>
void addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr, Expr *ZDimExpr);
void AddWorkGroupSizeHintAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDim, Expr *YDim, Expr *ZDim);
WorkGroupSizeHintAttr *
MergeWorkGroupSizeHintAttr(Decl *D, const WorkGroupSizeHintAttr &A);
void AddIntelReqdSubGroupSize(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelReqdSubGroupSizeAttr *
MergeIntelReqdSubGroupSizeAttr(Decl *D, const IntelReqdSubGroupSizeAttr &A);
IntelNamedSubGroupSizeAttr *
MergeIntelNamedSubGroupSizeAttr(Decl *D, const IntelNamedSubGroupSizeAttr &A);
void AddSYCLIntelNumSimdWorkItemsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNumSimdWorkItemsAttr *
MergeSYCLIntelNumSimdWorkItemsAttr(Decl *D,
const SYCLIntelNumSimdWorkItemsAttr &A);
void AddSYCLIntelESimdVectorizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelESimdVectorizeAttr *
MergeSYCLIntelESimdVectorizeAttr(Decl *D,
const SYCLIntelESimdVectorizeAttr &A);
void AddSYCLIntelSchedulerTargetFmaxMhzAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelSchedulerTargetFmaxMhzAttr *MergeSYCLIntelSchedulerTargetFmaxMhzAttr(
Decl *D, const SYCLIntelSchedulerTargetFmaxMhzAttr &A);
void AddSYCLIntelNoGlobalWorkOffsetAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNoGlobalWorkOffsetAttr *MergeSYCLIntelNoGlobalWorkOffsetAttr(
Decl *D, const SYCLIntelNoGlobalWorkOffsetAttr &A);
void AddSYCLIntelLoopFuseAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelLoopFuseAttr *
MergeSYCLIntelLoopFuseAttr(Decl *D, const SYCLIntelLoopFuseAttr &A);
void AddIntelFPGAPrivateCopiesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGAMaxReplicatesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAMaxReplicatesAttr *
MergeIntelFPGAMaxReplicatesAttr(Decl *D, const IntelFPGAMaxReplicatesAttr &A);
void AddIntelFPGAForcePow2DepthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAForcePow2DepthAttr *
MergeIntelFPGAForcePow2DepthAttr(Decl *D,
const IntelFPGAForcePow2DepthAttr &A);
void AddSYCLIntelFPGAInitiationIntervalAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *MergeSYCLIntelFPGAInitiationIntervalAttr(
Decl *D, const SYCLIntelFPGAInitiationIntervalAttr &A);
SYCLIntelFPGAMaxConcurrencyAttr *MergeSYCLIntelFPGAMaxConcurrencyAttr(
Decl *D, const SYCLIntelFPGAMaxConcurrencyAttr &A);
void AddSYCLIntelMaxGlobalWorkDimAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelMaxGlobalWorkDimAttr *
MergeSYCLIntelMaxGlobalWorkDimAttr(Decl *D,
const SYCLIntelMaxGlobalWorkDimAttr &A);
void AddIntelFPGABankWidthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGABankWidthAttr *
MergeIntelFPGABankWidthAttr(Decl *D, const IntelFPGABankWidthAttr &A);
void AddIntelFPGANumBanksAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGANumBanksAttr *
MergeIntelFPGANumBanksAttr(Decl *D, const IntelFPGANumBanksAttr &A);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
/// AddSYCLIntelFPGAMaxConcurrencyAttr - Adds a max_concurrency attribute to a
/// particular declaration.
void AddSYCLIntelFPGAMaxConcurrencyAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
bool checkAllowedSYCLInitializer(VarDecl *VD);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
/// Look up 'coroutine_traits' in the std namespace and the std::experimental
/// namespace. The namespace found is recorded in Namespace.
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc,
NamespaceDecl *&Namespace);
/// Check that the expression co_await promise.final_suspend() is not
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
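// Illustrative sketch of the rule enforced above: the promise type must
// declare its final suspend point as non-throwing, e.g.
//
//   struct promise_type {
//     std::suspend_always final_suspend() noexcept { return {}; } // OK
//   };
//
// Omitting 'noexcept' makes co_await promise.final_suspend() potentially
// throwing and is diagnosed.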
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
struct DeclareTargetContextInfo {
struct MapInfo {
OMPDeclareTargetDeclAttr::MapTypeTy MT;
SourceLocation Loc;
};
/// Explicitly listed variables and functions in a 'to' or 'link' clause.
llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
/// The 'device_type' as parsed from the clause.
OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
/// The directive kind, `begin declare target` or `declare target`.
OpenMPDirectiveKind Kind;
/// The directive location.
SourceLocation Loc;
DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
: Kind(Kind), Loc(Loc) {}
};
/// Stack of nested '#pragma omp declare target' contexts.
SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true,
bool SuppressExprDiags = false);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Analyzes and checks a loop nest for use by a loop transformation.
///
/// \param Kind The loop transformation directive kind.
/// \param NumLoops How many nested loops the directive is expecting.
/// \param AStmt Associated statement of the transformation directive.
/// \param LoopHelpers [out] The loop analysis result.
/// \param Body [out] The body code nested in \p NumLoops loops.
/// \param OriginalInits [out] Collection of statements and declarations that
/// must have been executed/declared before entering the
/// loop.
///
/// \return Whether there was any error.
bool checkTransformableLoopNest(
OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
Stmt *&Body,
SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
&OriginalInits);
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
return OMPDeclareVariantScopes.empty() ? nullptr
: OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Can we exit an OpenMP declare variant scope at the moment?
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle an `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle an `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
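// Illustrative sketch of the bracket handled by the two callbacks above
// (function name hypothetical):
//
//   #pragma omp begin declare variant match(device = {kind(gpu)})
//   void foo() { /* GPU-specific body */ }
//   #pragma omp end declare variant
//
// Definitions inside the bracket are mangled with the context selector and
// registered as variants of the base function (see
// ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope above).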
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture the variables captured by a lambda in the OpenMP region
/// before the lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct at which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in a 'private' clause.
/// \param Level Relative level of the nested OpenMP construct at which the
/// check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by a 'target' directive.
/// \param Level Relative level of the nested OpenMP construct at which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of the nested OpenMP construct at which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on well-formed '\#pragma omp metadirective' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPMetaDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on a correct id-expression from '#pragma omp threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
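/// For example (illustrative, 'gv' hypothetical):
/// '\#pragma omp threadprivate(gv)' reaches this callback with VarList
/// holding the reference to 'gv'.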
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<std::string> Assumptions,
bool SkippedClauses);
/// Check if there is an active scoped `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in an 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
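// Illustrative sketch of a directive (written as one logical line in
// source) that exercises the declare reduction callbacks above:
//
//   #pragma omp declare reduction(merge : std::vector<int> :
//       omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
//       initializer(omp_priv = std::vector<int>())
//
// The combiner start/end callbacks introduce and check the implicit
// omp_out/omp_in variables; the initializer callbacks handle omp_priv.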
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in an 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called at the start of a declare target region, i.e.
/// '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Called at the end of a declare target region, i.e.
/// '#pragma omp end declare target'.
const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
/// Called once a declare target context is completed, either because a
/// '#pragma omp end declare target' was encountered or because a
/// '#pragma omp declare target' without a declaration-definition-seq was
/// encountered.
void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on a correct id-expression from '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactic loops (ForStmt or CXXForRangeStmt) associated with
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// Process a canonical OpenMP loop nest that can either be a canonical
/// literal loop (ForStmt or CXXForRangeStmt), or the generated loop of an
/// OpenMP loop transformation construct.
StmtResult ActOnOpenMPLoopnest(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '#pragma omp unroll' after parsing of its clauses
/// and the associated statement.
StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
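// Illustrative sketch of the loop transformations handled above ('body',
// 'N', 'M' hypothetical): 'tile' rewrites a canonical loop nest before
// further analysis, e.g.
//
//   #pragma omp tile sizes(4, 8)
//   for (int i = 0; i < N; ++i)
//     for (int j = 0; j < M; ++j)
//       body(i, j);
//
// checkTransformableLoopNest() (declared earlier) validates the nest and
// collects the per-loop helper expressions.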
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp loop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPGenericLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \param NumAppendArgs The number of omp_interop_t arguments to account for
/// in checking.
/// \returns None if the function and the variant function are not compatible
/// with the pragma; otherwise, the pair of the original function and the
/// variant ref expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, unsigned NumAppendArgs,
SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
/// \param AdjustArgsNothing The list of 'nothing' arguments.
/// \param AdjustArgsNeedDevicePtr The list of 'need_device_ptr' arguments.
/// \param AppendArgs The list of 'append_args' arguments.
/// \param AdjustArgsLoc The Location of an 'adjust_args' clause.
/// \param AppendArgsLoc The Location of an 'append_args' clause.
/// \param SR The SourceRange of the 'declare variant' directive.
void ActOnOpenMPDeclareVariantDirective(
FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI,
ArrayRef<Expr *> AdjustArgsNothing,
ArrayRef<Expr *> AdjustArgsNeedDevicePtr,
ArrayRef<OMPDeclareVariantAttr::InteropType> AppendArgs,
SourceLocation AdjustArgsLoc, SourceLocation AppendArgsLoc,
SourceRange SR);
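// Illustrative example of the directive handled above ('foo'/'foo_gpu'
// hypothetical):
//
//   #pragma omp declare variant(foo_gpu) match(device = {kind(gpu)})
//   void foo();
//
// Here VariantRef references 'foo_gpu' and TI carries the parsed 'match'
// context selector.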
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'align' clause.
OMPClause *ActOnOpenMPAlignClause(Expr *Alignment, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'full' clause.
OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'partial' clause.
OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'when' clause.
OMPClause *ActOnOpenMPWhenClause(OMPTraitInfo &TI, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
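// Illustrative mapping (sketch): 'schedule(monotonic : dynamic, 4)' arrives
// with M1 naming the 'monotonic' modifier, Kind naming 'dynamic', and
// ChunkSize holding the expression 4, along with their source locations.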
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
bool IsTarget, bool IsTargetSync,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation VarLoc,
SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *ActOnOpenMPMapClause(
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs, bool NoDiagnose = false,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
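// Example usage (an illustrative sketch, not taken from this header;
// AllocExpr, TraitsExpr, and the source locations are assumed parser-side
// values):
//
//   UsesAllocatorsData D;
//   D.Allocator = AllocExpr;
//   D.AllocatorTraits = TraitsExpr; // may stay null when no traits are given
//   D.LParenLoc = LP;
//   D.RParenLoc = RP;
//   OMPClause *C =
//       ActOnOpenMPUsesAllocatorClause(StartLoc, LParenLoc, EndLoc, {D});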
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// Called on a well-formed 'bind' clause.
OMPClause *ActOnOpenMPBindClause(OpenMPBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result has the value kind specified by \p VK.
ExprResult
ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_PRValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK = CCK_ImplicitConversion);
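// Example usage (an illustrative sketch; `E` is an assumed expression that
// should be coerced to 'int'):
//
//   ExprResult Res = ImpCastExprToType(E, Context.IntTy, CK_IntegralCast);
//   if (Res.isInvalid())
//     return ExprError();
//   E = Res.get();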
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
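// Example usage (an illustrative sketch from the perspective of a binary
// operator checker; LHS, RHS, and OpLoc are assumptions):
//
//   QualType ResultTy =
//       UsualArithmeticConversions(LHS, RHS, OpLoc, ACK_Arithmetic);
//   if (LHS.isInvalid() || RHS.isInvalid())
//     return QualType();
//   if (ResultTy.isNull())
//     return InvalidOperands(OpLoc, LHS, RHS); // emits the diagnostic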
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// that point to integers of differing sign but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
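// Example usage (an illustrative sketch; LHSType, RHSExpr, and Loc are
// assumptions):
//
//   ExprResult RHS = RHSExpr;
//   AssignConvertType ConvTy =
//       CheckSingleAssignmentConstraints(LHSType, RHS);
//   if (RHS.isInvalid())
//     return ExprError();
//   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSExpr->getType(),
//                                RHS.get(), AA_Assigning))
//     return ExprError();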
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking for binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking for declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// Type checking for C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
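// Example usage (an illustrative sketch; T1 is the reference's referred-to
// type and T2 the initializer type, both assumptions):
//
//   ReferenceConversions Conv;
//   if (CompareReferenceRelationship(Loc, T1, T2, &Conv) == Ref_Compatible &&
//       !(Conv & ReferenceConversions::DerivedToBase))
//     ; // T1 binds directly, with no derived-to-base adjustment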
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions, i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
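// Example usage (an illustrative sketch of building an 'if' condition;
// IfLoc and CondExpr are assumptions):
//
//   ConditionResult Cond =
//       ActOnCondition(getCurScope(), IfLoc, CondExpr,
//                      ConditionKind::Boolean);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ; // the condition folded to a constant true/false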
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression, or an invalid result on error.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Checks that the expression can be converted
/// to bool, returning the converted expression (or an invalid result).
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns the checked expression,
/// or an invalid result on failure. Can optionally return the value of the
/// expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
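// Example usage (an illustrative sketch; DiagID names an assumed "not an
// ICE" diagnostic):
//
//   llvm::APSInt Value;
//   ExprResult ICE =
//       VerifyIntegerConstantExpression(E, &Value, DiagID, AllowFold);
//   if (ICE.isInvalid())
//     return ExprError();
//   // On success, Value holds the folded result and ICE.get() the
//   // checked expression.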
/// VerifyBitField - Verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns the checked expression, or an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
class DeviceDeferredDiagnostic {
public:
DeviceDeferredDiagnostic(SourceLocation SL, const PartialDiagnostic &PD,
DeviceDiagnosticReason R)
: Diagnostic(SL, PD), Reason(R) {}
PartialDiagnosticAt &getDiag() { return Diagnostic; }
DeviceDiagnosticReason getReason() const { return Reason; }
private:
PartialDiagnosticAt Diagnostic;
DeviceDiagnosticReason Reason;
};
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<DeviceDeferredDiagnostic>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
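// Example usage (an illustrative sketch; DiagID and T are assumptions):
//
//   // Defers the diagnostic when the surrounding function may never be
//   // codegen'ed for the current target.
//   if (!Context.getTargetInfo().hasInt128Type())
//     targetDiag(Loc, DiagID) << T;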
/// Check if the type is allowed to be used for the current target.
void checkTypeSupport(QualType Ty, SourceLocation Loc,
ValueDecl *D = nullptr);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
enum CUDAVariableTarget {
CVT_Device,  ///< Emitted on device side with a shadow variable on host side
CVT_Host,    ///< Emitted on host side only
CVT_Both,    ///< Emitted on both sides with different addresses
CVT_Unified, ///< Emitted as a unified address, e.g. managed variables
};
/// Determines whether the given variable is emitted on host or device side.
CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
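// Example usage (an illustrative sketch from overload resolution; Caller
// and Candidate are assumptions):
//
//   if (getLangOpts().CUDA &&
//       IdentifyCUDAPreference(Caller, Candidate) == CFP_Never)
//     continue; // drop candidates that are never callable from Caller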
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas are host device functions by default unless they carry an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p Var satisfy CUDA restrictions. In case of
/// error, emits an appropriate diagnostic and invalidates \p Var.
///
/// CUDA allows only empty constructors as initializers for global
/// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are implicitly
/// static in CUDA). One exception is that CUDA allows constant initializers
/// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with
/// the parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
enum class AttributeCompletion {
Attribute,
Scope,
None,
};
void CodeCompleteAttribute(
AttributeCommonInfo::Syntax Syntax,
AttributeCompletion Completion = AttributeCompletion::Attribute,
const IdentifierInfo *Scope = nullptr);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
void CheckSYCLKernelCall(FunctionDecl *CallerFunc, SourceRange CallLoc,
ArrayRef<const Expr *> Args);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinArithmeticFence(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
bool SemaBuiltinElementwiseMath(CallExpr *TheCall);
bool PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall);
bool SemaBuiltinReduceMath(CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a declared field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check whether the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
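// Illustrative example (a sketch following the Clang type-safety-checking
// docs; the MPI names are placeholders): user code registers a type tag and
// annotates a function, and calls are then checked via
// CheckArgumentWithTypeTag() below:
//
//   typedef int MPI_Datatype;
//   static const MPI_Datatype mpi_datatype_int
//       __attribute__((type_tag_for_datatype(mpi, int))) = 42;
//   #define MPI_INT ((MPI_Datatype)mpi_datatype_int)
//   int MPI_Send(void *buf, int count, MPI_Datatype datatype)
//       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
//
// MPI_Send(&some_int, 1, MPI_INT) then type-checks, while
// MPI_Send(&some_float, 1, MPI_INT) is diagnosed.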
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// Check whether the number of arguments being passed to a function
/// exceeds the number of parameters the function expects.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
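// Worked example: completing after "f(a, b, " for a function declared with
// two parameters gives NumArgs == 2 with PartialOverloading set, so the
// call is treated as having 2 + 1 = 3 arguments and is flagged as passing
// too many for NumParams == 2.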
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to a pointer type T with lower
/// or equal alignment requirements. If so, it removes the expression from
/// the set. This is used when we do not want to diagnose such misaligned
/// access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
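// Illustrative example of the pattern these helpers diagnose (a sketch, not
// tied to a specific diagnostic wording):
//
//   struct __attribute__((packed)) S { char c; int i; };
//   S s;
//   int *p = &s.i;  // 's.i' may be under-aligned, so this access is
//                   // gathered and diagnosed, while a conversion to a type
//                   // with lower or equal alignment requirements
//                   // (e.g. void*) discards it from the set.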
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL Kernels here and handle separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for the current compilation unit this
// Sema is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
std::unique_ptr<SYCLIntegrationFooter> SyclIntFooter;
// We need to store the list of the sycl_kernel functions and their
// associated generated OpenCL kernels so we can go back and rename them
// after the fact.
llvm::SmallVector<std::pair<const FunctionDecl *, FunctionDecl *>>
SyclKernelsToOpenCLKernels;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during kernel emission also skips the
// useful notes that show where the kernel was called.
bool DiagnosingSYCLKernel = false;
public:
void addSyclOpenCLKernel(const FunctionDecl *SyclKernel,
FunctionDecl *OpenCLKernel) {
SyclKernelsToOpenCLKernels.emplace_back(SyclKernel, OpenCLKernel);
}
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(*this);
return *SyclIntHeader.get();
}
SYCLIntegrationFooter &getSyclIntegrationFooter() {
if (SyclIntFooter == nullptr)
SyclIntFooter = std::make_unique<SYCLIntegrationFooter>(*this);
return *SyclIntFooter.get();
}
void addSyclVarDecl(VarDecl *VD) {
if (LangOpts.SYCLIsDevice && !LangOpts.SYCLIntFooter.empty())
getSyclIntegrationFooter().addVarDecl(VD);
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction,
KernelConstStaticVariable
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void copySYCLKernelAttrs(const CXXRecordDecl *KernelObj);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void SetSYCLKernelNames();
void MarkDevices();
/// Get the number of fields or captures within the parsed type.
ExprResult ActOnSYCLBuiltinNumFieldsExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumFieldsExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given field number so that callers
/// can wrap it in a decltype() to get the actual type of the field.
ExprResult ActOnSYCLBuiltinFieldTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinFieldTypeExpr(SourceLocation Loc,
QualType SourceTy, Expr *Idx);
/// Get the number of base classes within the parsed type.
ExprResult ActOnSYCLBuiltinNumBasesExpr(ParsedType PT);
ExprResult BuildSYCLBuiltinNumBasesExpr(SourceLocation Loc,
QualType SourceTy);
/// Get a value based on the type of the given base number so that callers
/// can wrap it in a decltype() to get the actual type of the base class.
ExprResult ActOnSYCLBuiltinBaseTypeExpr(ParsedType PT, Expr *Idx);
ExprResult BuildSYCLBuiltinBaseTypeExpr(SourceLocation Loc, QualType SourceTy,
Expr *Idx);
/// Emit a diagnostic about the given attribute having a deprecated name, and
/// also emit a fixit hint to generate the new attribute name.
void DiagnoseDeprecatedAttribute(const ParsedAttr &A, StringRef NewScope,
StringRef NewName);
/// Diagnoses an attribute in the 'intelfpga' namespace and suggests using
/// the attribute in the 'intel' namespace instead.
void CheckDeprecatedSYCLAttributeSpelling(const ParsedAttr &A,
StringRef NewName = "");
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't yet know that this function will be
/// codegen'ed for the device, creates a diagnostic which is emitted if and
/// when we realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(
SourceLocation Loc, unsigned DiagID,
DeviceDiagnosticReason Reason = DeviceDiagnosticReason::Sycl |
DeviceDiagnosticReason::Esimd);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred function calls that may not be
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc,
DeviceDiagnosticReason Reason);
/// Tells whether the given variable is a SYCL explicit SIMD extension's
/// "private global" variable, i.e. a global variable in the private address
/// space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
return getLangOpts().SYCLIsDevice && VDecl->hasAttr<SYCLSimdAttr>() &&
VDecl->hasGlobalStorage() &&
(VDecl->getType().getAddressSpace() == LangAS::sycl_private);
}
};
inline Expr *checkMaxWorkSizeAttrExpr(Sema &S, const AttributeCommonInfo &CI,
Expr *E) {
assert(E && "Attribute must have an argument.");
if (!E->isInstantiationDependent()) {
llvm::APSInt ArgVal;
ExprResult ICE = S.VerifyIntegerConstantExpression(E, &ArgVal);
if (ICE.isInvalid())
return nullptr;
E = ICE.get();
if (ArgVal.isNegative()) {
S.Diag(E->getExprLoc(),
diag::warn_attribute_requires_non_negative_integer_argument)
<< E->getType() << S.Context.UnsignedLongLongTy
<< E->getSourceRange();
return E;
}
unsigned Val = ArgVal.getZExtValue();
if (Val == 0) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_is_zero)
<< CI << E->getSourceRange();
return nullptr;
}
}
return E;
}
template <typename WorkGroupAttrType>
void Sema::addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr,
Expr *ZDimExpr) {
assert((XDimExpr && YDimExpr && ZDimExpr) &&
"argument has unexpected null value");
// Accept template arguments for now as they depend on something else.
// We'll get to check them when they eventually get instantiated.
if (!XDimExpr->isValueDependent() && !YDimExpr->isValueDependent() &&
!ZDimExpr->isValueDependent()) {
// Save ConstantExpr in semantic attribute
XDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, XDimExpr);
YDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, YDimExpr);
ZDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, ZDimExpr);
if (!XDimExpr || !YDimExpr || !ZDimExpr)
return;
}
D->addAttr(::new (Context)
WorkGroupAttrType(Context, CI, XDimExpr, YDimExpr, ZDimExpr));
}
/// RAII object that enters a new expression evaluation context.
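///
/// Example usage (an illustrative sketch; `Actions` is a Sema reference and
/// the analyzed expression is hypothetical):
/// \code
///   {
///     EnterExpressionEvaluationContext Unevaluated(
///         Actions, Sema::ExpressionEvaluationContext::Unevaluated);
///     // ... analyze an expression as an unevaluated operand ...
///   } // the previous evaluation context is restored here
/// \endcode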
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
IPCComm.h | /*! @brief Flag for checking if this header has already been included. */
#ifndef YGGIPCCOMM_H_
#define YGGIPCCOMM_H_
// OpenSimRoot has a conflicting definition for 'msg'
#ifdef USE_OSR_YGG
#undef IPCINSTALLED
#endif
#ifdef IPCINSTALLED
#include <fcntl.h> /* For O_* constants */
#include <sys/stat.h> /* For mode constants */
#include <sys/msg.h>
#include <sys/types.h>
#include <sys/sem.h>
#include <sys/shm.h>
#endif /*IPCINSTALLED*/
#include <CommBase.h>
#ifdef __cplusplus /* If this is a C++ compiler, use C linkage */
extern "C" {
#endif
#ifdef IPCINSTALLED
/*! @brief Maximum number of channels. */
#define _yggTrackChannels 256
/*! @brief Keys of the channels in use. */
static int _yggChannelNames[_yggTrackChannels];
//static char * _yggChannelNames[_yggTrackChannels];
/*! @brief Number of channels in use. */
static unsigned _yggChannelsUsed = 0;
static unsigned _ipc_rand_seeded = 0;
/*!
@brief Message buffer structure.
*/
typedef struct msgbuf_t {
long mtype; //!< Message buffer type
char data[YGG_MSG_MAX]; //!< Buffer for the message
} msgbuf_t;
/*!
@brief Check if an IPC channel can be initialized.
@param[in] comm comm_t* Comm structure with name that should be checked.
@returns int -1 if the channel can't be initialized, 0 otherwise.
*/
static inline
int check_channels(comm_t* comm) {
// Fail if name is empty
if (strlen(comm->name) == 0) {
ygglog_error("Cannot create channel with empty name.");
return -1;
}
// Fail if trying to re-use the same channel twice
unsigned i;
char *key = comm->address;
int error_code = 0;
#ifdef _OPENMP
#pragma omp critical (ipc)
{
#endif
for (i = 0; i < _yggChannelsUsed; i++ ) {
if (_yggChannelNames[i] == atoi(comm->address)) {
/* if (0 == strcmp(_yggChannelNames[i], key)) { */
ygglog_error("Attempt to re-use channel: name=%s, key=%s, i=%d",
comm->name, key, i);
error_code = -1;
break;
}
}
// Fail if > _yggTrackChannels channels used
if ((!error_code) && (_yggChannelsUsed >= _yggTrackChannels)) {
ygglog_error("Too many channels in use, max: %d", _yggTrackChannels);
error_code = -1;
}
#ifdef _OPENMP
}
#endif
return error_code;
};
/*!
@brief Add a new channel to the list of existing channels.
@param[in] comm const comm_t* Comm structure whose channel should be recorded.
*/
static inline
void add_channel(const comm_t* comm) {
#ifdef _OPENMP
#pragma omp critical (ipc)
{
#endif
// printf("add_channel(%s): %d, %s\n", comm->name, _yggChannelsUsed, comm->address);
_yggChannelNames[_yggChannelsUsed++] = atoi(comm->address);
#ifdef _OPENMP
}
#endif
};
/*!
@brief Remove a channel.
@param[in] comm comm_t* Comm with channel that should be removed.
@param[in] close_comm int If 1, the queue will be closed, otherwise it will
just be removed from the register and it is assumed that another process
will close it.
@returns int -1 if removal was not successful, 0 otherwise.
*/
static inline
int remove_comm(const comm_t* comm, const int close_comm) {
int ret;
if (close_comm) {
ret = msgctl(((int*)(comm->handle))[0], IPC_RMID, NULL);
/* if (ret < 0) { */
/* ygglog_error("remove_comm(%s): Could not close comm.", comm->name); */
/* return ret; */
/* } */
}
ret = -1;
unsigned i;
int ich = atoi(comm->address);
#ifdef _OPENMP
#pragma omp critical (ipc)
{
#endif
for (i = 0; i < _yggChannelsUsed; i++) {
if (ich == _yggChannelNames[i]) {
memmove(_yggChannelNames + i, _yggChannelNames + i + 1,
(_yggTrackChannels - (i + 1))*sizeof(int));
_yggChannelsUsed--;
ret = 0;
break;
}
}
if (ret < 0) {
ygglog_error("remove_comm(%s): Could not locate comm in register.", comm->name);
}
/* if ((ret != -1) && (ich == (int)(_yggChannelsUsed - 1))) { */
/* /\* memmove(_yggChannelNames + ich, _yggChannelNames + ich + 1, *\/ */
/* /\* (_yggTrackChannels - (ich + 1))*sizeof(char*)); *\/ */
/* _yggChannelsUsed--; */
/* } */
#ifdef _OPENMP
}
#endif
return ret;
};
/*!
@brief Create a new channel.
@param[in] comm comm_t * Comm structure initialized with new_comm_base.
@returns int -1 if the address could not be created.
*/
static inline
int new_ipc_address(comm_t *comm) {
int ret;
// TODO: small chance of reusing same number
int key = 0;
#ifdef _OPENMP
#pragma omp critical (ipc)
{
#endif
if (!(_ipc_rand_seeded)) {
srand(ptr2seed(comm));
_ipc_rand_seeded = 1;
}
#ifdef _OPENMP
}
#endif
while (key == 0) {
key = rand();
}
if (strlen(comm->name) == 0) {
sprintf(comm->name, "tempnewIPC.%d", key);
} else {
ret = check_channels(comm);
if (ret < 0)
return ret;
}
sprintf(comm->address, "%d", key);
int *fid = (int*)malloc(sizeof(int));
if (fid == NULL) {
ygglog_error("new_ipc_address: Could not malloc queue fid.");
return -1;
}
fid[0] = msgget(key, (IPC_CREAT | 0777));
if (fid[0] < 0) {
ygglog_error("new_ipc_address: msgget(%d, %d | 0777) ret(%d), errno(%d): %s",
key, IPC_CREAT, fid[0], errno, strerror(errno));
return -1;
}
comm->handle = (void*)fid;
add_channel(comm);
return 0;
};
/*!
@brief Initialize a sysv_ipc communicator.
@param[in] comm comm_t * Comm structure initialized with init_comm_base.
@returns int -1 if the comm could not be initialized.
*/
static inline
int init_ipc_comm(comm_t *comm) {
if (!(comm->flags & COMM_FLAG_VALID))
return -1;
if (strlen(comm->name) == 0) {
sprintf(comm->name, "tempinitIPC.%s", comm->address);
} else {
int ret = check_channels(comm);
if (ret < 0)
return ret;
}
add_channel(comm);
int qkey = atoi(comm->address);
int *fid = (int *)malloc(sizeof(int));
if (fid == NULL) {
ygglog_error("init_ipc_comm: Could not malloc queue fid.");
return -1;
}
fid[0] = msgget(qkey, 0600);
if (fid[0] < 0) {
ygglog_error("init_ipc_address: msgget(%d, 0600) ret(%d), errno(%d): %s",
qkey, fid[0], errno, strerror(errno));
return -1;
}
comm->handle = (void*)fid;
return 0;
};
/*!
@brief Perform deallocation for basic communicator.
@param[in] x comm_t* Pointer to communicator to deallocate.
@returns int 1 if there is an error, 0 otherwise.
*/
static inline
int free_ipc_comm(comm_t *x) {
if (x->handle != NULL) {
if (strcmp(x->direction, "recv") == 0) {
remove_comm(x, 1);
} else {
remove_comm(x, 0);
}
free(x->handle);
x->handle = NULL;
}
return 0;
};
/*!
@brief Get number of messages in the comm.
@param[in] x comm_t* Communicator to check.
@returns int Number of messages. -1 indicates an error.
*/
static inline
int ipc_comm_nmsg(const comm_t *x) {
struct msqid_ds buf;
if (x->handle == NULL) {
ygglog_error("ipc_comm_nmsg: Queue handle is NULL.");
return -1;
}
int rc = msgctl(((int*)x->handle)[0], IPC_STAT, &buf);
if (rc != 0) {
/* ygglog_error("ipc_comm_nmsg: Could not access queue."); */
return 0;
}
int ret = buf.msg_qnum;
return ret;
};
/*!
@brief Send a message to the comm.
Send a message smaller than YGG_MSG_MAX bytes to an output comm. If the
message is larger, it will not be sent.
@param[in] x comm_t* structure that the message should be sent to.
@param[in] data character pointer to message that should be sent.
@param[in] len size_t length of message to be sent.
@returns int 0 if send successful, -1 if send unsuccessful.
*/
static inline
int ipc_comm_send(const comm_t *x, const char *data, const size_t len) {
ygglog_debug("ipc_comm_send(%s): %d bytes", x->name, len);
if (comm_base_send(x, data, len) == -1)
return -1;
msgbuf_t t;
t.mtype = 1;
memcpy(t.data, data, len);
int ret = -1;
int handle = ((int*)(x->handle))[0];
while (1) {
ret = msgsnd(handle, &t, len, IPC_NOWAIT);
ygglog_debug("ipc_comm_send(%s): msgsnd returned %d", x->name, ret);
if (ret == 0) break;
if ((ret == -1) && (errno == EAGAIN)) {
ygglog_debug("ipc_comm_send(%s): msgsnd, sleep", x->name);
usleep(YGG_SLEEP_TIME);
} else {
struct msqid_ds buf;
int rtrn = msgctl(handle, IPC_STAT, &buf);
if ((rtrn == 0) && ((buf.msg_qnum + len) > buf.msg_qbytes)) {
ygglog_debug("ipc_comm_send(%s): msgsnd, queue full, sleep", x->name);
usleep(YGG_SLEEP_TIME);
} else {
ygglog_error("ipc_comm_send: msgsend(%d, %p, %d, IPC_NOWAIT) ret(%d), errno(%d): %s",
((int*)(x->handle))[0], &t, len, ret, errno, strerror(errno));
ret = -1;
break;
}
}
}
ygglog_debug("ipc_comm_send(%s): returning %d", x->name, ret);
return ret;
};
/*!
@brief Receive a message from an input comm.
Receive a message smaller than YGG_MSG_MAX bytes from an input comm.
@param[in] x comm_t* structure that the message should be received from.
@param[out] data char ** pointer to allocated buffer where the message
should be saved. This should be a malloc'd buffer if allow_realloc is 1.
@param[in] len const size_t length of the allocated message buffer in bytes.
@param[in] allow_realloc const int If 1, the buffer will be reallocated if it
is not large enough. Otherwise an error will be returned.
@returns int -1 if message could not be received. Length of the received
message if message was received.
*/
static inline
int ipc_comm_recv(const comm_t *x, char **data, const size_t len,
const int allow_realloc) {
ygglog_debug("ipc_comm_recv(%s)", x->name);
msgbuf_t t;
t.mtype = 1;
int ret = -1;
int len_recv = -1;
while (1) {
ret = msgrcv(((int*)(x->handle))[0], &t, YGG_MSG_MAX, 0, IPC_NOWAIT);
if (ret == -1 && errno == ENOMSG) {
ygglog_debug("ipc_comm_recv(%s): no input, sleep", x->name);
usleep(YGG_SLEEP_TIME);
} else {
ygglog_debug("ipc_comm_recv(%s): received input: %d bytes, ret=%d",
x->name, strlen(t.data), ret);
break;
}
}
if (ret <= 0) {
ygglog_debug("ipc_comm_recv: msgrecv(%d, %p, %d, 0, IPC_NOWAIT): %s",
(int*)(x->handle), &t, (int)YGG_MSG_MAX, strerror(errno));
return -1;
}
len_recv = ret + 1;
if (len_recv > (int)len) {
if (allow_realloc) {
ygglog_debug("ipc_comm_recv(%s): reallocating buffer from %d to %d bytes.",
x->name, (int)len, len_recv);
(*data) = (char*)realloc(*data, len_recv);
if (*data == NULL) {
ygglog_error("ipc_comm_recv(%s): failed to realloc buffer.", x->name);
return -1;
}
} else {
ygglog_error("ipc_comm_recv(%s): buffer (%d bytes) is not large enough for message (%d bytes)",
x->name, len, len_recv);
return -(len_recv - 1);
}
}
memcpy(*data, t.data, len_recv);
(*data)[len_recv - 1] = '\0';
ret = len_recv - 1;
ygglog_debug("ipc_comm_recv(%s): returns %d bytes", x->name, ret);
return ret;
};
/*!
@brief Send a large message to an output comm.
Send a message larger than YGG_MSG_MAX bytes to an output comm by breaking
it up between several smaller messages and sending initial message with the
size of the message that should be expected. Must be partnered with
ipc_comm_recv_nolimit for communication to make sense.
@param[in] x comm_t* structure that message should be sent to.
@param[in] data character pointer to message that should be sent.
@param[in] len size_t length of message to be sent.
@returns int 0 if send successful, -1 if send unsuccessful.
*/
static inline
int ipc_comm_send_nolimit(const comm_t *x, const char *data, const size_t len){
ygglog_debug("ipc_comm_send_nolimit(%s): %d bytes", x->name, len);
int ret = -1;
size_t msgsiz = 0;
char msg[YGG_MSG_MAX];
sprintf(msg, "%ld", (long)(len));
ret = ipc_comm_send(x, msg, strlen(msg));
if (ret != 0) {
ygglog_debug("ipc_comm_send_nolimit(%s): sending size of payload failed.", x->name);
return ret;
}
size_t prev = 0;
while (prev < len) {
if ((len - prev) > YGG_MSG_MAX)
msgsiz = YGG_MSG_MAX;
else
msgsiz = len - prev;
ret = ipc_comm_send(x, data + prev, msgsiz);
if (ret != 0) {
ygglog_debug("ipc_comm_send_nolimit(%s): send interupted at %d of %d bytes.",
x->name, (int)prev, (int)len);
break;
}
prev += msgsiz;
ygglog_debug("ipc_comm_send_nolimit(%s): %d of %d bytes sent",
x->name, prev, len);
}
if (ret == 0)
ygglog_debug("ipc_comm_send_nolimit(%s): %d bytes completed", x->name, len);
return ret;
};
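/*!
  @brief Illustrative sketch of the receiving side of the nolimit protocol.
  This is NOT the library's ipc_comm_recv_nolimit (which is defined
  elsewhere); it is a minimal sketch assuming only the protocol used by
  ipc_comm_send_nolimit above: one header message carrying the total payload
  size as a decimal string, followed by the payload in chunks of at most
  YGG_MSG_MAX bytes.
  @param[in] x comm_t* structure that the message should be received from.
  @param[out] data char ** pointer to a malloc'd buffer for the message.
  @param[in] len size_t allocated size of the buffer in bytes.
  @returns int Length of the received message, -1 if it could not be received.
*/
static inline
int ipc_comm_recv_nolimit_sketch(const comm_t *x, char **data, const size_t len) {
  // Receive the header message carrying the payload size.
  char sizemsg[YGG_MSG_MAX];
  char *sizebuf = sizemsg;
  int ret = ipc_comm_recv(x, &sizebuf, YGG_MSG_MAX, 0);
  if (ret < 0)
    return -1;
  size_t total = (size_t)atol(sizemsg);
  // Grow the destination buffer if the payload will not fit.
  if ((total + 1) > len) {
    char *tmp = (char*)realloc(*data, total + 1);
    if (tmp == NULL)
      return -1;
    *data = tmp;
  }
  // Receive the payload chunk by chunk, appending at the current offset.
  size_t prev = 0;
  while (prev < total) {
    char *chunk = *data + prev;
    ret = ipc_comm_recv(x, &chunk, total - prev + 1, 0);
    if (ret < 0)
      return -1;
    prev += (size_t)ret;
  }
  (*data)[total] = '\0';
  return (int)total;
};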
// Definitions for the case where the IPC libraries are not installed
#else /*IPCINSTALLED*/
/*!
@brief Print error message about IPC library not being installed.
*/
static inline
void ipc_install_error() {
ygglog_error("Compiler flag 'IPCINSTALLED' not defined so IPC bindings are disabled.");
};
/*!
@brief Perform deallocation for basic communicator.
@param[in] x comm_t* Pointer to communicator to deallocate.
@returns int 1 if there is an error, 0 otherwise.
*/
static inline
int free_ipc_comm(comm_t *x) {
// Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
UNUSED(x);
#endif
ipc_install_error();
return 1;
};
/*!
@brief Create a new channel.
@param[in] comm comm_t * Comm structure initialized with new_comm_base.
@returns int -1 if the address could not be created.
*/
static inline
int new_ipc_address(comm_t *comm) {
// Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
UNUSED(comm);
#endif
ipc_install_error();
return -1;
};
/*!
@brief Initialize a sysv_ipc communicator.
@param[in] comm comm_t * Comm structure initialized with init_comm_base.
@returns int -1 if the comm could not be initialized.
*/
static inline
int init_ipc_comm(comm_t *comm) {
// Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
UNUSED(comm);
#endif
ipc_install_error();
return -1;
};
/*!
@brief Get number of messages in the comm.
@param[in] x comm_t* Communicator to check.
@returns int Number of messages. -1 indicates an error.
*/
static inline
int ipc_comm_nmsg(const comm_t *x) {
// Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
UNUSED(x);
#endif
ipc_install_error();
return -1;
};
/*!
@brief Send a message to the comm.
Send a message smaller than YGG_MSG_MAX bytes to an output comm. If the
message is larger, it will not be sent.
@param[in] x comm_t* structure that the message should be sent to.
@param[in] data character pointer to message that should be sent.
@param[in] len size_t length of message to be sent.
@returns int 0 if send successful, -1 if send unsuccessful.
*/
static inline
int ipc_comm_send(const comm_t *x, const char *data, const size_t len) {
// Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
UNUSED(x);
UNUSED(data);
UNUSED(len);
#endif
ipc_install_error();
return -1;
};
/*!
@brief Receive a message from an input comm.
Receive a message smaller than YGG_MSG_MAX bytes from an input comm.
@param[in] x comm_t* structure that the message should be received from.
@param[out] data char ** pointer to allocated buffer where the message
should be saved. This should be a malloc'd buffer if allow_realloc is 1.
@param[in] len const size_t length of the allocated message buffer in bytes.
@param[in] allow_realloc const int If 1, the buffer will be reallocated if it
is not large enough. Otherwise an error will be returned.
@returns int -1 if message could not be received. Length of the received
message if message was received.
*/
static inline
int ipc_comm_recv(const comm_t *x, char **data, const size_t len,
const int allow_realloc) {
// Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
UNUSED(x);
UNUSED(data);
UNUSED(len);
UNUSED(allow_realloc);
#endif
ipc_install_error();
return -1;
};
#endif /*IPCINSTALLED*/
#ifdef __cplusplus /* If this is a C++ compiler, end C linkage */
}
#endif
#endif /*YGGIPCCOMM_H_*/
|
pbkdf2-hmac-sha1_fmt_plug.c | /*
* This software is Copyright (c) 2013 magnum and it is hereby released to
* the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_sha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_sha1);
#else
#include <ctype.h>
#include <string.h>
#include <assert.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "base64_convert.h"
#include "stdint.h"
#include "pbkdf2_hmac_sha1.h"
#include "pbkdf2_hmac_common.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PBKDF2-HMAC-SHA1"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define PAD_SIZE 64
#define PLAINTEXT_LENGTH 125
static struct custom_salt {
unsigned int length;
unsigned int rounds;
unsigned int use_utf8;
//unsigned int outlen; /* Not used yet */
unsigned char salt[PBKDF2_32_MAX_SALT_SIZE];
} *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[PBKDF2_SHA1_BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *p;
int saltlen;
memset(&cs, 0, sizeof(cs));
ciphertext += PBKDF2_SHA1_TAG_LEN;
cs.use_utf8 = ciphertext[13] == 'S';
cs.rounds = atou(ciphertext);
ciphertext = strchr(ciphertext, '$') + 1;
p = strchr(ciphertext, '$');
saltlen = 0;
memset(cs.salt, 0, sizeof(cs.salt));
while (ciphertext < p) { /** extract salt **/
cs.salt[saltlen++] =
atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
atoi16[ARCH_INDEX(ciphertext[1])];
ciphertext += 2;
}
cs.length = saltlen;
return (void *)&cs;
}
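/* Illustration of the parse above (hypothetical values): if the body after
 * the tag reads "1000$fd11cde0$...", then cs.rounds == 1000, the hex pairs
 * decode to cs.salt == {0xfd, 0x11, 0xcd, 0xe0}, and cs.length == 4. The
 * use_utf8 flag is read from a fixed byte offset within the header. */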
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
#ifdef SSE_GROUP_SZ_SHA1
int lens[SSE_GROUP_SZ_SHA1], i;
unsigned char *pin[SSE_GROUP_SZ_SHA1];
union {
ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = crypt_out[index+i];
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens,
cur_salt->salt, cur_salt->length,
cur_salt->rounds, &(x.poutc),
PBKDF2_SHA1_BINARY_SIZE, 0);
#else
pbkdf2_sha1((const unsigned char*)(saved_key[index]),
strlen(saved_key[index]),
cur_salt->salt, cur_salt->length,
cur_salt->rounds, (unsigned char*)crypt_out[index],
PBKDF2_SHA1_BINARY_SIZE, 0);
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], PBKDF2_SHA1_BINARY_SIZE);
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
/* Check the FULL binary, just for good measure. There is no chance we'll
have a false positive here but this function is not performance sensitive. */
static int cmp_exact(char *source, int index)
{
return pbkdf2_hmac_sha1_cmp_exact(get_key(index), source, cur_salt->salt, cur_salt->length, cur_salt->rounds);
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->rounds;
}
struct fmt_main fmt_pbkdf2_hmac_sha1 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
PBKDF2_SHA1_BINARY_SIZE,
PBKDF2_32_BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
{
"iteration count",
},
pbkdf2_hmac_sha1_common_tests
}, {
init,
done,
fmt_default_reset,
pbkdf2_hmac_sha1_prepare,
pbkdf2_hmac_sha1_valid,
pbkdf2_hmac_sha1_split,
pbkdf2_hmac_sha1_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
triplet.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* These codes were originally parts of spglib, but were only developed */
/* and used for phono3py. Therefore they were moved from spglib to */
/* phono3py. This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stddef.h>
#include <mathfunc.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_iw.h>
#include <triplet_h/triplet_kpoint.h>
static size_t get_triplets_reciprocal_mesh_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const int grid_point,
const int mesh[3],
const int is_time_reversal,
const int num_rot,
TPLCONST int (*rotations)[3][3],
const int swappable);
size_t tpl_get_BZ_triplets_at_q(size_t (*triplets)[3],
const size_t grid_point,
TPLCONST int (*bz_grid_address)[3],
const size_t *bz_map,
const size_t *map_triplets,
const size_t num_map_triplets,
const int mesh[3])
{
return tpk_get_BZ_triplets_at_q(triplets,
grid_point,
bz_grid_address,
bz_map,
map_triplets,
num_map_triplets,
mesh);
}
size_t tpl_get_triplets_reciprocal_mesh_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const size_t grid_point,
const int mesh[3],
const int is_time_reversal,
const int num_rot,
TPLCONST int (*rotations)[3][3],
const int swappable)
{
return get_triplets_reciprocal_mesh_at_q(map_triplets,
map_q,
grid_address,
grid_point,
mesh,
is_time_reversal,
num_rot,
rotations,
swappable);
}
void tpl_get_integration_weight(double *iw,
char *iw_zero,
const double *frequency_points,
const size_t num_band0,
TPLCONST int relative_grid_address[24][4][3],
const int mesh[3],
TPLCONST size_t (*triplets)[3],
const size_t num_triplets,
TPLCONST int (*bz_grid_address)[3],
const size_t *bz_map,
const double *frequencies1,
const size_t num_band1,
const double *frequencies2,
const size_t num_band2,
const size_t tp_type,
const int openmp_per_triplets,
const int openmp_per_bands)
{
size_t i, num_band_prod;
int tp_relative_grid_address[2][24][4][3];
tpl_set_relative_grid_address(tp_relative_grid_address,
relative_grid_address,
tp_type);
num_band_prod = num_band0 * num_band1 * num_band2;
#pragma omp parallel for if (openmp_per_triplets)
for (i = 0; i < num_triplets; i++) {
tpi_get_integration_weight(iw + i * num_band_prod,
iw_zero + i * num_band_prod,
frequency_points, /* f0 */
num_band0,
tp_relative_grid_address,
mesh,
triplets[i],
num_triplets,
bz_grid_address,
bz_map,
frequencies1, /* f1 */
num_band1,
frequencies2, /* f2 */
num_band2,
tp_type,
openmp_per_bands);
}
}
void tpl_get_integration_weight_with_sigma(double *iw,
char *iw_zero,
const double sigma,
const double sigma_cutoff,
const double *frequency_points,
const size_t num_band0,
TPLCONST size_t (*triplets)[3],
const size_t num_triplets,
const double *frequencies,
const size_t num_band,
const size_t tp_type)
{
size_t i, num_band_prod, const_adrs_shift;
double cutoff;
cutoff = sigma * sigma_cutoff;
num_band_prod = num_band0 * num_band * num_band;
const_adrs_shift = num_triplets * num_band0 * num_band * num_band;
#pragma omp parallel for
for (i = 0; i < num_triplets; i++) {
tpi_get_integration_weight_with_sigma(
iw + i * num_band_prod,
iw_zero + i * num_band_prod,
sigma,
cutoff,
frequency_points,
num_band0,
triplets[i],
const_adrs_shift,
frequencies,
num_band,
tp_type,
0);
}
}
int tpl_is_N(const size_t triplet[3], const int *grid_address)
{
int i, j, sum_q, is_N;
is_N = 1;
for (i = 0; i < 3; i++) {
sum_q = 0;
for (j = 0; j < 3; j++) { /* 1st, 2nd, 3rd triplet */
sum_q += grid_address[triplet[j] * 3 + i];
}
if (sum_q) {
is_N = 0;
break;
}
}
return is_N;
}
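/* Illustrative example (not part of the API): for a triplet whose three
   grid points have addresses (1,0,0), (-1,1,0) and (0,-1,0), every
   component of q1+q2+q3 is zero, so tpl_is_N returns 1 (a Normal process);
   if any component summed to a nonzero value, momentum would only be
   conserved up to a reciprocal lattice vector G (an Umklapp process) and
   tpl_is_N would return 0. */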
void tpl_set_relative_grid_address(
int tp_relative_grid_address[2][24][4][3],
TPLCONST int relative_grid_address[24][4][3],
const size_t tp_type)
{
size_t i, j, k, l;
int signs[2];
signs[0] = 1;
signs[1] = 1;
if ((tp_type == 2) || (tp_type == 3)) {
/* q1+q2+q3=G */
/* To step q2 by +1, q3 must step by -1 to keep G unchanged */
signs[1] = -1;
}
/* tp_type == 4, q+k_i-k_f=G */
for (i = 0; i < 2; i++) {
for (j = 0; j < 24; j++) {
for (k = 0; k < 4; k++) {
for (l = 0; l < 3; l++) {
tp_relative_grid_address[i][j][k][l] =
relative_grid_address[j][k][l] * signs[i];
}
}
}
}
}
static size_t get_triplets_reciprocal_mesh_at_q(size_t *map_triplets,
size_t *map_q,
int (*grid_address)[3],
const int grid_point,
const int mesh[3],
const int is_time_reversal,
const int num_rot,
TPLCONST int (*rotations)[3][3],
const int swappable)
{
MatINT *rot_real;
int i;
size_t num_ir;
rot_real = mat_alloc_MatINT(num_rot);
for (i = 0; i < num_rot; i++) {
mat_copy_matrix_i3(rot_real->mat[i], rotations[i]);
}
num_ir = tpk_get_ir_triplets_at_q(map_triplets,
map_q,
grid_address,
grid_point,
mesh,
is_time_reversal,
rot_real,
swappable);
mat_free_MatINT(rot_real);
return num_ir;
}
|
opencl_blockchain_fmt_plug.c | /* blockchain "My Wallet" cracker patch for JtR. Hacked together during June of
* 2013 by Dhiru Kholia <dhiru at openwall.com>.
*
* See https://blockchain.info/wallet/wallet-format
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz at openwall.net>
* and Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>, and it is
* hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_blockchain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_blockchain);
#else
#include <string.h>
#include <openssl/aes.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "jumbo.h"
#include "common-opencl.h"
#include "options.h"
#define FORMAT_LABEL "blockchain-opencl"
#define FORMAT_NAME "blockchain My Wallet"
#define FORMAT_TAG "$blockchain$"
#define TAG_LENGTH 12
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN MEM_ALIGN_WORD
#define BIG_ENOUGH (8192 * 32)
// increase me (in multiples of 16) to increase the decrypted and search area
#define SAFETY_FACTOR 16
#define uint8_t unsigned char
#define uint16_t unsigned short
#define uint32_t ARCH_WORD_32
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} blockchain_password;
typedef struct {
uint32_t v[32/4];
} blockchain_hash;
typedef struct {
uint8_t length;
uint8_t salt[16];
int iterations;
int outlen;
} blockchain_salt;
static int *cracked;
static int any_cracked;
static struct fmt_tests blockchain_tests[] = {
{"$blockchain$400$53741f25a90ef521c90bb2fd73673e64089ff2cca6ba3cbf6f34e0f80f960b2f60b9ac48df009dc30c288dcf1ade5f16c70a3536403fc11a68f242ba5ad3fcceae3ca5ecd23905997474260aa1357fc322b1434ffa026ba6ad33707c9ad5260e7230b87d8888a45ddc27513adb30af8755ec0737963ae6bb281318c48f224e9c748f6697f75f63f718bebb3401d6d5f02cf62b1701c205762c2f43119b68771ed10ddab79b5f74f56d611f61f77b8b65b5b5669756017429633118b8e5b8b638667e44154de4cc76468c4200eeebda2711a65333a7e3c423c8241e219cdca5ac47c0d4479444241fa27da20dba1a1d81e778a037d40d33ddea7c39e6d02461d97185f66a73deedff39bc53af0e9b04a3d7bf43648303c9f652d99630cd0789819376d68443c85f0eeb7af7c83eecddf25ea912f7721e3fb73ccaedf860f0f033ffc990ed73db441220d0cbe6e029676fef264dc2dc497f39bedf4041ba355d086134744d5a36e09515d230cd499eb20e0c574fb1bd9d994ce26f53f21d06dd58db4f8e0efbcaee7038df793bbb3daa96", "strongpassword"},
{"$blockchain$384$ece598c58b22a3b245a02039ce36bdf589a86b6344e802b4a3ac9b727cc0b6977e9509bc1ac4d1b7b9cbf9089ecdc89706f0a469325f7ee218b2212b6cd3e32677be20eee91e267fe13ebded02946d4ae1163ef22b3dca327d7390091247ac770288a0c7be181b21a48a8f945d9913cdfdc4cfd739ee3a41ced11cacde22e3233250e36f8b8fb4d81de5298a84374af75b88afda3438eed232e52aa0eb29e0d475456c86ae9d1aaadca14bc25f273c93fd4d7fd8316ed5306733bca77e8214277edd3155342abe0710985dc20b4f80e6620e386aa7658f92df25c7c932f0eb1beca25253662bd558647a3ba741f89450bfdba59a0c016477450fbcecd62226626e06ed2e3f5a4180e32d534c7769bcd1160aad840cfd3b7b13a90d34fedb3408fe74379a9e8a840fe3bfee8e0ee01f77ee389613fa750c3d2771b83eeb4e16598f76c15c311c325bd5d54543571aa20934060e332f451e58d67ad0f4635c0c021fa76821a68d64f1a5fb6fd70365eef4442cedcc91eb8696d52d078807edd89d", "qwertyuiop1"},
{NULL}
};
static struct custom_salt {
unsigned char data[BIG_ENOUGH];
int length;
} *cur_salt;
static cl_int cl_error;
static blockchain_password *inbuffer;
static blockchain_hash *outbuffer;
static blockchain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
size_t insize, outsize, settingsize, cracked_size;
#define MIN(a, b) (((a) > (b)) ? (b) : (a))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define OCL_CONFIG "blockchain"
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). It has to be included after the format definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
static size_t get_task_max_size()
{
return 0;
}
static size_t get_default_workgroup()
{
if (cpu(device_info[gpu_id]))
return get_platform_vendor_id(platform_id) == DEV_INTEL ?
8 : 1;
else
return 64;
}
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(blockchain_password) * gws;
outsize = sizeof(blockchain_hash) * gws;
settingsize = sizeof(blockchain_salt);
cracked_size = sizeof(*cracked) * gws;
inbuffer = mem_calloc(insize);
outbuffer = mem_alloc(outsize);
cracked = mem_calloc(cracked_size);
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
static void release_clobj(void)
{
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(cracked);
}
static void done(void)
{
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}
static void init(struct fmt_main *self)
{
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
PLAINTEXT_LENGTH,
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize OpenCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL,
warn, 1, self, create_clobj, release_clobj,
sizeof(blockchain_password), 0);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int len;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += TAG_LENGTH;
if ((p = strtok(ctcopy, "$")) == NULL)
goto err;
len = atoi(p);
if(len > BIG_ENOUGH)
goto err;
if ((p = strtok(NULL, "$")) == NULL)
goto err;
if (strlen(p) != len * 2)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static union {
struct custom_salt _cs;
ARCH_WORD_32 dummy;
} un;
struct custom_salt *cs = &(un._cs);
ctcopy += 12;
p = strtok(ctcopy, "$");
cs->length = atoi(p);
p = strtok(NULL, "$");
for (i = 0; i < cs->length; i++)
cs->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
memcpy((char*)currentsalt.salt, cur_salt->data, 16);
currentsalt.length = 16;
currentsalt.iterations = 10;
currentsalt.outlen = 32;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
"Copy salt to gpu");
}
#undef set_key
static void set_key(char *key, int index)
{
uint8_t length = strlen(key);
if (length > PLAINTEXT_LENGTH)
length = PLAINTEXT_LENGTH;
inbuffer[index].length = length;
memcpy(inbuffer[index].v, key, length);
}
static char *get_key(int index)
{
static char ret[PLAINTEXT_LENGTH + 1];
uint8_t length = inbuffer[index].length;
memcpy(ret, inbuffer[index].v, length);
ret[length] = '\0';
return ret;
}
static int blockchain_decrypt(unsigned char *derived_key, unsigned char *data)
{
unsigned char out[SAFETY_FACTOR];
AES_KEY akey;
unsigned char iv[16];
memcpy(iv, cur_salt->data, 16);
if(AES_set_decrypt_key(derived_key, 256, &akey) < 0) {
fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
}
AES_cbc_encrypt(data + 16, out, SAFETY_FACTOR, &akey, iv, AES_DECRYPT);
/* various tests */
if (out[0] != '{') // fast test
return -1;
/* XXX we are assuming that "guid" will be found in the first block
* itself (when SAFETY_FACTOR is 16).
*/
if (memmem(out, SAFETY_FACTOR, "\"guid\"", 6))
return 0;
if (memmem(out, SAFETY_FACTOR, "sharedKey", 9))
return 0;
/* XXX more tests required?
* if (memmem(out, cur_salt->length, "pbkdf2_iterations", 17))
* return 0;
*/
return -1;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index;
global_work_size = (count + local_work_size - 1) / local_work_size * local_work_size;
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
/// Copy data to gpu
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, &local_work_size, 0, NULL,
multi_profilingEvent[1]), "Run kernel");
/// Read the result back
HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
if (!blockchain_decrypt((unsigned char*)outbuffer[index].v, cur_salt->data))
{
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
return count;
}
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_opencl_blockchain = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
blockchain_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
client.c | /*
This was a collaboration between:
George Plukov (plukovga, 1316246)
Jean Lucas Ferreira (ferrejll, 1152120)
- The current code creates two remote servers (append_server and verify_server)
- append_server is connected to verify_server via UDP socket
- All required features have been implemented
- Enforces properties F0, F2 and F3, sometimes enforces F1
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <math.h>
#include <string.h>
#include "append.h"
#include "verify.h"
#include "client.h"
//append and verify clients
CLIENT *clnt_app, *clnt_ver;
//will hold all arguments as CSV
static char* all_args;
int main(int argc, char ** argv) {
property_index = atoi(argv[1]);
thread_count = atoi(argv[2]);
segment_length = atoi(argv[3]);
num_segments = atoi(argv[4]);
c0 = argv[5][0];
c1 = argv[6][0];
c2 = argv[7][0];
host_name1 = argv[8];
host_name2 = argv[9];
max_str_size = num_segments*segment_length;
all_args = malloc(64 + strlen(host_name2)); //room for four ints, three chars, the commas and the host name
sprintf(all_args, "%d,%d,%d,%d,%c,%c,%c,%s", \
property_index,thread_count,segment_length,num_segments,c0,c1,c2,host_name2);
validate_arguments();
//now we can initialize the program.
if (init() == -1) {
printf("program failed to initialize\n");
return 1;
}
//start the program
#pragma omp parallel num_threads(thread_count) reduction(+ : tot_num_passed_segments)
{
construct_string();
check_segment();
}
write_output();
teardown();
return 0;
}
//quick check to see if arguments are valid.
void validate_arguments() {
int valid = 1;
if (thread_count < 3 || thread_count > 8) {
valid = 0;
}
if (property_index < 0 || property_index > 3) {
valid = 0;
}
if (segment_length < 1 || num_segments < 1) {
valid = 0;
}
if ( (segment_length % 2 == 1) && thread_count == 3 && (property_index == 0 || property_index == 3 )) {
printf("Impossible configuration: 3 letters and odd number of spaces per segment\n");
exit(0);
}
if ((segment_length <= 1) && thread_count == 3 && (property_index == 0 || property_index == 1 || property_index == 3 )) {
printf("Impossible configuration\n");
exit(0);
}
if ( (segment_length < 2) && property_index == 2 && thread_count == 3){
printf("Impossible configuration for Property 3\n");
exit(0);
}
if (!valid) {
printf("Invalid arguments given\n");
exit(0);
}
if ( strcmp(host_name1,SERVER_MOORE_IP) != 0 && strcmp(host_name1,SERVER_MILLS_IP) != 0 ) {
printf("Invalid host name 1 given. Expected one of:\n\tMoore: %s\n\tMills: %s\n",SERVER_MOORE_IP, SERVER_MILLS_IP );
exit(0);
}
if ( strcmp(host_name2,SERVER_MOORE_IP) != 0 && strcmp(host_name2,SERVER_MILLS_IP) != 0 ) {
printf("Invalid host name 2 given. Expected one of:\n\tMoore: %s\n\tMills: %s\n",SERVER_MOORE_IP, SERVER_MILLS_IP );
exit(0);
}
}
int init() {
//for generating random numbers
srand(time(NULL));
// S is the final generated string
S = calloc(max_str_size + 1, sizeof(char));
int *result;
/**** setup RPC init append server ****/
clnt_app = clnt_create(host_name1,APPENDPROG,APPENDVERS,"tcp");
if (clnt_app == (CLIENT *)NULL) {
clnt_pcreateerror(host_name1);
return -1;
}
result = rpcinitappendserver_1(&all_args, clnt_app);
if (result == (int *) NULL) {
clnt_perror(clnt_app,host_name1);
return -1;
}
/**** setup RPC init verify server ****/
clnt_ver = clnt_create(host_name2,VERIFYPROG,VERIFYVERS,"tcp");
if (clnt_ver == (CLIENT *)NULL) {
clnt_pcreateerror(host_name2);
return -1;
}
result = rpcinitverifyserver_1(clnt_ver);
if (result == (int *) NULL) {
clnt_perror(clnt_ver,host_name2);
return -1;
}
return 1;
}
//client builds the string by trying to append via APPEND server
int construct_string() {
int rank = omp_get_thread_num();
char c = 'a' + rank;
char* ptr_c = &c;
struct timespec sleepTime;
sleepTime.tv_sec = 0;
int *success_ret;
int success = 1;
/** begin to attempt adding character c to the string in the Append_Server **/
while (success >= 0) {
sleepTime.tv_nsec = MIN_SLEEP_TIME + rand() % MAX_SLEEP_TIME;
nanosleep(&sleepTime, NULL);
//RPC to try an append string
// success == 0, c was added
// success == 1, c was NOT added
// success == -1, S is complete
#pragma omp critical
success_ret = rpcappend_1(&ptr_c, clnt_app);
success = *success_ret;
}
return 0;
}
/** String in append server is complete, begin to check segments **/
int check_segment() {
char* segment = malloc(sizeof(char)*(segment_length+1));
int seg_index;
int valid_segment = 0;
my_struct *response;
while ( 1 ) {
#pragma omp critical
response = rpcgetseg_1(segment_length,clnt_ver);
char* q = response->data;
sscanf(q, "%d,%s", &seg_index, segment);
//we are done receiving segments
if (strcmp(segment,SEGMENT_FINISH) == 0) {
break;
}
//verify the segment is valid
valid_segment = check_property(segment);
// printf("segment %s is valid = %d. %d\n", segment, valid_segment,tot_num_passed_segments);
//this is a sum reduction each thread partakes
tot_num_passed_segments += valid_segment;
//concat the string
int starting_pos = seg_index*segment_length;
size_t i,j;
for (i = starting_pos, j = 0; j < segment_length; i++, j++) {
S[i] = segment[j];
}
}
return 1;
}
//given a segment of the string, check if it satisfies the property
int check_property(char* seg) {
size_t i;
int num_c0 = 0, num_c1 = 0, num_c2 = 0;
//count how many occurrences of each c0,c1,c2 in the given segment
#pragma omp parallel for num_threads(thread_count) \
reduction(+: num_c0, num_c1, num_c2) \
schedule(static,1)
for ( i = 0; i < segment_length; i++) {
if (seg[i] == c0) {
num_c0++;
}
else if (seg[i] == c1) {
num_c1++;
}
else if (seg[i] == c2) {
num_c2++;
}
}
//check the necessary property condition
if (property_index == 0 && (num_c0 + num_c1) == num_c2 ) {
return 1;
}
else if (property_index == 1 && (num_c0 + 2*num_c1) == num_c2 ) {
return 1;
}
else if (property_index == 2 && (num_c0 * num_c1) == num_c2 ) {
return 1;
}
else if (property_index == 3 && (num_c0 - num_c1) == num_c2 ) {
return 1;
}
return 0;
}
void write_output() {
printf("%s\n%d\n", S, tot_num_passed_segments);
//write to out.txt
FILE *f = fopen(OUTPUT_FILE, "w");
fprintf(f, "%s\n", S);
fprintf(f, "%d\n",tot_num_passed_segments );
fclose(f);
}
void teardown() {
clnt_destroy(clnt_app);
clnt_destroy(clnt_ver);
free(all_args);
free(S);
}
|
GB_unaryop__abs_int32_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_uint64
// op(A') function: GB_tran__abs_int32_uint64
// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int32_uint64
(
int32_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int32_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gru_utils.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "lite/backends/arm/math/sgemm.h"
namespace paddle {
namespace lite {
namespace arm {
namespace math {
template <typename T>
struct GRUMetaValue {
T* gate_weight;
T* state_weight;
T* gate_value;
T* reset_output_value;
T* output_value;
T* prev_out_value;
};
template <typename Dtype>
inline void gru_add_with_bias(
const Dtype* din, const Dtype* bias, Dtype* dout, int batch, int size);
template <>
inline void gru_add_with_bias(
const float* din, const float* bias, float* dout, int batch, int size) {
#pragma omp parallel for
for (int i = 0; i < batch; ++i) {
int j = 0;
auto din_batch = din + i * size;
auto dout_batch = dout + i * size;
float32x4_t vb0 = vld1q_f32(bias);
float32x4_t vin0 = vld1q_f32(din_batch);
float32x4_t vout0;
float32x4_t vout1;
float32x4_t vin1;
float32x4_t vb1;
for (; j < size - 7; j += 8) {
vin1 = vld1q_f32(din_batch + j + 4);
vb1 = vld1q_f32(bias + j + 4);
vout0 = vaddq_f32(vb0, vin0);
vout1 = vaddq_f32(vb1, vin1);
vb0 = vld1q_f32(bias + j + 8);
vin0 = vld1q_f32(din_batch + j + 8);
vst1q_f32(dout_batch + j, vout0);
vst1q_f32(dout_batch + j + 4, vout1);
}
for (; j < size; ++j) {
dout_batch[j] = din_batch[j] + bias[j];
}
}
}
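// For reference, a minimal scalar sketch (an illustrative addition, not part
// of the Paddle-Lite API) of what the NEON specialization above computes:
// each of the `batch` rows of `din` gets the shared `bias` vector added
// element-wise; the vectorized path above does the same, eight floats at a time.
template <typename Dtype>
inline void gru_add_with_bias_ref(
    const Dtype* din, const Dtype* bias, Dtype* dout, int batch, int size) {
  for (int i = 0; i < batch; ++i) {
    const Dtype* din_batch = din + i * size;
    Dtype* dout_batch = dout + i * size;
    for (int j = 0; j < size; ++j) {
      dout_batch[j] = din_batch[j] + bias[j];
    }
  }
}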
template <lite_api::ActivationType Act>
static void gru_unit_reset_act_impl(float* updata_gate,
int stride_update,
float* reset_gate,
int stride_reset,
const float* hidden_prev,
int stride_hidden_prev,
float* reset_hidden_prev,
int stride_reset_hidden_prev,
int frame_size,
int batch_size) {
#pragma omp parallel for
for (int b = 0; b < batch_size; ++b) {
float32x4_t vpre0 = vdupq_n_f32(0.f);
float32x4_t vpre1 = vdupq_n_f32(0.f);
float prev = 0.f;
int i = 0;
for (; i < frame_size - 7; i += 8) {
float32x4_t vu0 = vld1q_f32(updata_gate + i);
float32x4_t vu1 = vld1q_f32(updata_gate + i + 4);
float32x4_t vr0 = vld1q_f32(reset_gate + i);
float32x4_t vr1 = vld1q_f32(reset_gate + i + 4);
float32x4_t vau0 = lite::arm::math::vactive_f32<Act>(vu0);
float32x4_t vau1 = lite::arm::math::vactive_f32<Act>(vu1);
if (hidden_prev) {
vpre0 = vld1q_f32(hidden_prev + i);
vpre1 = vld1q_f32(hidden_prev + i + 4);
}
float32x4_t var0 = lite::arm::math::vactive_f32<Act>(vr0);
float32x4_t var1 = lite::arm::math::vactive_f32<Act>(vr1);
vst1q_f32(updata_gate + i, vau0);
vst1q_f32(updata_gate + i + 4, vau1);
float32x4_t vres0 = vmulq_f32(vpre0, var0);
float32x4_t vres1 = vmulq_f32(vpre1, var1);
vst1q_f32(reset_gate + i, var0);
vst1q_f32(reset_gate + i + 4, var1);
vst1q_f32(reset_hidden_prev + i, vres0);
vst1q_f32(reset_hidden_prev + i + 4, vres1);
}
for (; i < frame_size; ++i) {
updata_gate[i] = lite::arm::math::active_f32<Act>(updata_gate[i]);
reset_gate[i] = lite::arm::math::active_f32<Act>(reset_gate[i]);
if (hidden_prev) {
prev = hidden_prev[i];
}
reset_hidden_prev[i] = reset_gate[i] * prev;
}
updata_gate += stride_update;
reset_gate += stride_reset;
if (hidden_prev) {
hidden_prev += stride_hidden_prev;
}
reset_hidden_prev += stride_reset_hidden_prev;
}
}
template <lite_api::ActivationType Act>
static void gru_unit_out_act_impl(bool origin_mode,
float* updata_gate,
int stride_update,
float* cell_state,
int stride_cell_state,
const float* hidden_prev,
int stride_hidden_prev,
float* hidden,
int stride_hidden,
int frame_size,
int batch_size) {
#pragma omp parallel for
for (int b = 0; b < batch_size; ++b) {
float32x4_t vpre0 = vdupq_n_f32(0.f);
float32x4_t vpre1 = vdupq_n_f32(0.f);
float prev = 0.f;
int i = 0;
if (origin_mode) {
for (; i < frame_size - 7; i += 8) {
float32x4_t vc0 = vld1q_f32(cell_state + i);
float32x4_t vc1 = vld1q_f32(cell_state + i + 4);
float32x4_t vu0 = vld1q_f32(updata_gate + i);
float32x4_t vu1 = vld1q_f32(updata_gate + i + 4);
float32x4_t vac0 = lite::arm::math::vactive_f32<Act>(vc0);
float32x4_t vac1 = lite::arm::math::vactive_f32<Act>(vc1);
if (hidden_prev) {
vpre0 = vld1q_f32(hidden_prev + i);
vpre1 = vld1q_f32(hidden_prev + i + 4);
}
float32x4_t vh0 = vmlsq_f32(vac0, vu0, vac0);
float32x4_t vh1 = vmlsq_f32(vac1, vu1, vac1);
vst1q_f32(cell_state + i, vac0);
vst1q_f32(cell_state + i + 4, vac1);
vh0 = vmlaq_f32(vh0, vu0, vpre0);
vh1 = vmlaq_f32(vh1, vu1, vpre1);
vst1q_f32(hidden + i, vh0);
vst1q_f32(hidden + i + 4, vh1);
}
for (; i < frame_size; ++i) {
if (hidden_prev) {
prev = hidden_prev[i];
}
cell_state[i] = lite::arm::math::active_f32<Act>(cell_state[i]);
hidden[i] =
cell_state[i] * (1.f - updata_gate[i]) + updata_gate[i] * prev;
}
} else {
for (; i < frame_size - 7; i += 8) {
float32x4_t vc0 = vld1q_f32(cell_state + i);
float32x4_t vc1 = vld1q_f32(cell_state + i + 4);
float32x4_t vu0 = vld1q_f32(updata_gate + i);
float32x4_t vu1 = vld1q_f32(updata_gate + i + 4);
float32x4_t vac0 = lite::arm::math::vactive_f32<Act>(vc0);
float32x4_t vac1 = lite::arm::math::vactive_f32<Act>(vc1);
if (hidden_prev) {
vpre0 = vld1q_f32(hidden_prev + i);
vpre1 = vld1q_f32(hidden_prev + i + 4);
}
float32x4_t vh0 = vmlsq_f32(vpre0, vpre0, vu0);
float32x4_t vh1 = vmlsq_f32(vpre1, vpre1, vu1);
vst1q_f32(cell_state + i, vac0);
vst1q_f32(cell_state + i + 4, vac1);
vh0 = vmlaq_f32(vh0, vu0, vac0);
vh1 = vmlaq_f32(vh1, vu1, vac1);
vst1q_f32(hidden + i, vh0);
vst1q_f32(hidden + i + 4, vh1);
}
for (; i < frame_size; ++i) {
cell_state[i] = lite::arm::math::active_f32<Act>(cell_state[i]);
if (hidden_prev) {
prev = hidden_prev[i];
}
hidden[i] =
prev * (1.f - updata_gate[i]) + updata_gate[i] * cell_state[i];
}
}
updata_gate += stride_update;
cell_state += stride_cell_state;
if (hidden_prev) {
hidden_prev += stride_hidden_prev;
}
hidden += stride_hidden;
}
}
inline void gru_unit_reset_act(lite_api::ActivationType act_type,
GRUMetaValue<float> value,
int frame_size,
int batch_size) {
auto updata_gate = value.gate_value;
auto reset_gate = value.gate_value + frame_size;
auto hidden_prev = value.prev_out_value;
auto reset_hidden_prev = value.reset_output_value;
int stride_update = 3 * frame_size;
int stride_reset = 3 * frame_size;
int stride_hidden_prev = frame_size;
int stride_reset_hidden_prev = frame_size;
switch (act_type) {
case lite_api::ActivationType::kIndentity:
gru_unit_reset_act_impl<lite_api::ActivationType::kIndentity>(
updata_gate,
stride_update,
reset_gate,
stride_reset,
hidden_prev,
stride_hidden_prev,
reset_hidden_prev,
stride_reset_hidden_prev,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kTanh:
gru_unit_reset_act_impl<lite_api::ActivationType::kTanh>(
updata_gate,
stride_update,
reset_gate,
stride_reset,
hidden_prev,
stride_hidden_prev,
reset_hidden_prev,
stride_reset_hidden_prev,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kSigmoid:
gru_unit_reset_act_impl<lite_api::ActivationType::kSigmoid>(
updata_gate,
stride_update,
reset_gate,
stride_reset,
hidden_prev,
stride_hidden_prev,
reset_hidden_prev,
stride_reset_hidden_prev,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kRelu:
gru_unit_reset_act_impl<lite_api::ActivationType::kRelu>(
updata_gate,
stride_update,
reset_gate,
stride_reset,
hidden_prev,
stride_hidden_prev,
reset_hidden_prev,
stride_reset_hidden_prev,
frame_size,
batch_size);
break;
default:
break;
}
}
inline void gru_unit_out_act(lite_api::ActivationType act_type,
bool origin_mode,
GRUMetaValue<float> value,
int frame_size,
int batch_size) {
auto updata_gate = value.gate_value;
auto cell_state = value.gate_value + 2 * frame_size;
auto hidden_prev = value.prev_out_value;
auto hidden = value.output_value;
int stride_update = 3 * frame_size;
int stride_cell_state = 3 * frame_size;
int stride_hidden_prev = frame_size;
int stride_hidden = frame_size;
switch (act_type) {
case lite_api::ActivationType::kIndentity:
gru_unit_out_act_impl<lite_api::ActivationType::kIndentity>(
origin_mode,
updata_gate,
stride_update,
cell_state,
stride_cell_state,
hidden_prev,
stride_hidden_prev,
hidden,
stride_hidden,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kTanh:
gru_unit_out_act_impl<lite_api::ActivationType::kTanh>(origin_mode,
updata_gate,
stride_update,
cell_state,
stride_cell_state,
hidden_prev,
stride_hidden_prev,
hidden,
stride_hidden,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kSigmoid:
gru_unit_out_act_impl<lite_api::ActivationType::kSigmoid>(
origin_mode,
updata_gate,
stride_update,
cell_state,
stride_cell_state,
hidden_prev,
stride_hidden_prev,
hidden,
stride_hidden,
frame_size,
batch_size);
break;
case lite_api::ActivationType::kRelu:
gru_unit_out_act_impl<lite_api::ActivationType::kRelu>(origin_mode,
updata_gate,
stride_update,
cell_state,
stride_cell_state,
hidden_prev,
stride_hidden_prev,
hidden,
stride_hidden,
frame_size,
batch_size);
break;
default:
break;
}
}
template <typename T>
struct GRUUnitFunctor {
static void compute(GRUMetaValue<T> value,
int frame_size,
int batch_size,
const lite_api::ActivationType active_node,
const lite_api::ActivationType active_gate,
bool origin_mode,
ARMContext* ctx) {
operators::ActivationParam act_param;
act_param.has_active = false;
if (value.prev_out_value) {
sgemm(false,
false,
batch_size,
frame_size * 2,
frame_size,
1.f,
value.prev_out_value,
frame_size,
value.gate_weight,
frame_size * 2,
1.f,
value.gate_value,
frame_size * 3,
nullptr,
false,
act_param,
ctx);
}
gru_unit_reset_act(active_gate, value, frame_size, batch_size);
if (value.prev_out_value) {
sgemm(false,
false,
batch_size,
frame_size,
frame_size,
1.f,
value.reset_output_value,
frame_size,
value.state_weight,
frame_size,
1.f,
value.gate_value + frame_size * 2,
frame_size * 3,
nullptr,
false,
act_param,
ctx);
}
gru_unit_out_act(active_node, origin_mode, value, frame_size, batch_size);
}
};
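// A hedged summary of the per-frame math in GRUUnitFunctor::compute, as
// reconstructed from the kernels above (the notation is illustrative, not
// taken from Paddle-Lite documentation). gate_value holds contiguous
// [update | reset | cell] blocks of frame_size each:
//   [u, r] = act_gate(x_ur + h_prev * W_gate)         // first sgemm + gru_unit_reset_act
//   c      = act_node(x_c + (r .* h_prev) * W_state)  // second sgemm + gru_unit_out_act
//   h      = origin_mode ? u .* h_prev + (1 - u) .* c
//                        : (1 - u) .* h_prev + u .* c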
} // namespace math
} // namespace arm
} // namespace lite
} // namespace paddle
|
copy_mat.c | /*
Copy a matrix A to a second matrix B in our blocked format.
*/
#include <stdlib.h>
#include <stdio.h>
#include "declarations.h"
void copy_mat(A,B)
struct blockmatrix A,B;
{
int blk,i,j;
double *p;
double *q;
for (blk=1; blk<=A.nblocks; blk++)
{
switch (A.blocks[blk].blockcategory)
{
case DIAG:
p=A.blocks[blk].data.vec;
q=B.blocks[blk].data.vec;
for (i=1; i<=A.blocks[blk].blocksize; i++)
q[i]=p[i];
break;
case MATRIX:
p=A.blocks[blk].data.mat;
q=B.blocks[blk].data.mat;
#pragma omp parallel for schedule(dynamic,64) private(i,j) shared(p,q)
for (j=1; j<=A.blocks[blk].blocksize; j++)
for (i=1; i<=A.blocks[blk].blocksize; i++)
q[ijtok(i,j,A.blocks[blk].blocksize)]=
p[ijtok(i,j,A.blocks[blk].blocksize)];
break;
default:
printf("copy_mat illegal block type \n");
exit(12);
};
}
}
|
GB_unaryop__lnot_fp32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_int16
// op(A') function: GB_tran__lnot_fp32_int16
// C type: float
// A type: int16_t
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_fp32_int16
(
float *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_fp32_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRACC_OMP_014_Counter_wrong_critical_simd_Inter_yes.c | /*
Concurrent access on a counter with the wrong lock, by utilising OpenMP critical directives and simd. Atomicity Violation.
Two locks are used to ensure that addition and subtraction cannot be interrupted by themselves on other teams.
However, they are able to interrupt each other, leading to a wrong result. Inter Region.
*/
#include <omp.h>
#include <stdio.h>
#include <stdbool.h>
#define N 100000
#define C 512
int countervar[C];
int init(){
for(int i=0; i<C; i++){
countervar[i]=0;
}
return 0;
}
int count(){
#pragma omp target map(tofrom:countervar[0:C]) device(0)
#pragma omp teams distribute
for(int j=0; j<N; j++){
#pragma omp critical(addlock)
#pragma omp simd
for(int i=0; i<C; i++){
countervar[i]++;
}
#pragma omp critical(sublock)
#pragma omp simd
for(int i=0; i<C; i++){
countervar[i] -= 2;
}
}
return 0;
}
int check(){
bool test = false;
for(int i=0; i<C; i++){
if(countervar[i]!= -N){
test = true;
}
}
printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
return 0;
}
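/* A hedged sketch (illustrative only, not part of the benchmark, and unused
 * by main below): replacing the two mismatched critical sections with atomic
 * updates makes each read-modify-write indivisible, so the interleaving of
 * additions and subtractions no longer matters and every counter ends at -N. */
int count_fixed(){
	#pragma omp target map(tofrom:countervar[0:C]) device(0)
	#pragma omp teams distribute
	for(int j=0; j<N; j++){
		for(int i=0; i<C; i++){
			#pragma omp atomic
			countervar[i]++;
		}
		for(int i=0; i<C; i++){
			#pragma omp atomic
			countervar[i] -= 2;
		}
	}
	return 0;
}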
int main(){
init();
count();
check();
return 0;
} |
OpenMPHelper.h | #ifndef PICA_OMPHELPER_H
#define PICA_OMPHELPER_H
/* This file should be included everywhere instead of <omp.h>.
If the code is built with OpenMP it just includes <omp.h>, otherwise it
provides wrappers for OpenMP functions and #pragma omp.
The code including this file can use OpenMP functions and
pragmas independently of whether it will be built with OpenMP or not. */
#ifdef _OPENMP
#include <omp.h>
namespace pica {
inline bool useOpenMP() { return true; }
inline int getNumThreads() {
int numThreads = 0;
#pragma omp parallel
{
#pragma omp master
numThreads = omp_get_num_threads();
}
return numThreads;
}
} // namespace pica
#else
typedef void* omp_lock_t;
inline int omp_get_max_threads() { return 1; }
inline int omp_get_thread_num() { return 0; }
inline int omp_get_num_threads() { return 1; }
inline void omp_set_num_threads(int) {}
inline void omp_init_lock(omp_lock_t *) {}
inline void omp_set_lock(omp_lock_t *) {}
inline void omp_unset_lock(omp_lock_t *) {}
inline void omp_destroy_lock(omp_lock_t *) {}
#pragma omp
namespace pica
{
inline bool useOpenMP() { return false; }
inline int getNumThreads() { return 1; }
} // namespace pica
#endif
#ifdef _MSC_VER
#define collapse(N)
#endif
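/* A minimal usage sketch (hypothetical; reportThreads is not part of pica):
   because the serial stubs above mirror the OpenMP API, this compiles and
   behaves sensibly in both OpenMP and serial builds. */
#include <cstdio>
namespace pica {
inline void reportThreads() {
    // Prints 1 thread in a serial build, the real team size otherwise.
    std::printf("OpenMP %s, %d thread(s)\n",
                useOpenMP() ? "enabled" : "disabled",
                getNumThreads());
}
} // namespace pica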
#endif
|
DRB068-restrictpointer2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The restrict type qualifier is an indication to the compiler that,
if the memory addressed by the restrict-qualified pointer is modified, no other pointer will access that same memory.
If a particular chunk of memory is not modified, it can be aliased through more than one restricted pointer.
A C99 restrict feature.
For gcc, you must use -std=c99 to compile this program.
*/
#include <stdlib.h>
#include <stdio.h>
void init(int n, int * restrict a, int * restrict b, int * restrict c)
{
int i;
#pragma omp parallel for private(i )
for (i = 0; i < n; i++) {
a[i] = 1;
b[i] = i;
c[i] = i * i;
}
}
void foo(int n, int * restrict a, int * restrict b, int * restrict c)
{
int i;
#pragma omp parallel for private(i )
for (i = 0; i < n; i++)
a[i] = b[i] + c[i];
}
void print(int n, int * restrict a, int * restrict b, int * restrict c)
{
int i;
for (i = 0; i < n; i++) {
printf("%d %d %d\n", a[i], b[i], c[i]);
}
}
int main()
{
int n = 1000;
int * a , *b, *c;
a = (int*) malloc (n* sizeof (int));
if (a ==0)
{
fprintf (stderr, "skip the execution due to malloc failures.\n");
return 1;
}
b = (int*) malloc (n* sizeof (int));
if (b ==0)
{
fprintf (stderr, "skip the execution due to malloc failures.\n");
return 1;
}
c = (int*) malloc (n* sizeof (int));
if (c ==0)
{
fprintf (stderr, "skip the execution due to malloc failures.\n");
return 1;
}
init (n, a, b,c);
foo (n, a, b,c);
print (n, a, b,c);
free (a);
free (b);
free (c);
return 0;
}
|
omp_bar_bench.c | /*
* Copyright (c) 2013-2016, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#include <omp.h>
#include "measurement_framework.h"
#include "placement.h"
#include <sched.h>
#include <pthread.h>
/*-----------------------------------------------------------*/
/* barrierKernel */
/* */
/* Main kernel for barrier benchmark. */
/* First threads under each process synchronise with an */
/* OMP BARRIER. Then a MPI barrier synchronises each MPI */
/* process. MPI barrier is called within a OpenMP master */
/* directive. */
/*-----------------------------------------------------------*/
int barrierKernel(int totalReps, int fill){
int repIter;
struct sk_measurement * mes;
char outname[512];
int i;
if (fill) {
sprintf(outname, "barriers_kernel_omp_fill%d", \
omp_get_max_threads());
} else {
sprintf(outname, "barriers_kernel_omp_rr%d", \
omp_get_max_threads());
}
mes = (struct sk_measurement*) malloc(sizeof(struct sk_measurement)*omp_get_max_threads());
for(i=0; i<omp_get_max_threads(); i++){
uint64_t *buf = (uint64_t*) malloc(sizeof(uint64_t)*totalReps);
sk_m_init(&(mes[i]), totalReps, outname, buf);
}
uint32_t* cores = placement(omp_get_max_threads(), fill);
/* Open the parallel region */
#pragma omp parallel default(none) \
private(repIter) \
shared(totalReps,mes,cores)//,comm)
{
int tid =omp_get_thread_num();
//set affinity
cpu_set_t mask;
CPU_ZERO(&mask);
CPU_SET(cores[tid],&mask);
pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&mask);
for (repIter=0; repIter<totalReps; repIter++){
#pragma omp barrier
#pragma omp barrier
sk_m_restart_tsc(&(mes[tid]));
{
#pragma omp barrier
}
sk_m_add(&(mes[tid]));
}
}
for(i=0; i<omp_get_max_threads(); i++)
sk_m_print(&(mes[i]),i);
return 0;
}
int runBarrier(int totalReps, int fill, bool sync){
int repIter;
struct sk_measurement * mes;
char outname[512];
int i;
if (sync) {
sprintf(outname, "barriers_ompsync_fill%d", omp_get_max_threads());
} else {
sprintf(outname, "barriers_omp_fill%d", omp_get_max_threads());
}
mes = (struct sk_measurement*) malloc(sizeof(struct sk_measurement)*omp_get_max_threads());
for(i=0; i<omp_get_max_threads(); i++){
uint64_t *buf = (uint64_t*) malloc(sizeof(uint64_t)*totalReps);
sk_m_init(&(mes[i]), totalReps, outname, buf);
}
uint32_t* cores = placement(omp_get_max_threads(), fill);
/* Open the parallel region */
#pragma omp parallel default(none) \
private(repIter) \
shared(totalReps,mes,cores,sync)//,comm)
{
int tid =omp_get_thread_num();
//set affinity
cpu_set_t mask;
CPU_ZERO(&mask);
CPU_SET(cores[tid],&mask);
pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&mask);
for (repIter=0; repIter<totalReps; repIter++){
if (sync){
// In case of additional synchronization:
// Execute two more barriers before each barrier
#pragma omp barrier
#pragma omp barrier
// .. and reset the TSC to measure the cost of the next barrier ..
sk_m_restart_tsc(&(mes[tid]));
}
{
#pragma omp barrier
}
sk_m_add(&(mes[tid]));
}
}
for(i=0; i<omp_get_max_threads(); i++)
sk_m_print(&(mes[i]),i);
return 0;
}
int barrierDriver(int totalReps){
/* initialise repsToDo to defaultReps */
int repsToDo = totalReps;
printf("Entering\n");fflush(stdout);
/* perform warm-up for benchmark */
/* Initialise the benchmark */
barrierKernel(repsToDo, 1);
barrierKernel(repsToDo, 0);
return 0;
}
int main(int argc, char *argv[]){
int totalReps = 1000;
if (argc != 2) {
printf("Calling Barrier driver\n");
barrierDriver(totalReps);
} else {
printf("Running barrier benchmark\n");
runBarrier(totalReps, 1, 0);
runBarrier(totalReps, 1, 1);
}
}
|
locate.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <math.h>
#include <omp.h>
#include <lapacke_utils.h>
#include <mpi.h>
#define memory_isAligned(POINTER, BYTE_COUNT) \
(((uintptr_t)(const void *)(POINTER)) % (BYTE_COUNT) == 0)
int locate_l2_gridSearch__double64(const int ldgrd,
const int ngrd,
const int nobs,
const int iwantOT,
const double t0use,
const int *__restrict__ mask,
const double *__restrict__ tobs,
const double *__restrict__ tcorr,
const double *__restrict__ varobs,
const double *__restrict__ test,
double *__restrict__ t0,
double *__restrict__ objfn);
int locate_l2_gridSearch__float64(const int ldgrd,
const int ngrd,
const int nobs,
const int iwantOT,
const float t0use,
const int *__restrict__ mask,
const float *__restrict__ tobs,
const float *__restrict__ tcorr,
const float *__restrict__ varobs,
const float *__restrict__ test,
float *__restrict__ t0,
float *__restrict__ objfn);
int locate_l1_gridSearch__double64(const int ldgrd,
const int ngrd,
const int nobs,
const int iwantOT,
const double t0use,
const int *__restrict__ mask,
const double *__restrict__ tobs,
const double *__restrict__ varobs,
const double *__restrict__ test,
double *__restrict__ t0,
double *__restrict__ objfn);
static void locate_l2_stackT0__double64(const int ngrd,
const double tobs_i,
const double xnorm,
const double wt_i,
const double *__restrict__ test,
double *__restrict__ t0);
static void locate_l2_stackT0__float64(const int ngrd,
const float tobs_i,
const float xnorm,
const float wt_i,
const float *__restrict__ test,
float *__restrict__ t0);
static void locate_l2_stackObjfn__double64(const int ngrd,
const double tobs_i,
const double wt_i,
const double *__restrict__ test,
const double *__restrict__ t0,
double *__restrict__ objfn);
static void locate_l2_stackObjfn__float64(const int ngrd,
const float tobs_i,
const float wt_i,
const float *__restrict__ test,
const float *__restrict__ t0,
float *__restrict__ objfn);
double weightedMedian__double(const int n,
const double *__restrict__ x,
const double *__restrict__ w,
int *__restrict__ perm,
bool *lsort, int *ierr);
void locate_setDouble64(const int n, const double x0in,
double *__restrict__ x);
void locate_setFloat64(const int n, const float x0in,
float *__restrict__ x);
static void locate_nullDouble64(const int n, double *__restrict__ x);
void locate_nullFloat64(const int n, float *__restrict__ x);
double locate_sumDouble64(const int n, const double *__restrict__ x);
float locate_sumFloat64(const int n, const float *__restrict__ x);
int locate_sumInt(const int n, const int *__restrict__ x);
int locate_minLocDouble64(const int n, const double *__restrict__ x);
int locate_minLocFloat64(const int n, const float *__restrict__ x);
int memory_malloc__double(const int n, double **x);
int memory_malloc__float(const int n, float **x);
int memory_malloc__int(const int n, int **x);
double *memory_calloc__double(const int n);
float *memory_calloc__float(const int n);
int *memory_calloc__int(const int n);
void makeTest(const int nx, const int ny, const int nz,
const double dx, const double dy, const double dz,
const double xsrc, const double ysrc, const double zsrc,
double *__restrict__ test);
double makeObs(const double x, const double y, const double z,
const double xsrc, const double ysrc, const double zsrc);
int main2()
{
return 0;
}
int main()
{
double *objfn, *xrec, *yrec, *zrec, *t0, *tcorr, *test, *tobs, *varobs;
float *objfn4, *t04, *tcorr4, *test4, *tobs4, *varobs4;
int *mask;
double dx, dy, dz, t0use, xsrc, ysrc, zsrc;
float t0use4;
int igrd, iobs, iopt, ixsrc, iysrc, izsrc, iwantOT,
ldgrd, ngrd, nobs, nx, ny, nz;
bool lsort;
srand(4042);
nobs = 20;
iwantOT = 1;
t0use = 4.0;
dx = 1.e3;
dy = 1.e3;
dz = 1.e3;
nx = 145;
ny = 145;
nz = 45;
ngrd = nx*ny*nz;
ldgrd = ngrd + 64 - ngrd%64;
ixsrc = 12;
iysrc = 15;
izsrc = 5;
printf("true: %d\n", izsrc*nx*ny + iysrc*nx + ixsrc);
xsrc = (double) ixsrc*dx;
ysrc = (double) iysrc*dy;
zsrc = (double) izsrc*dz;
xrec = memory_calloc__double(nobs);
yrec = memory_calloc__double(nobs);
zrec = memory_calloc__double(nobs);
tobs = memory_calloc__double(nobs);
tcorr = memory_calloc__double(nobs);
varobs = memory_calloc__double(nobs);
test = memory_calloc__double(nobs*ldgrd);
mask = (int *)calloc((size_t) nobs, sizeof(int));
// Make the receiver locations
for (iobs=0; iobs<nobs; iobs++)
{
xrec[iobs] = ((double) (rand()))/RAND_MAX;
yrec[iobs] = ((double) (rand()))/RAND_MAX;
zrec[iobs] = ((double) (rand()))/RAND_MAX;
xrec[iobs] = xrec[iobs]*(double) (nx - 1)*dx;
yrec[iobs] = yrec[iobs]*(double) (ny - 1)*dy;
zrec[iobs] = zrec[iobs]*(double) (nz - 1)*dz;
tobs[iobs] = makeObs(xrec[iobs], yrec[iobs], zrec[iobs],
xsrc, ysrc, zsrc);
varobs[iobs] = ((double) (rand()))/RAND_MAX; //0.2;
makeTest(nx, ny, nz, dx, dy, dz,
xrec[iobs], yrec[iobs], zrec[iobs],
&test[ldgrd*iobs]);
}
// Add the origin time
if (iwantOT == 1)
{
for (iobs=0; iobs<nobs; iobs++)
{
tobs[iobs] = tobs[iobs] + t0use;
}
}
t0 = memory_calloc__double(ngrd);
objfn = memory_calloc__double(ngrd);
locate_l1_gridSearch__double64(ldgrd, ngrd, nobs, iwantOT,
t0use, mask, tobs, varobs,
test, t0, objfn);
iopt = locate_minLocDouble64(ngrd, objfn);
printf("l1 first estimate: %d %f\n", iopt, t0[iopt]);
locate_l2_gridSearch__double64(ldgrd, ngrd, nobs, iwantOT,
t0use, mask, tobs, tcorr, varobs,
test, t0, objfn);
iopt = locate_minLocDouble64(ngrd, objfn);
printf("double estimate: %d %f\n", iopt, t0[iopt]);
locate_l1_gridSearch__double64(ldgrd, ngrd, nobs, iwantOT,
t0use, mask, tobs, varobs,
test, t0, objfn);
iopt = locate_minLocDouble64(ngrd, objfn);
printf("double l1 testimate: %d %f\n", iopt, t0[iopt]);
free(objfn);
free(t0);
// Set the float problem
t0use4 = (float) t0use;
tobs4 = memory_calloc__float(nobs);
tcorr4 = memory_calloc__float(nobs);
varobs4 = memory_calloc__float(nobs);
test4 = memory_calloc__float(ldgrd*nobs);
for (iobs=0; iobs<nobs; iobs++)
{
tobs4[iobs] = (float) tobs[iobs];
varobs4[iobs] = (float) varobs[iobs];
for (igrd=0; igrd<ngrd; igrd++)
{
test4[ldgrd*iobs+igrd] = (float) test[ldgrd*iobs+igrd];
}
}
free(test);
free(tobs);
free(tcorr);
free(varobs);
objfn4 = memory_calloc__float(ngrd);
t04 = memory_calloc__float(ngrd);
locate_l2_gridSearch__float64(ldgrd, ngrd, nobs, iwantOT,
t0use4, mask, tobs4, tcorr4, varobs4,
test4, t04, objfn4);
iopt = locate_minLocFloat64(ngrd, objfn4);
printf("float estimate: %d\n", iopt);
free(test4);
free(objfn4);
free(t04);
free(tobs4);
free(tcorr4);
free(varobs4);
free(mask);
free(xrec);
free(yrec);
free(zrec);
double xs[7] = {0.1, 0.35, 0.05, 0.1, 0.15, 0.05, 0.2};
double ws[7] = {0.1, 0.35, 0.05, 0.1, 0.15, 0.05, 0.2};
double w1[7] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
int perm[7];
double wmed;
int ierr;
wmed = weightedMedian__double(7, xs, ws, perm, &lsort, &ierr);
printf("%f\n", wmed);
wmed = weightedMedian__double(7, xs, w1, perm, &lsort, &ierr);
printf("%f\n", wmed);
return 0;
}
void makeTest(const int nx, const int ny, const int nz,
const double dx, const double dy, const double dz,
const double xsrc, const double ysrc, const double zsrc,
double *__restrict__ test)
{
double d2, x, y, z;
int i, igrd, j, k;
const double slow = 1.0/5000.0;
for (k=0; k<nz; k++)
{
for (j=0; j<ny; j++)
{
for (i=0; i<nx; i++)
{
igrd = k*nx*ny + j*nx + i;
x = (double) i*dx;
y = (double) j*dy;
z = (double) k*dz;
d2 = pow(x-xsrc, 2) + pow(y-ysrc, 2) + pow(z-zsrc, 2);
test[igrd] = sqrt(d2)*slow;
}
}
}
return;
}
double makeObs(const double x, const double y, const double z,
const double xsrc, const double ysrc, const double zsrc)
{
const double slow = 1.0/5000.0;
double d2, tobs;
d2 = pow(x-xsrc, 2) + pow(y-ysrc, 2) + pow(z-zsrc, 2);
tobs = sqrt(d2)*slow;
return tobs;
}
int computeAngles(const int nx, const int ny, const int nz,
const bool lbackProj,
const double *__restrict__ ttimes,
double *__restrict__ az,
double *__restrict__ aoi)
{
int i, i1, i2, ic, igrd, ix, iy, iz, j, k, ngrdx, ngrdx3, nxy, y3, z3;
double *work, *workxm, *workxp, *workym, *workyp, *workzm, *workzp,
dxh, dxh2, dyh, dyh2, dzh, dzh2,
fxmh, fxph, fymh, fyph, fzmh, fzph, phi, rho, theta;
const int chunkx = 16;
const double half = 0.5;
const double pi180i = 180.0/M_PI;
ngrdx = 16;
ngrdx3 = ngrdx*3;
nxy = nx*ny;
work = memory_calloc__double((chunkx+1)*9);
workxp = memory_calloc__double(2*(chunkx+1));
workyp = memory_calloc__double(2*(chunkx+1));
workzp = memory_calloc__double(2*(chunkx+1));
workxm = memory_calloc__double(chunkx+1);
workym = memory_calloc__double(chunkx+1);
workzm = memory_calloc__double(chunkx+1);
for (k=1; k<nz-1; k++)
{
for (j=1; j<ny-1; j++)
{
for (i=1; i<nx-1; i=i+chunkx)
{
// prefetch the values in the grid
i1 = i-1;
i2 = MIN(nx-2, i+chunkx);
// differentiate pre-fetched values
for (z3=-1; z3<=1; z3++)
{
for (y3=-1; y3<=1; y3++)
{
iz = k + z3;
iy = j + y3;
for (ix=i1; ix<i2; ix++)
{
igrd = iz*nxy + iy*nx + ix;
// central difference
ic = (z3 + 1)*ngrdx3
+ (y3 + 1)*ngrdx
+ (ix - i1 + 1);
ic = ix;
fxph = half*(workxp[ic] + workxm[ic]); //half*(work[ic+1] + work[ic]);
//fxmh = half*(workx[ic] + workx[ic-4]); //half*(work[ic] + work[ic-1]);
fyph = half*(workyp[ic] + workym[ic]); //half*(work[ic+ngrdx] + work[ic]);
//fymh = half*(worky[ic] + worky[ic-4]); //half*(work[ic] + work[ic-ngrdx]);
fzph = half*(workzp[ic] + workzm[ic]); //half*(work[ic+ngrdx3] + work[ic]);
//fzph = half*(workz[ic] + workz[ic-4]); //half*(work[ic] + work[ic-ngrdx3]);
dxh = fxph;// - fxmh;
dyh = fyph;// - fymh;
dzh = fzph;// - fzmh;
dxh2 = dxh*dxh;
dyh2 = dyh*dyh;
dzh2 = dzh*dzh;
rho = sqrt(dxh2 + dyh2 + dzh2);
phi = acos(dzh/rho)*pi180i;
theta = atan2(dyh, dxh)*pi180i;
// the modeling coordinate system is right handed up
// while the observation coordinate system is + from
// north. hence, make +x 'north' and -y 'east'.
az[igrd] = 90.0 - theta;
if (az[igrd] < 0.0){az[igrd] = az[igrd] + 360.0;}
aoi[igrd] = theta - 180.0;
}
}
}
}
}
}
free(work);
free(workxm);
free(workym);
free(workzm);
free(workxp);
free(workyp);
free(workzp);
return 0;
}
/*!
* @brief Stacks the weighted residuals into the analytic origin time
* computation. This is for the analytic removal of the source
 * time in a least squares optimization as described by Moser
 * et al. (1992), Eqn. 19, for diagonally weighted matrices.
*
* @param[in] ngrd number of grid points in the grid
* @param[in] tobs_i i'th observed pick time (seconds)
 * @param[in] xnorm normalization factor: the sum of the data
 * weights over all observations, by which each
 * weight is divided (1/seconds)
* @param[in] wt_i i'th observed pick time weight (1/seconds)
 * @param[in] test estimated arrival times (seconds) at all points in the
* grid. [ngrd]
*
 * @param[in,out] t0 on input contains the current weighted residual
* sum for previous observations.
* on output contains the updated weighted residual
* sum which incorporates the current observation. [ngrd]
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
static void locate_l2_stackT0__double64(const int ngrd,
const double tobs_i,
const double xnorm,
const double wt_i,
const double *__restrict__ test,
double *__restrict__ t0)
{
double wt __attribute__ ((aligned(64))) = 0.0;
double tobs __attribute__ ((aligned(64))) = 0.0;
int igrd __attribute__ ((aligned(64))) = 0;
//------------------------------------------------------------------------//
wt = wt_i/xnorm;
tobs = tobs_i;
#ifdef __INTEL_COMPILER
__assume_aligned(test, 64);
__assume_aligned(t0, 64);
#else
#pragma omp simd aligned(t0, test:64)
#endif
for (igrd=0; igrd<ngrd; igrd++)
{
t0[igrd] = t0[igrd] + wt*(tobs - test[igrd]);
}
return;
}
//============================================================================//
/*!
* @brief Stacks the weighted residuals into the analytic origin time
* computation. This is for the analytic removal of the source
 * time in a least squares optimization as described by Moser
 * et al. (1992), Eqn. 19, for diagonally weighted matrices.
*
* @param[in] ngrd number of grid points in the grid
* @param[in] tobs_i i'th observed pick time (seconds)
 * @param[in] xnorm normalization factor: the sum of the data
 * weights over all observations, by which each
 * weight is divided (1/seconds)
* @param[in] wt_i i'th observed pick time weight (1/seconds)
 * @param[in] test estimated arrival times (seconds) at all points in the
* grid. [ngrd]
*
 * @param[in,out] t0 on input contains the current weighted residual
* sum for previous observations.
* on output contains the updated weighted residual
* sum which incorporates the current observation. [ngrd]
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
static void locate_l2_stackT0__float64(const int ngrd,
const float tobs_i,
const float xnorm,
const float wt_i,
const float *__restrict__ test,
float *__restrict__ t0)
{
float wt __attribute__ ((aligned(64))) = 0.0f;
float tobs __attribute__ ((aligned(64))) = 0.0f;
int igrd __attribute__ ((aligned(64))) = 0;
//------------------------------------------------------------------------//
wt = wt_i/xnorm;
tobs = tobs_i;
#ifdef __INTEL_COMPILER
__assume_aligned(test, 64);
__assume_aligned(t0, 64);
#else
#pragma omp simd aligned(t0, test:64)
#endif
for (igrd=0; igrd<ngrd; igrd++)
{
t0[igrd] = t0[igrd] + wt*(tobs - test[igrd]);
}
return;
}
//============================================================================//
/*!
* @brief Stacks the squared residuals into the least squares penalty
* function. This assumes diagonal weighting.
*
 * @param[in] ngrd number of points in grid search
* @param[in] tobs_i i'th observed pick time (seconds)
* @param[in] wt_i i'th observed pick time weight (1/seconds)
 * @param[in] test estimated arrival times (seconds) at all points in the
* grid. [ngrd]
* @param[in] t0 origin time (seconds) at all points in grid [ngrd]
*
* @param[in,out] objfn on input contains the weighted squared residuals
 * at all points in the grid.
* on output contains the contribution of this
* observation to all the squared residuals at all
* points in the grid. [ngrd]
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
static void locate_l2_stackObjfn__double64(const int ngrd,
const double tobs_i,
const double wt_i,
const double *__restrict__ test,
const double *__restrict__ t0,
double *__restrict__ objfn)
{
double wt __attribute__ ((aligned(64))) = 0.0;
double res __attribute__ ((aligned(64))) = 0.0;
double tobs __attribute__ ((aligned(64))) = 0.0;
int igrd __attribute__ ((aligned(64))) = 0;
const double sqrt2i = 0.7071067811865475; //one/sqrt(two);
//------------------------------------------------------------------------//
wt = wt_i*sqrt2i;
tobs = tobs_i;
#ifdef __INTEL_COMPILER
__assume_aligned(test, 64);
__assume_aligned(t0, 64);
__assume_aligned(objfn, 64);
#else
#pragma omp simd aligned(t0, test, objfn: 64)
#endif
for (igrd=0; igrd<ngrd; igrd++)
{
res = wt*(tobs - (test[igrd] + t0[igrd]));
objfn[igrd] = objfn[igrd] + res*res;
}
return;
}
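/*!
 * @brief A hedged sketch (illustrative only; the production driver is
 *        locate_l2_gridSearch__double64, defined elsewhere) of how the two
 *        stacking kernels above combine, assuming diagonal weights wt[],
 *        no masks or station corrections, and arrays allocated by
 *        memory_calloc__double so the aligned simd clauses in the kernels hold.
 */
static void locate_l2_gridSearchSketch(const int ldgrd, const int ngrd,
                                       const int nobs,
                                       const double *__restrict__ tobs,
                                       const double *__restrict__ wt,
                                       const double *__restrict__ test,
                                       double *__restrict__ t0,
                                       double *__restrict__ objfn)
{
    double xnorm;
    int iobs;
    // xnorm is the sum of the weights; stackT0 divides each weight by it,
    // so t0 becomes the weighted mean of the residuals at each grid point
    xnorm = 0.0;
    for (iobs=0; iobs<nobs; iobs++){xnorm = xnorm + wt[iobs];}
    locate_nullDouble64(ngrd, t0);
    locate_nullDouble64(ngrd, objfn);
    // pass 1: accumulate the analytic origin time at every grid point
    for (iobs=0; iobs<nobs; iobs++)
    {
        locate_l2_stackT0__double64(ngrd, tobs[iobs], xnorm, wt[iobs],
                                    &test[ldgrd*iobs], t0);
    }
    // pass 2: accumulate the weighted squared residuals (penalty function)
    for (iobs=0; iobs<nobs; iobs++)
    {
        locate_l2_stackObjfn__double64(ngrd, tobs[iobs], wt[iobs],
                                       &test[ldgrd*iobs], t0, objfn);
    }
    // the optimum grid point is then locate_minLocDouble64(ngrd, objfn)
}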
//============================================================================//
/*!
* @brief Stacks the squared residuals into the least squares penalty
* function. This assumes diagonal weighting.
*
 * @param[in] ngrd number of points in grid search
* @param[in] tobs_i i'th observed pick time (seconds)
* @param[in] wt_i i'th observed pick time weight (1/seconds)
 * @param[in] test estimated arrival times (seconds) at all points in the
* grid. [ngrd]
* @param[in] t0 origin time (seconds) at all points in grid [ngrd]
*
* @param[in,out] objfn on input contains the weighted squared residuals
 * at all points in the grid.
* on output contains the contribution of this
* observation to all the squared residuals at all
* points in the grid. [ngrd]
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
static void locate_l2_stackObjfn__float64(const int ngrd,
const float tobs_i,
const float wt_i,
const float *__restrict__ test,
const float *__restrict__ t0,
float *__restrict__ objfn)
{
float wt __attribute__ ((aligned(64))) = 0.0f;
float res __attribute__ ((aligned(64))) = 0.0f;
float tobs __attribute__ ((aligned(64))) = 0.0f;
int igrd __attribute__ ((aligned(64))) = 0;
const float sqrt2i = 0.7071067811865475f; //one/sqrt(two);
//------------------------------------------------------------------------//
wt = wt_i*sqrt2i;
tobs = tobs_i;
#ifdef __INTEL_COMPILER
__assume_aligned(test, 64);
__assume_aligned(t0, 64);
__assume_aligned(objfn, 64);
#else
#pragma omp simd aligned(t0, test, objfn:64)
#endif
for (igrd=0; igrd<ngrd; igrd++)
{
res = wt*(tobs - (test[igrd] + t0[igrd]));
objfn[igrd] = objfn[igrd] + res*res;
}
return;
}
//============================================================================//
double *memory_calloc__double(const int n)
{
double *x = NULL;
int i, ierr;
const double zero __attribute__ ((aligned(64))) = 0.0;
ierr = memory_malloc__double(n, &x);
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#endif
//memset(x, 0.0, (size_t) n*sizeof(double));
for (i=0; i<n; i++)
{
x[i] = zero;
}
return x;
}
float *memory_calloc__float(const int n)
{
float *x = NULL;
int i, ierr;
const float zero __attribute__ ((aligned(64))) = 0.0f;
ierr = memory_malloc__float(n, &x);
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#endif
//memset(x, 0.0f, (size_t) n*sizeof(float));
for (i=0; i<n; i++)
{
x[i] = zero;
}
return x;
}
int *memory_calloc__int(const int n)
{
int *x = NULL;
int i, ierr;
const int zero __attribute__ ((aligned(64))) = 0;
ierr = memory_malloc__int(n, &x);
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#endif
//memset(x, 0.0f, (size_t) n*sizeof(float));
for (i=0; i<n; i++)
{
x[i] = zero;
}
return x;
}
int memory_malloc__double(const int n, double **x)
{
const char *fcnm = "memory_malloc__double\0";
size_t nbytes;
int ierr;
ierr = 0;
    nbytes = (size_t) (n + 64 - n%64)*sizeof(double); // pad so nbytes is a multiple of the 64 byte alignment
*x = (double *) aligned_alloc(64, nbytes);
if (*x == NULL)
{
printf("%s: Error allocating array\n", fcnm);
ierr = 1;
}
return ierr;
}
int memory_malloc__float(const int n, float **x)
{
const char *fcnm = "memory_malloc__float\0";
size_t nbytes;
int ierr;
ierr = 0;
    nbytes = (size_t) (n + 64 - n%64)*sizeof(float); // pad so nbytes is a multiple of the 64 byte alignment
*x = (float *) aligned_alloc(64, nbytes);
if (*x == NULL)
{
printf("%s: Error allocating array\n", fcnm);
ierr = 1;
}
return ierr;
}
int memory_malloc__int(const int n, int **x)
{
const char *fcnm = "memory_malloc__int\0";
size_t nbytes;
int ierr;
ierr = 0;
    nbytes = (size_t) (n + 64 - n%64)*sizeof(int); // pad so nbytes is a multiple of the 64 byte alignment
*x = (int *) aligned_alloc(64, nbytes);
if (*x == NULL)
{
printf("%s: Error allocating array\n", fcnm);
ierr = 1;
}
return ierr;
}
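/*
 * A minimal usage sketch for the aligned allocators above (illustrative
 * only; the MEMORY_EXAMPLE guard is hypothetical and not part of the
 * library).  Each helper rounds n up to the next multiple of 64 elements
 * so that the byte count handed to aligned_alloc is a multiple of the
 * 64 byte alignment; the memory is released with free().
 */
#ifdef MEMORY_EXAMPLE
#include <stdint.h>
static int memory_example(void)
{
    const int n = 100;
    double *x = memory_calloc__double(n); // 64 byte aligned and zeroed
    if (x == NULL){return 1;}
    printf("x is 64 byte aligned: %d\n", (int) (((uintptr_t) x)%64 == 0));
    free(x); // aligned_alloc'd memory is released with free
    return 0;
}
#endif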
//============================================================================//
/*!
 * @brief Sets all elements of a 64 byte aligned array x to x0in
 *
 * @param[in] n     number of points in array x
 * @param[in] x0in  value to set
 *
 * @param[out] x    array with all values set to x0in [n]
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
void locate_setDouble64(const int n, const double x0in,
double *__restrict__ x)
{
double x0 __attribute__ ((aligned(64))) = x0in;
int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#else
#pragma omp simd aligned(x : 64)
#endif
for (i=0; i<n; i++)
{
x[i] = x0;
}
}
//============================================================================//
/*!
 * @brief Sets all elements of a 64 byte aligned array x to x0in
 *
 * @param[in] n     number of points in array x
 * @param[in] x0in  value to set
 *
 * @param[out] x    array with all values set to x0in [n]
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
void locate_setFloat64(const int n, const float x0in,
float *__restrict__ x)
{
float x0 __attribute__ ((aligned(64))) = x0in;
int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#else
#pragma omp simd aligned(x : 64)
#endif
for (i=0; i<n; i++)
{
x[i] = x0;
}
}
//============================================================================//
/*!
 * @brief Zeros out a 64 byte aligned array x
 *
 * @param[in] n    number of points in array x
*
* @param[out] x nulled out array [n]
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
static void locate_nullDouble64(const int n, double *__restrict__ x)
{
double zero __attribute__ ((aligned(64))) = 0.0;
int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#else
#pragma omp simd aligned(x : 64)
#endif
for (i=0; i<n; i++)
{
x[i] = zero;
}
return;
}
//============================================================================//
/*!
 * @brief Zeros out a 64 byte aligned array x
 *
 * @param[in] n    number of points in array x
*
* @param[out] x nulled out array [n]
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
void locate_nullFloat64(const int n, float *__restrict__ x)
{
float zero __attribute__ ((aligned(64))) = 0.0f;
int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#else
#pragma omp simd aligned(x : 64)
#endif
for (i=0; i<n; i++)
{
x[i] = zero;
}
return;
}
//============================================================================//
/*!
* @brief Sums all elements in an array
*
* @param[in] n number of points in array to sum
* @param[in] x array to sum [n]
*
* @result sum of all elements in array x
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
double locate_sumDouble64(const int n, const double *__restrict__ x)
{
double xsum __attribute__ ((aligned(64))) = 0.0;
int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#else
#pragma omp simd aligned(x : 64) reduction(+:xsum)
#endif
for (i=0; i<n; i++)
{
xsum = xsum + x[i];
}
return xsum;
}
//============================================================================//
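/*!
 * @brief Locates the index of the smallest element of a 64 byte aligned
 *        array x
 *
 * @param[in] n    number of points in array x
 * @param[in] x    array to search [n]
 *
 * @result index of the minimum element of array x
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */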
int locate_minLocDouble64(const int n, const double *__restrict__ x)
{
double xmin __attribute__ ((aligned(64))) = 0.0;
int imin __attribute__ ((aligned(64))) = 0;
int i __attribute__ ((aligned(64))) = 0;
xmin = x[0];
imin = 0;
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#endif
for (i=1; i<n; i++)
{
if (x[i] < xmin)
{
imin = i;
xmin = x[i];
}
}
return imin;
}
//============================================================================//
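/*!
 * @brief Locates the index of the smallest element of a 64 byte aligned
 *        array x
 *
 * @param[in] n    number of points in array x
 * @param[in] x    array to search [n]
 *
 * @result index of the minimum element of array x
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */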
int locate_minLocFloat64(const int n, const float *__restrict__ x)
{
float xmin __attribute__ ((aligned(64))) = 0.0f;
int imin __attribute__ ((aligned(64))) = 0;
int i __attribute__ ((aligned(64))) = 0;
xmin = x[0];
imin = 0;
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#endif
for (i=1; i<n; i++)
{
if (x[i] < xmin)
{
imin = i;
xmin = x[i];
}
}
return imin;
}
//============================================================================//
/*!
* @brief Sums all elements in an array
*
* @param[in] n number of points in array to sum
* @param[in] x array to sum [n]
*
* @result sum of all elements in array x
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
float locate_sumFloat64(const int n, const float *__restrict__ x)
{
float xsum __attribute__ ((aligned(64))) = 0.0f;
int i __attribute__ ((aligned(64))) = 0;
#ifdef __INTEL_COMPILER
__assume_aligned(x, 64);
#else
#pragma omp simd aligned(x : 64) reduction(+:xsum)
#endif
for (i=0; i<n; i++)
{
xsum = xsum + x[i];
}
return xsum;
}
//============================================================================//
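/*!
 * @brief Sums all elements in an integer array
 *
 * @param[in] n    number of points in array to sum
 * @param[in] x    array to sum [n]
 *
 * @result sum of all elements in array x
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */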
int locate_sumInt(const int n, const int *__restrict__ x)
{
int i, xsum;
xsum = 0;
for (i=0; i<n; i++)
{
xsum = xsum + x[i];
}
return xsum;
}
//============================================================================//
/*!
 * @brief Performs the least squares gridsearch.  If the origin time is
 *        desired then at each grid point it is obtained analytically as
 *        the weighted average residual (e.g. Moser et al. 1992).
 *
 * @param[in] ldgrd    leading dimension of traveltime tables (>= ngrd)
 * @param[in] ngrd     number of grid points in traveltime tables
 * @param[in] nobs     number of observations
 * @param[in] iwantOT  if 1 then compute the origin time.
 *                     otherwise the origin time will be set to t0use.
 * @param[in] t0use    if iwantOT is not 1 then this is the source
 *                     origin time (seconds)
 * @param[in] mask     if mask[i] is 1 then the i'th observation is masked
 *                     from the location [nobs]
* @param[in] tobs observed pick times (seconds) [nobs]
* @param[in] tcorr static corrections (seconds) for stations [nobs].
* if NULL then it is ignored and assumed all zero.
* @param[in] varobs variance in pick times (seconds) [nobs]
* @param[in] test traveltime tables for each observation [ldgrd x nobs]
* with leading dimension ldgrd.
*
 * @param[out] t0    origin time (seconds) at each point in grid search [ngrd]
* @param[out] objfn residual squared objective function at each point in
* the grid [ngrd].
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
int locate_l2_gridSearch__double64(const int ldgrd,
const int ngrd,
const int nobs,
const int iwantOT,
const double t0use,
const int *__restrict__ mask,
const double *__restrict__ tobs,
const double *__restrict__ tcorr,
const double *__restrict__ varobs,
const double *__restrict__ test,
double *__restrict__ t0,
double *__restrict__ objfn)
{
const char *fcnm = "locate_l2_gridSearch__double64\0";
double *tobsCor, *wtUse;
double xnorm __attribute__ ((aligned(64))) = 0.0;
double tobs_i __attribute__ ((aligned(64))) = 0.0;
double wt_i __attribute__ ((aligned(64))) = 0.0;
const double zero = 0.0;
const double one = 1.0;
int *obsPtr, ibeg, ierr, iobs, jobs, nobsUse;
//------------------------------------------------------------------------//
//
// Error checking
ierr = 0;
if ((sizeof(double)*(size_t) ldgrd)%64 != 0 || ldgrd < ngrd || nobs < 1 ||
mask == NULL || tobs == NULL || varobs == NULL ||
test == NULL || t0 == NULL || objfn == NULL)
{
if ((sizeof(double)*(size_t) ldgrd)%64 != 0)
{
printf("%s: Error ldgrd must be divisible by 64\n", fcnm);
}
if (ldgrd < ngrd){printf("%s: Error ldgrd < ngrd\n", fcnm);}
if (mask == NULL){printf("%s: mask is null\n", fcnm);}
if (tobs == NULL){printf("%s: tobs is null\n", fcnm);}
if (varobs == NULL){printf("%s: varobs is null\n", fcnm);}
if (test == NULL){printf("%s: test is null\n", fcnm);}
if (t0 == NULL){printf("%s: t0 is null\n", fcnm);}
if (objfn == NULL){printf("%s: objfn is null\n", fcnm);}
ierr = 1;
return ierr;
}
    // Require the arrays be 64 byte aligned
if (memory_isAligned(t0, 64) != 1 ||
memory_isAligned(test, 64) != 1 ||
memory_isAligned(objfn, 64) != 1)
{
printf("%s: Input arrays are not 64 bit aligned\n", fcnm);
ierr = 1;
return ierr;
}
// zero out the result
locate_nullDouble64(ngrd, objfn);
    // Set the static corrections.  While it may seem more sensible to add
    // the correction to the estimate, recall that we are ultimately
    // interested in residuals, so we instead remove it from the observation
    // because t_obs - (t_est + t_stat) = (t_obs - t_stat) - t_est = t_cor - t_est
obsPtr = memory_calloc__int(nobs);
tobsCor = memory_calloc__double(nobs);
wtUse = memory_calloc__double(nobs);
nobsUse = 0;
xnorm = zero;
if (tcorr == NULL)
{
for (iobs=0; iobs<nobs; iobs++)
{
if (mask[iobs] == 0)
{
tobsCor[nobsUse] = tobs[iobs];
obsPtr[nobsUse] = iobs;
wtUse[nobsUse] = one/varobs[iobs];
xnorm = xnorm + wtUse[nobsUse]; //one/varobs[iobs];
nobsUse = nobsUse + 1;
}
}
}
else
{
for (iobs=0; iobs<nobs; iobs++)
{
if (mask[iobs] == 0)
{
tobsCor[nobsUse] = tobs[iobs] - tcorr[iobs];
obsPtr[nobsUse] = iobs;
wtUse[nobsUse] = one/varobs[iobs];
xnorm = xnorm + wtUse[nobsUse]; //one/varobs[iobs];
nobsUse = nobsUse + 1;
}
}
}
    // Compute the least-squares origin time which is the weighted average residual
if (iwantOT == 1)
{
locate_nullDouble64(ngrd, t0);
for (jobs=0; jobs<nobsUse; jobs++)
{
iobs = obsPtr[jobs];
tobs_i = tobsCor[jobs];
wt_i = wtUse[jobs]; //one/varUse[jobs];
ibeg = ldgrd*iobs;
locate_l2_stackT0__double64(ngrd, tobs_i, xnorm, wt_i,
&test[ibeg], t0);
}
}
    // Otherwise fix the origin time to the user supplied value
else
{
locate_setDouble64(ngrd, t0use, t0);
}
// Compute the locations with the origin times at each grid point
for (jobs=0; jobs<nobsUse; jobs++)
{
iobs = obsPtr[jobs];
tobs_i = tobsCor[jobs];
wt_i = wtUse[jobs]; //one/varUse[jobs];
ibeg = ldgrd*iobs;
locate_l2_stackObjfn__double64(ngrd, tobs_i, wt_i,
&test[ibeg], t0, objfn);
}
free(obsPtr);
free(tobsCor);
free(wtUse);
return ierr;
}
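/*
 * A minimal sketch of driving the least squares grid search (illustrative
 * only; the LOCATE_EXAMPLE guard, sizes, and synthetic picks below are
 * hypothetical).  The traveltime tables are stored column by column with
 * leading dimension ldgrd so that column iobs begins at test[ldgrd*iobs],
 * and the location is the grid point minimizing the objective function.
 */
#ifdef LOCATE_EXAMPLE
static int locate_example(void)
{
    const int ngrd = 128;   // number of grid points (hypothetical)
    const int ldgrd = 128;  // sizeof(double)*ldgrd must be divisible by 64
    const int nobs = 3;     // number of observations (hypothetical)
    int mask[3] = {0, 0, 0};            // use every observation
    double tobs[3] = {4.1, 5.3, 6.2};   // observed picks (seconds)
    double varobs[3] = {0.1, 0.1, 0.2}; // pick variances
    double *test = memory_calloc__double(ldgrd*nobs); // fill with real tables
    double *t0 = memory_calloc__double(ngrd);
    double *objfn = memory_calloc__double(ngrd);
    int ierr, igrdOpt;
    ierr = locate_l2_gridSearch__double64(ldgrd, ngrd, nobs, 1, 0.0,
                                          mask, tobs, NULL, varobs,
                                          test, t0, objfn);
    igrdOpt = locate_minLocDouble64(ngrd, objfn); // best fitting grid point
    printf("grid point %d: t0=%f objfn=%f (ierr=%d)\n",
           igrdOpt, t0[igrdOpt], objfn[igrdOpt], ierr);
    free(test);
    free(t0);
    free(objfn);
    return ierr;
}
#endif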
//============================================================================//
/*!
 * @brief Performs the least squares gridsearch.  If the origin time is
 *        desired then at each grid point it is obtained analytically as
 *        the weighted average residual (e.g. Moser et al. 1992).
 *
 * @param[in] ldgrd    leading dimension of traveltime tables (>= ngrd)
 * @param[in] ngrd     number of grid points in traveltime tables
 * @param[in] nobs     number of observations
 * @param[in] iwantOT  if 1 then compute the origin time.
 *                     otherwise the origin time will be set to t0use.
 * @param[in] t0use    if iwantOT is not 1 then this is the source
 *                     origin time (seconds)
 * @param[in] mask     if mask[i] is 1 then the i'th observation is masked
 *                     from the location [nobs]
* @param[in] tobs observed pick times (seconds) [nobs]
* @param[in] tcorr static corrections (seconds) for stations [nobs].
* if NULL then it is ignored and assumed all zero.
* @param[in] varobs variance in pick times (seconds) [nobs]
* @param[in] test traveltime tables for each observation [ldgrd x nobs]
* with leading dimension ldgrd.
*
 * @param[out] t0    origin time (seconds) at each point in grid search [ngrd]
* @param[out] objfn residual squared objective function at each point in
* the grid [ngrd].
*
* @author Ben Baker
*
* @copyright Apache 2
*
*/
int locate_l2_gridSearch__float64(const int ldgrd,
const int ngrd,
const int nobs,
const int iwantOT,
const float t0use,
const int *__restrict__ mask,
const float *__restrict__ tobs,
const float *__restrict__ tcorr,
const float *__restrict__ varobs,
const float *__restrict__ test,
float *__restrict__ t0,
float *__restrict__ objfn)
{
const char *fcnm = "locate_l2_gridSearch__float64\0";
float *tobsCor, *wtUse;
float xnorm __attribute__ ((aligned(64))) = 0.0f;
float tobs_i __attribute__ ((aligned(64))) = 0.0f;
float wt_i __attribute__ ((aligned(64))) = 0.0f;
const float zero = 0.0f;
const float one = 1.0f;
int ibeg, ierr, iobs, jobs, nobsUse, *obsPtr;
//------------------------------------------------------------------------//
//
// Error checking
ierr = 0;
if ((sizeof(float)*(size_t) ldgrd)%64 != 0 || ldgrd < ngrd || nobs < 1 ||
mask == NULL || tobs == NULL || varobs == NULL ||
test == NULL || t0 == NULL || objfn == NULL)
{
if ((sizeof(float)*(size_t) ldgrd)%64 != 0)
{
printf("%s: Error ldgrd must be divisible by 64\n", fcnm);
}
if (ldgrd < ngrd){printf("%s: Error ldgrd < ngrd\n", fcnm);}
if (mask == NULL){printf("%s: mask is null\n", fcnm);}
if (tobs == NULL){printf("%s: tobs is null\n", fcnm);}
if (varobs == NULL){printf("%s: varobs is null\n", fcnm);}
if (test == NULL){printf("%s: test is null\n", fcnm);}
if (t0 == NULL){printf("%s: t0 is null\n", fcnm);}
if (objfn == NULL){printf("%s: objfn is null\n", fcnm);}
ierr = 1;
return ierr;
}
    // Require the arrays be 64 byte aligned
if (memory_isAligned(t0, 64) != 1 ||
memory_isAligned(test, 64) != 1 ||
memory_isAligned(objfn, 64) != 1)
{
printf("%s: Input arrays are not 64 bit aligned\n", fcnm);
ierr = 1;
return ierr;
}
// zero out the result
locate_nullFloat64(ngrd, objfn);
    // Set the static corrections.  While it may seem more sensible to add
    // the correction to the estimate, recall that we are ultimately
    // interested in residuals, so we instead remove it from the observation
    // because t_obs - (t_est + t_stat) = (t_obs - t_stat) - t_est = t_cor - t_est
obsPtr = memory_calloc__int(nobs);
tobsCor = memory_calloc__float(nobs);
wtUse = memory_calloc__float(nobs);
nobsUse = 0;
xnorm = zero;
if (tcorr == NULL)
{
for (iobs=0; iobs<nobs; iobs++)
{
if (mask[iobs] == 0)
{
tobsCor[nobsUse] = tobs[iobs];
obsPtr[nobsUse] = iobs;
wtUse[nobsUse] = one/varobs[iobs];
xnorm = xnorm + wtUse[nobsUse]; //one/varobs[iobs];
nobsUse = nobsUse + 1;
}
}
}
else
{
for (iobs=0; iobs<nobs; iobs++)
{
if (mask[iobs] == 0)
{
tobsCor[nobsUse] = tobs[iobs] - tcorr[iobs];
obsPtr[nobsUse] = iobs;
wtUse[nobsUse] = one/varobs[iobs];
xnorm = xnorm + wtUse[nobsUse]; //one/varobs[iobs];
nobsUse = nobsUse + 1;
}
}
}
    // Compute the least-squares origin time which is the weighted average residual
if (iwantOT == 1)
{
locate_nullFloat64(ngrd, t0);
for (jobs=0; jobs<nobsUse; jobs++)
{
iobs = obsPtr[jobs];
tobs_i = tobsCor[jobs];
wt_i = wtUse[jobs]; //one/varUse[jobs];
ibeg = ldgrd*iobs;
locate_l2_stackT0__float64(ngrd, tobs_i, xnorm, wt_i,
&test[ibeg], t0);
}
}
    // Otherwise fix the origin time to the user supplied value
else
{
locate_setFloat64(ngrd, t0use, t0);
}
// Compute the locations with the origin times at each grid point
for (jobs=0; jobs<nobsUse; jobs++)
{
iobs = obsPtr[jobs];
tobs_i = tobsCor[jobs];
wt_i = wtUse[jobs]; //one/varUse[jobs];
ibeg = ldgrd*iobs;
locate_l2_stackObjfn__float64(ngrd, tobs_i, wt_i,
&test[ibeg], t0, objfn);
}
free(obsPtr);
free(tobsCor);
free(wtUse);
return ierr;
}
//============================================================================//
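/*!
 * @brief Performs the L1 gridsearch.  If the origin time is desired then
 *        at each grid point it is taken as the weighted median of the
 *        residuals, the L1 analogue of the weighted average residual used
 *        in the least squares search.
 *
 * @param[in] ldgrd    leading dimension of traveltime tables (>= ngrd)
 * @param[in] ngrd     number of grid points in traveltime tables
 * @param[in] nobs     number of observations
 * @param[in] iwantOT  if 1 then compute the origin time.
 *                     otherwise the origin time will be set to t0use.
 * @param[in] t0use    if iwantOT is not 1 then this is the source
 *                     origin time (seconds)
 * @param[in] mask     if mask[i] is 1 then the i'th observation is masked
 *                     from the location [nobs]
 * @param[in] tobs     observed pick times (seconds) [nobs]
 * @param[in] varobs   variance in pick times (seconds) [nobs]
 * @param[in] test     traveltime tables for each observation [ldgrd x nobs]
 *                     with leading dimension ldgrd.
 *
 * @param[out] t0      origin time (seconds) at each point in grid search [ngrd]
 * @param[out] objfn   weighted L1 objective function at each point in the
 *                     grid [ngrd].
 *
 * @author Ben Baker
 *
 * @copyright Apache 2
 *
 */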
int locate_l1_gridSearch__double64(const int ldgrd,
const int ngrd,
const int nobs,
const int iwantOT,
const double t0use,
const int *__restrict__ mask,
const double *__restrict__ tobs,
const double *__restrict__ varobs,
const double *__restrict__ test,
double *__restrict__ t0,
double *__restrict__ objfn)
{
    double *tobsUse, *wt, *wtSort, *res; //, *resSort;
double rsum __attribute__ ((aligned(64))) = 0.0;
double t0opt __attribute__ ((aligned(64))) = 0.0;
double tobs_i __attribute__ ((aligned(64))) = 0.0;
double wt_i __attribute__ ((aligned(64))) = 0.0;
const double one __attribute__ ((aligned(64))) = 1.0;
const double zero __attribute__ ((aligned(64))) = 0.0;
double wtsum, wtsumi;
    int *obsPtr, *perm, ierr, igrd, igrd1, igrd2, iobs, jgrd,
        jndx, jobs, nobsUse, nsort;
bool lsort;
const int nchunk = 256;
ierr = 0;
perm = memory_calloc__int(nobs);
obsPtr = memory_calloc__int(nobs);
tobsUse = memory_calloc__double(nobs);
wt = memory_calloc__double(nobs);
wtsum = 0.0;
nobsUse = 0;
for (iobs=0; iobs<nobs; iobs++)
{
if (mask[iobs] == 0)
{
wt[nobsUse] = one/varobs[iobs];
tobsUse[nobsUse] = tobs[iobs];
obsPtr[iobs] = iobs;
nobsUse = nobsUse + 1;
}
wtsum = wtsum + wt[iobs];
}
locate_nullDouble64(ngrd, objfn);
if (iwantOT == 1)
{
lsort = false;
nsort = 0;
// Zero out the origin time
locate_nullDouble64(ngrd, t0);
// Set space
res = memory_calloc__double(nobsUse);
//resSort = memory_calloc__double(nobsUse);
wtSort = memory_calloc__double(nobsUse);
        // Normalize the weights so they sum to one
wtsumi = one/wtsum;
if (fabs(wtsum - one) > 1.e-14)
{
for (iobs=0; iobs<nobsUse; iobs++)
{
wt[iobs] = wt[iobs]*wtsumi;
}
}
for (iobs=0; iobs<nobsUse; iobs++){perm[iobs] = iobs;}
/*
#pragma omp parallel for \
firstprivate (obsPtr, perm, res, tobsUse, wt, wtSort) \
private (ierr, igrd, igrd1, igrd2, iobs, jgrd, jndx, jobs, lsort, rsum, t0opt) \
shared (nchunk, ngrd, nobsUse, objfn, t0, test, zero) \
default (none) reduction (+:nsort)
*/
// Loop on chunks
for (jgrd=0; jgrd<ngrd; jgrd=jgrd+nchunk)
{
igrd1 = jgrd;
igrd2 = MIN(ngrd, jgrd+nchunk);
// Loop on the subgrid
for (igrd=igrd1; igrd<igrd2; igrd++)
{
                // Compute the residuals at this grid point
for (iobs=0; iobs<nobsUse; iobs++)
{
jobs = obsPtr[iobs];
jndx = jobs*ldgrd + igrd;
res[iobs] = tobsUse[iobs] - test[jndx];
wtSort[iobs] = wt[iobs];
}
t0opt = weightedMedian__double(nobsUse, res,
wtSort, perm, &lsort, &ierr);
if (lsort){nsort = nsort + 1;}
t0[igrd] = t0opt;
}
}
        free(res);
        free(wtSort);
        printf("%d %d\n", nsort, ngrd); // diagnostic: nsort of ngrd grid points required sorting
}
    // Simply set the origin time to t0use
else
{
locate_setDouble64(ngrd, t0use, t0);
}
// Compute the L1 misfit function at all points - loop on chunks
    #pragma omp parallel for \
     private (igrd, igrd1, igrd2, iobs, jgrd, jobs, jndx, tobs_i, wt_i) \
     shared (ldgrd, nchunk, ngrd, nobsUse, objfn, obsPtr, test, t0, tobsUse, wt) \
     default (none)
for (jgrd=0; jgrd<ngrd; jgrd=jgrd+nchunk)
{
igrd1 = jgrd;
igrd2 = MIN(ngrd, jgrd+nchunk);
// Compute the L1 norm for all observations
for (iobs=0; iobs<nobsUse; iobs++)
{
jobs = obsPtr[iobs];
tobs_i = tobsUse[iobs];
wt_i = wt[iobs];
// Extract the estimate at each grid point in chunk
for (igrd=igrd1; igrd<igrd2; igrd++)
{
jndx = jobs*ldgrd + igrd;
objfn[igrd] = objfn[igrd]
+ wt_i*fabs(tobs_i - test[jndx] - t0[igrd]);
}
}
}
    free(obsPtr);
    free(tobsUse);
    free(wt);
    free(perm);
return ierr;
}
|
core.c | /* Generated by Cython 0.29.27 */
/* BEGIN: Cython Metadata
{
"distutils": {
"name": "monotonic_align.core",
"sources": [
"core.pyx"
]
},
"module_name": "monotonic_align.core"
}
END: Cython Metadata */
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_27"
#define CYTHON_HEX_VERSION 0x001D1BF0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030B00A1)
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#if PY_MAJOR_VERSION < 3
#include "longintrepr.h"
#endif
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_DefaultClassType PyType_Type
#if PY_VERSION_HEX >= 0x030B00A1
static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f,
PyObject *code, PyObject *c, PyObject* n, PyObject *v,
PyObject *fv, PyObject *cell, PyObject* fn,
PyObject *name, int fline, PyObject *lnos) {
PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL;
PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL;
const char *fn_cstr=NULL;
const char *name_cstr=NULL;
PyCodeObject* co=NULL;
PyObject *type, *value, *traceback;
PyErr_Fetch(&type, &value, &traceback);
if (!(kwds=PyDict_New())) goto end;
if (!(argcount=PyLong_FromLong(a))) goto end;
if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end;
if (!(posonlyargcount=PyLong_FromLong(0))) goto end;
if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end;
if (!(kwonlyargcount=PyLong_FromLong(k))) goto end;
if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end;
if (!(nlocals=PyLong_FromLong(l))) goto end;
if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end;
if (!(stacksize=PyLong_FromLong(s))) goto end;
if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end;
if (!(flags=PyLong_FromLong(f))) goto end;
if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end;
if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end;
if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end;
if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end;
if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too;
if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here
if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too;
Py_XDECREF((PyObject*)co);
co = (PyCodeObject*)call_result;
call_result = NULL;
if (0) {
cleanup_code_too:
Py_XDECREF((PyObject*)co);
co = NULL;
}
end:
Py_XDECREF(kwds);
Py_XDECREF(argcount);
Py_XDECREF(posonlyargcount);
Py_XDECREF(kwonlyargcount);
Py_XDECREF(nlocals);
Py_XDECREF(stacksize);
Py_XDECREF(replace);
Py_XDECREF(call_result);
Py_XDECREF(empty);
if (type) {
PyErr_Restore(type, value, traceback);
}
return co;
}
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#if defined(PyUnicode_IS_READY)
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#else
#define __Pyx_PyUnicode_READY(op) (0)
#endif
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#endif
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__monotonic_align__core
#define __PYX_HAVE_API__monotonic_align__core
/* Early includes */
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"core.pyx",
"stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each;
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each {
int __pyx_n;
float max_neg_val;
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
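/* With CYTHON_REFNANNY enabled, every refcount operation is routed through a
 * debug tracker that reports per-function reference leaks; otherwise the
 * macros collapse to plain Py_INCREF/Py_DECREF (and to no-ops for the
 * GOTREF/GIVEREF bookkeeping). */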
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS 8
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
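/* Each slice taken from a memoryview bumps the owner's acquisition count via
 * __PYX_INC_MEMVIEW and releases it with __PYX_XDEC_MEMVIEW; the extra
 * __LINE__ argument appears to be used only for diagnostics in the fatal
 * error path. */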
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
#if CYTHON_FAST_PYCALL
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif // CYTHON_FAST_PYCALL
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* DivInt[Py_ssize_t].proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
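/* Two's-complement overflow test: (unsigned long)(x) == 0-(unsigned long)(x)
 * holds only for 0 and the most negative representable value, so combined
 * with (x) < 0 it detects exactly the case where negating x would overflow. */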
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
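/* CPython >= 3.6 bumps a dict's ma_version_tag on every mutation (PEP 509);
 * the macros above cache a lookup result together with the dict version it
 * was made under, reducing repeated module-global lookups to a single
 * integer comparison on the fast path. */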
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* DivInt[long].proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag);
/* GCCDiagnostics.proto */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'monotonic_align.core' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
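/* A small static pool of pre-allocated thread locks that memoryview
 * instances can draw from before falling back to allocating their own; the
 * hand-out and recycling logic lives in code outside this excerpt. */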
static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/
static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
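/* Buffer dtype descriptors for the two element types this module accepts:
 * typegroup 'I'/'U' marks signed/unsigned integers and 'R' floating point,
 * which is what __Pyx_BufFmt_CheckString validates incoming buffer format
 * strings against. */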
#define __Pyx_MODULE_NAME "monotonic_align.core"
extern int __pyx_module_is_main_monotonic_align__core;
int __pyx_module_is_main_monotonic_align__core = 0;
/* Implementation of 'monotonic_align.core' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_t_xs[] = "t_xs";
static const char __pyx_k_t_ys[] = "t_ys";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_paths[] = "paths";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_values[] = "values";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_paths;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_t_xs;
static PyObject *__pyx_n_s_t_ys;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_values;
static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static float __pyx_k_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__16;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__23;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__25;
static PyObject *__pyx_codeobj__26;
/* Late includes */
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) {
float __pyx_v_max_neg_val = __pyx_k_;
int __pyx_v_x;
int __pyx_v_y;
float __pyx_v_v_prev;
float __pyx_v_v_cur;
int __pyx_v_index;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
long __pyx_t_4;
int __pyx_t_5;
long __pyx_t_6;
long __pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
Py_ssize_t __pyx_t_10;
float __pyx_t_11;
float __pyx_t_12;
float __pyx_t_13;
int __pyx_t_14;
Py_ssize_t __pyx_t_15;
Py_ssize_t __pyx_t_16;
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_max_neg_val = __pyx_optional_args->max_neg_val;
}
}
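  /* max_neg_val keeps its compiled default __pyx_k_ (presumably set to the
   * declared -1e9 during module initialisation, which is outside this
   * excerpt) unless the caller supplied it explicitly. */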
/* "monotonic_align/core.pyx":13
* cdef float v_cur
* cdef float tmp
* cdef int index = t_x - 1 # <<<<<<<<<<<<<<
*
* for y in range(t_y):
*/
__pyx_v_index = (__pyx_v_t_x - 1);
/* "monotonic_align/core.pyx":15
* cdef int index = t_x - 1
*
* for y in range(t_y): # <<<<<<<<<<<<<<
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y:
*/
__pyx_t_1 = __pyx_v_t_y;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_y = __pyx_t_3;
/* "monotonic_align/core.pyx":16
*
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<<
* if x == y:
* v_cur = max_neg_val
*/
__pyx_t_4 = (__pyx_v_y + 1);
__pyx_t_5 = __pyx_v_t_x;
if (((__pyx_t_4 < __pyx_t_5) != 0)) {
__pyx_t_6 = __pyx_t_4;
} else {
__pyx_t_6 = __pyx_t_5;
}
__pyx_t_4 = __pyx_t_6;
__pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y);
__pyx_t_6 = 0;
if (((__pyx_t_5 > __pyx_t_6) != 0)) {
__pyx_t_7 = __pyx_t_5;
} else {
__pyx_t_7 = __pyx_t_6;
}
__pyx_t_6 = __pyx_t_4;
for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) {
__pyx_v_x = __pyx_t_5;
/* "monotonic_align/core.pyx":17
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y: # <<<<<<<<<<<<<<
* v_cur = max_neg_val
* else:
*/
__pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":18
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y:
* v_cur = max_neg_val # <<<<<<<<<<<<<<
* else:
* v_cur = value[y-1, x]
*/
__pyx_v_v_cur = __pyx_v_max_neg_val;
/* "monotonic_align/core.pyx":17
* for y in range(t_y):
* for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
* if x == y: # <<<<<<<<<<<<<<
* v_cur = max_neg_val
* else:
*/
goto __pyx_L7;
}
/* "monotonic_align/core.pyx":20
* v_cur = max_neg_val
* else:
* v_cur = value[y-1, x] # <<<<<<<<<<<<<<
* if x == 0:
* if y == 0:
*/
/*else*/ {
__pyx_t_9 = (__pyx_v_y - 1);
__pyx_t_10 = __pyx_v_x;
__pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )));
}
__pyx_L7:;
/* "monotonic_align/core.pyx":21
* else:
* v_cur = value[y-1, x]
* if x == 0: # <<<<<<<<<<<<<<
* if y == 0:
* v_prev = 0.
*/
__pyx_t_8 = ((__pyx_v_x == 0) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":22
* v_cur = value[y-1, x]
* if x == 0:
* if y == 0: # <<<<<<<<<<<<<<
* v_prev = 0.
* else:
*/
__pyx_t_8 = ((__pyx_v_y == 0) != 0);
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":23
* if x == 0:
* if y == 0:
* v_prev = 0. # <<<<<<<<<<<<<<
* else:
* v_prev = max_neg_val
*/
__pyx_v_v_prev = 0.;
/* "monotonic_align/core.pyx":22
* v_cur = value[y-1, x]
* if x == 0:
* if y == 0: # <<<<<<<<<<<<<<
* v_prev = 0.
* else:
*/
goto __pyx_L9;
}
/* "monotonic_align/core.pyx":25
* v_prev = 0.
* else:
* v_prev = max_neg_val # <<<<<<<<<<<<<<
* else:
* v_prev = value[y-1, x-1]
*/
/*else*/ {
__pyx_v_v_prev = __pyx_v_max_neg_val;
}
__pyx_L9:;
/* "monotonic_align/core.pyx":21
* else:
* v_cur = value[y-1, x]
* if x == 0: # <<<<<<<<<<<<<<
* if y == 0:
* v_prev = 0.
*/
goto __pyx_L8;
}
/* "monotonic_align/core.pyx":27
* v_prev = max_neg_val
* else:
* v_prev = value[y-1, x-1] # <<<<<<<<<<<<<<
* value[y, x] += max(v_prev, v_cur)
*
*/
/*else*/ {
__pyx_t_10 = (__pyx_v_y - 1);
__pyx_t_9 = (__pyx_v_x - 1);
__pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) )));
}
__pyx_L8:;
/* "monotonic_align/core.pyx":28
* else:
* v_prev = value[y-1, x-1]
* value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<<
*
* for y in range(t_y - 1, -1, -1):
*/
__pyx_t_11 = __pyx_v_v_cur;
__pyx_t_12 = __pyx_v_v_prev;
if (((__pyx_t_11 > __pyx_t_12) != 0)) {
__pyx_t_13 = __pyx_t_11;
} else {
__pyx_t_13 = __pyx_t_12;
}
__pyx_t_9 = __pyx_v_y;
__pyx_t_10 = __pyx_v_x;
*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13;
}
}
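  /* Forward pass of the monotonic-alignment DP just completed. In .pyx terms
   * (a sketch assembled from the annotations above):
   *
   *   for y in range(t_y):
   *     for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
   *       v_cur  = max_neg_val if x == y else value[y-1, x]
   *       v_prev = 0. if (x == 0 and y == 0) else \
   *                (max_neg_val if x == 0 else value[y-1, x-1])
   *       value[y, x] += max(v_prev, v_cur)
   *
   * The x-range restricts work to the band of cells from which the corner
   * (t_y-1, t_x-1) is still reachable by a monotonic path. */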
/* "monotonic_align/core.pyx":30
* value[y, x] += max(v_prev, v_cur)
*
* for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<<
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
*/
for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_y = __pyx_t_1;
/* "monotonic_align/core.pyx":31
*
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1 # <<<<<<<<<<<<<<
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
* index = index - 1
*/
__pyx_t_10 = __pyx_v_y;
__pyx_t_9 = __pyx_v_index;
*((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1;
/* "monotonic_align/core.pyx":32
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<<
* index = index - 1
*
*/
__pyx_t_14 = ((__pyx_v_index != 0) != 0);
if (__pyx_t_14) {
} else {
__pyx_t_8 = __pyx_t_14;
goto __pyx_L13_bool_binop_done;
}
__pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0);
if (!__pyx_t_14) {
} else {
__pyx_t_8 = __pyx_t_14;
goto __pyx_L13_bool_binop_done;
}
__pyx_t_9 = (__pyx_v_y - 1);
__pyx_t_10 = __pyx_v_index;
__pyx_t_15 = (__pyx_v_y - 1);
__pyx_t_16 = (__pyx_v_index - 1);
__pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0);
__pyx_t_8 = __pyx_t_14;
__pyx_L13_bool_binop_done:;
if (__pyx_t_8) {
/* "monotonic_align/core.pyx":33
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
* index = index - 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_index = (__pyx_v_index - 1);
/* "monotonic_align/core.pyx":32
* for y in range(t_y - 1, -1, -1):
* path[y, index] = 1
* if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<<
* index = index - 1
*
*/
}
}
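/* Backtracking pass: starting from the last row, mark path[y, index] = 1
 * and move one column left whenever the diagonal predecessor scored higher
 * than staying in the same column (or when index == y forces a diagonal
 * step), so the recovered path is monotone non-decreasing in x as y grows. */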
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
/* function exit code */
}
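/* For reference, the Cython source of this function as far as it is quoted
 * in the generated comments above (the loop headers of the forward pass
 * fall outside this excerpt and are elided):
 *
 *     @cython.boundscheck(False)
 *     @cython.wraparound(False)
 *     cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil:
 *         ...
 *                 else:
 *                     v_cur = value[y-1, x]
 *                 if x == 0:
 *                     if y == 0:
 *                         v_prev = 0.
 *                     else:
 *                         v_prev = max_neg_val
 *                 else:
 *                     v_prev = value[y-1, x-1]
 *                 value[y, x] += max(v_prev, v_cur)
 *
 *         for y in range(t_y - 1, -1, -1):
 *             path[y, index] = 1
 *             if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
 *                 index = index - 1
 */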
/* "monotonic_align/core.pyx":38
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<<
* cdef int b = paths.shape[0]
* cdef int i
*/
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) {
CYTHON_UNUSED int __pyx_v_b;
int __pyx_v_i;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
__Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
Py_ssize_t __pyx_t_6;
Py_ssize_t __pyx_t_7;
/* "monotonic_align/core.pyx":39
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil:
* cdef int b = paths.shape[0] # <<<<<<<<<<<<<<
* cdef int i
* for i in prange(b, nogil=True):
*/
__pyx_v_b = (__pyx_v_paths.shape[0]);
/* "monotonic_align/core.pyx":41
* cdef int b = paths.shape[0]
* cdef int i
* for i in prange(b, nogil=True): # <<<<<<<<<<<<<<
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_1 = __pyx_v_b;
if ((1 == 0)) abort();
{
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
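/* prange(b, nogil=True) compiles to an OpenMP parallel for. __pyx_t_3
 * below is the loop trip count; for a unit-step range the expression
 * (b - 0 + 1 - 1/abs(1)) / 1 reduces to b. The constant-false
 * "if ((1 == 0)) abort();" guard above is dead code emitted by the code
 * generator and can never fire. */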
__pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_3 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_6, __pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_2);
/* "monotonic_align/core.pyx":42
* cdef int i
* for i in prange(b, nogil=True):
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<<
*/
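/* paths[i] and values[i] are materialized by hand as 2-D slices: the base
 * data pointer is advanced by i * strides[0], the trailing two dimensions'
 * shape/strides are copied over, and suboffsets are set to -1 (no pointer
 * indirection). The memoryview refcount is bumped for the duration of the
 * call and released again below. */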
__pyx_t_4.data = __pyx_v_paths.data;
__pyx_t_4.memview = __pyx_v_paths.memview;
__PYX_INC_MEMVIEW(&__pyx_t_4, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0];
__pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_4.shape[0] = __pyx_v_paths.shape[1];
__pyx_t_4.strides[0] = __pyx_v_paths.strides[1];
__pyx_t_4.suboffsets[0] = -1;
__pyx_t_4.shape[1] = __pyx_v_paths.shape[2];
__pyx_t_4.strides[1] = __pyx_v_paths.strides[2];
__pyx_t_4.suboffsets[1] = -1;
__pyx_t_5.data = __pyx_v_values.data;
__pyx_t_5.memview = __pyx_v_values.memview;
__PYX_INC_MEMVIEW(&__pyx_t_5, 0);
{
Py_ssize_t __pyx_tmp_idx = __pyx_v_i;
Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0];
__pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride;
}
__pyx_t_5.shape[0] = __pyx_v_values.shape[1];
__pyx_t_5.strides[0] = __pyx_v_values.strides[1];
__pyx_t_5.suboffsets[0] = -1;
__pyx_t_5.shape[1] = __pyx_v_values.shape[2];
__pyx_t_5.strides[1] = __pyx_v_values.strides[2];
__pyx_t_5.suboffsets[1] = -1;
__pyx_t_6 = __pyx_v_i;
__pyx_t_7 = __pyx_v_i;
__pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL);
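/* t_ys and t_xs are C-contiguous 1-D int memoryviews, so t_ys[i] and
 * t_xs[i] reduce to plain pointer arithmetic on the base data pointer. */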
__PYX_XDEC_MEMVIEW(&__pyx_t_4, 0);
__pyx_t_4.memview = NULL;
__pyx_t_4.data = NULL;
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 0);
__pyx_t_5.memview = NULL;
__pyx_t_5.data = NULL;
}
}
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "monotonic_align/core.pyx":41
* cdef int b = paths.shape[0]
* cdef int i
* for i in prange(b, nogil=True): # <<<<<<<<<<<<<<
* maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L5;
}
__pyx_L5:;
}
}
/* "monotonic_align/core.pyx":38
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<<
* cdef int b = paths.shape[0]
* cdef int i
*/
/* function exit code */
}
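/* For reference, the Cython source of this dispatcher as quoted in the
 * generated comments above:
 *
 *     @cython.boundscheck(False)
 *     @cython.wraparound(False)
 *     cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil:
 *         cdef int b = paths.shape[0]
 *         cdef int i
 *         for i in prange(b, nogil=True):
 *             maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
 */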
/* Python wrapper */
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0};
PyObject* values[4] = {0,0,0,0};
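/* Standard Cython argument unpacking: positional arguments are copied into
 * values[] via the fall-through switch, keyword arguments fill the
 * remaining slots by name, and any shortfall or surplus raises through
 * __Pyx_RaiseArgtupleInvalid. All four parameters are required here. */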
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 4) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
}
__pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("maximum_path_c", 0);
__Pyx_XDECREF(__pyx_r);
if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) }
if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) }
__pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_values, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
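/* A minimal, hypothetical call site for the Python-visible wrapper. The
 * memoryview signatures require C-contiguous, writable arrays of exactly
 * these dtypes (PyBUF_WRITABLE is requested for all four arguments):
 *
 *     import numpy as np
 *     from monotonic_align.core import maximum_path_c
 *
 *     b, t_y, t_x = 2, 5, 3                       # example sizes only
 *     paths  = np.zeros((b, t_y, t_x), np.int32)  # filled in place
 *     values = np.random.rand(b, t_y, t_x).astype(np.float32)
 *     t_ys   = np.full(b, t_y, np.int32)
 *     t_xs   = np.full(b, t_x, np.int32)
 *     maximum_path_c(paths, values, t_ys, t_xs)   # paths now holds 0/1 masks
 */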
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(1, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
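/* The format string has been normalized to an ASCII bytes object;
 * self._format keeps that object alive while self.format borrows its raw
 * char* for the buffer protocol. */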
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
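/* shape and strides share a single allocation of 2 * ndim Py_ssize_t:
 * _shape points at the first half and _strides at the second, so only
 * _shape needs to be freed later (see __dealloc__). */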
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 164, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(1, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
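/* For object ('O') buffers every slot is seeded with an owned reference to
 * Py_None, so all len/itemsize slots hold valid PyObject pointers and later
 * reference-count traversals of the buffer are safe. */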
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
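/* The exporter only honors requests compatible with a contiguous buffer of
 * the array's own mode; the Py_buffer fields below are then filled directly
 * from the array struct, without copying. */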
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
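/* Only _shape is passed to PyObject_Free: _strides points into the same
 * 2 * ndim allocation made in __cinit__, and self.data is released above
 * via the callback or free() depending on ownership. */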
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
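/* get_memview wraps the array in a full memoryview requesting an
 * any-contiguous, writable buffer with format information. The __len__,
 * __getattr__, __getitem__ and __setitem__ slots that follow all delegate
 * attribute lookup and element access to that memoryview. */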
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
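/* Editor's note: __pyx_array_new above has two construction paths. With
 * buf == NULL the array type allocates its own storage; otherwise the
 * object is created with allocate_buffer=False and then made to wrap the
 * caller's memory by assigning result->data = buf directly. A hedged usage
 * sketch (hypothetical helper name; assumes this translation unit's
 * definitions and the "c"-contiguous mode string used elsewhere in
 * View.MemoryView): */
static struct __pyx_array_obj *demo_wrap_buffer(char *extern_buf, Py_ssize_t n) {
    struct __pyx_array_obj *result;
    PyObject *shape = Py_BuildValue("(n)", n);          /* 1-D shape tuple */
    if (!shape)
        return NULL;
    /* non-NULL buf selects the allocate_buffer=False wrapping path */
    result = __pyx_array_new(shape, (Py_ssize_t)sizeof(double),
                             (char *)"d", (char *)"c", extern_buf);
    Py_DECREF(shape);
    return result;                                      /* NULL with exception on failure */
}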
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
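/* Editor's note: the five-line assignment above follows the standard
 * CPython ordering for replacing an owned PyObject* field -- take a
 * reference to the incoming value *before* releasing the old one, so the
 * code is safe even when both are the same object (a lone DECREF first
 * could destroy the value being stored). The idiom in isolation, with a
 * hypothetical helper name: */
static void demo_replace_field(PyObject **slot, PyObject *value) {
    PyObject *old = *slot;   /* may be the same object as value */
    Py_INCREF(value);        /* own the new value first */
    *slot = value;
    Py_XDECREF(old);         /* only now release the previous value */
}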
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
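/* Editor's note: the two return shapes above implement the pickle
 * __reduce__ protocol. When use_setstate is true the result is the
 * 3-tuple (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state), so
 * unpickling calls the reconstructor and then applies `state` via
 * __setstate__; otherwise `state` is folded directly into the argument
 * tuple as (type(self), 0xb068931, state). The 0xb068931 constant is a
 * checksum of the class attribute layout that __pyx_unpickle_Enum verifies
 * before restoring the object. */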
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
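/* Editor's note: align_pointer() rounds an address up to the next multiple
 * of `alignment`: compute how far past the previous boundary the pointer
 * sits, and add the remaining distance when that offset is non-zero. The
 * same arithmetic as a self-contained sketch (hypothetical name; any
 * alignment value works because plain % is used rather than a
 * power-of-two mask): */
#include <stdint.h>
static void *demo_align_up(void *memory, size_t alignment) {
    uintptr_t p = (uintptr_t)memory;
    size_t offset = (size_t)(p % alignment);  /* distance past the last boundary */
    if (offset > 0)
        p += alignment - offset;              /* bump up to the next boundary */
    return (void *)p;                         /* already aligned: returned unchanged */
}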
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
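/* Editor's note: the lock handling in __cinit__ above prefers one of the
 * THREAD_LOCKS_PREALLOCATED (8) locks created at module init before
 * falling back to PyThread_allocate_lock(), which can fail and lead to
 * MemoryError. A minimal sketch of that acquisition pattern with
 * hypothetical names (the real pool is __pyx_memoryview_thread_locks;
 * like the generated code, a NULL pool slot falls through to a fresh
 * allocation): */
#include <Python.h>
#include <pythread.h>
#define DEMO_LOCK_POOL_SIZE 8
static PyThread_type_lock demo_lock_pool[DEMO_LOCK_POOL_SIZE];
static int demo_locks_used = 0;
static PyThread_type_lock demo_take_lock(void) {
    PyThread_type_lock lock = NULL;
    if (demo_locks_used < DEMO_LOCK_POOL_SIZE)
        lock = demo_lock_pool[demo_locks_used++];  /* reuse a pooled lock */
    if (lock == NULL)
        lock = PyThread_allocate_lock();           /* may return NULL on OOM */
    return lock;
}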
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
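/* Editor's note: __dealloc__ returns a pooled lock with a swap-with-last
 * removal -- decrement the used count and exchange the matching slot with
 * the last in-use slot, which keeps the live locks contiguous without
 * shifting the array; a lock that never came from the pool is freed
 * instead (the for/else above). The same pattern over the demo pool
 * sketched earlier (hypothetical names): */
static void demo_release_lock(PyThread_type_lock lock) {
    int i;
    for (i = 0; i < demo_locks_used; i++) {
        if (demo_lock_pool[i] == lock) {
            demo_locks_used--;                     /* shrink the in-use range */
            if (i != demo_locks_used) {            /* swap with the last used slot */
                PyThread_type_lock tmp = demo_lock_pool[demo_locks_used];
                demo_lock_pool[demo_locks_used] = demo_lock_pool[i];
                demo_lock_pool[i] = tmp;
            }
            return;                                /* lock stays in the pool */
        }
    }
    PyThread_free_lock(lock);                      /* not pooled: release it */
}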
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
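/* Editor's note: get_item_pointer() iterates the index object at the
 * Python level and lets pybuffer_index() advance `itemp` by
 * index * stride for each dimension (with bounds and negative-index
 * handling). Stripped of the object iteration, the address computation is
 * plain strided byte arithmetic (self-contained sketch, hypothetical
 * name): */
#include <Python.h>
static char *demo_item_pointer(char *buf, const Py_ssize_t *strides,
                               const Py_ssize_t *indices, int ndim) {
    char *itemp = buf;
    int dim;
    for (dim = 0; dim < ndim; dim++)
        itemp += indices[dim] * strides[dim];  /* byte offset per dimension */
    return itemp;  /* unlike pybuffer_index(), no bounds checks here */
}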
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":407
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":413
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
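/* Editor's note: memoryview.__getitem__ above has three outcomes. A bare
 * `...` index returns self unchanged; if _unellipsify() reports that any
 * slice objects are present, the call dispatches to memview_slice() and a
 * new memoryview is returned; otherwise every index is an integer, so
 * get_item_pointer() resolves the element address and
 * convert_item_to_object() unpacks it according to the buffer's format
 * string. */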
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 418, __pyx_L1_error)
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":420
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(1, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":427
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":429
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
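/* is_slice() normalizes the assigned value for slice assignment: anything
   that is not already a View.MemoryView.memoryview is wrapped in one using
   the view's own flags, with PyBUF_WRITABLE cleared (the source need not be
   writable) and PyBUF_ANY_CONTIGUOUS added. A value that does not support
   the buffer protocol makes the constructor raise TypeError, which is
   swallowed and mapped to a None return so the caller can fall back to
   scalar assignment. */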
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":439
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
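/* setitem_slice_assignment() copies one memoryview into another:
   get_slice_from_memview() materializes a __Pyx_memviewslice for each side,
   and memoryview_copy_contents() performs the element-wise copy (broadcasting
   where the shapes allow), taking dtype_is_object into account for
   per-element reference counting. */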
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":446
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error)
/* "View.MemoryView":447
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error)
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
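/* setitem_slice_assign_scalar() broadcasts a single value across a slice.
   The value is packed exactly once into scratch space: the stack array
   `cdef int array[128]` when the itemsize fits, otherwise a PyMem_Malloc()
   allocation released in the finally block. assert_direct_dimensions()
   verifies there are no indirect (suboffset) dimensions, after which
   slice_assign_scalar() replicates the one packed item over every element
   of the destination. */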
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
  int __pyx_v_array[0x80]; /* 128 ints (typically 512 bytes), matching `cdef int array[128]` */
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error)
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":462
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":464
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
      (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* borrowed reference; slice_assign_scalar() adjusts refcounts per destination element when dtype_is_object */
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":470
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":476
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
        PyMem_Free(__pyx_v_tmp); /* PyMem_Free(NULL) is a no-op, so this is safe on the stack-buffer path */
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
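/* setitem_indexed() handles the no-slice case: get_item_pointer() resolves
   the full integer index tuple to the element's address, and
   assign_item_from_object() converts and stores the value there. */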
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
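/* convert_item_to_object() is the generic element-read fallback: the raw
   bytes of one element are unpacked with struct.unpack() against the
   buffer's format string. A single-character format yields the scalar
   itself, longer formats yield the unpacked tuple, and struct.error is
   translated into ValueError("Unable to convert item to object"). */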
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(1, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
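/* assign_item_from_object() is the matching element-write fallback: the
   value (splatted if it is a tuple) is packed with struct.pack() against
   the buffer's format string, and the resulting bytes are copied into the
   element one byte at a time. */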
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(1, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
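/* __getbuffer__ below re-exports the wrapped buffer per PEP 3118: it
   rejects PyBUF_WRITABLE requests on read-only views, fills shape, strides,
   suboffsets and format only when the corresponding flag bits ask for them,
   and sets info->obj to the memoryview itself so the exporter stays alive
   for the duration of the request. A minimal consumer-side sketch follows
   (illustrative only, not part of the generated module; `mv` stands for any
   object whose type implements this slot): */
#if 0
static int read_first_byte(PyObject *mv, unsigned char *out) {
    Py_buffer view;
    /* PyBUF_FULL_RO requests shape/strides/suboffsets/format, read-only */
    if (PyObject_GetBuffer(mv, &view, PyBUF_FULL_RO) != 0)
        return -1;                      /* exception already set */
    *out = ((unsigned char *)view.buf)[0];
    PyBuffer_Release(&view);            /* must pair every successful GetBuffer */
    return 0;
}
#endif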
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
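/* The T property returns a transposed view: memoryview_copy() builds a new
   _memoryviewslice sharing the same underlying buffer, and
   transpose_memslice() reverses its shape/strides in place (it signals
   failure by returning 0, which the generated code checks). */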
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
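/* base simply exposes self.obj, the object this view was constructed from,
   giving access to the original buffer exporter. */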
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
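/* shape rebuilds a Python tuple from the C-level Py_ssize_t array
   view.shape[0..ndim), one integer per dimension; the pointer loop below is
   the compiled form of the list comprehension shown in the source comment. */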
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
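/* [Editorial note -- illustrative sketch, not part of Cython's output; the
 * function name is hypothetical.]
 * The generated getter above walks the raw Py_ssize_t array with pointer
 * arithmetic, boxes each extent, and goes through an intermediate list plus
 * PyList_AsTuple only because the Cython source is a list comprehension.
 * A direct hand-written equivalent would preallocate the tuple:
 */
static PyObject *shape_tuple_sketch(const Py_ssize_t *shape, int ndim) {
    PyObject *result = PyTuple_New(ndim);
    int i;
    if (!result) return NULL;
    for (i = 0; i < ndim; i++) {
        PyObject *item = PyLong_FromSsize_t(shape[i]);
        if (!item) { Py_DECREF(result); return NULL; }
        PyTuple_SET_ITEM(result, i, item); /* steals the reference to item */
    }
    return result;
}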
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 570, __pyx_L1_error)
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":572
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":579
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
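/* [Editorial note -- illustrative sketch, not part of Cython's output; the
 * function name is hypothetical.]
 * In the buffer protocol, a suboffset >= 0 marks an indirect (pointer-
 * chased) dimension; a NULL suboffsets array means every dimension is
 * direct, which the property reports as (-1,) * ndim. The generated code
 * realises that repetition with PyNumber_Multiply on a cached (-1,) tuple
 * (__pyx_tuple__13 above); the same step written out by hand:
 */
static PyObject *default_suboffsets_sketch(int ndim) {
    PyObject *minus_one = Py_BuildValue("(i)", -1);   /* the (-1,) tuple */
    PyObject *n, *result;
    if (!minus_one) return NULL;
    n = PyLong_FromLong(ndim);
    if (!n) { Py_DECREF(minus_one); return NULL; }
    result = PyNumber_Multiply(minus_one, n);         /* tuple repetition */
    Py_DECREF(minus_one);
    Py_DECREF(n);
    return result;
}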
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":603
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
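/* [Editorial note -- illustrative sketch, not part of Cython's output; the
 * function name is hypothetical.]
 * size is computed lazily: the product of the shape entries is taken once
 * and cached in self->_size, so later accesses (including the nbytes
 * getter above, which multiplies self.size by the itemsize) skip the loop.
 * The generated code keeps the running product as a PyObject* so it can
 * grow past fixed-width integers; a plain-C analogue of the product, with
 * that arbitrary-precision behaviour traded away:
 */
static Py_ssize_t element_count_sketch(const Py_ssize_t *shape, int ndim) {
    Py_ssize_t count = 1;
    int i;
    for (i = 0; i < ndim; i++)
        count *= shape[i];   /* overflow checking omitted in this sketch */
    return count;            /* 1 for a 0-dimensional view, as in Python */
}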
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":609
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
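/* [Editorial note -- illustrative sketch, not part of Cython's output; the
 * function name is hypothetical.]
 * The generated __repr__ builds a 2-tuple and applies the Python-level %
 * operator to "<MemoryView of %r at 0x%x>". Hand-written C could instead
 * use PyUnicode_FromFormat, whose %R conversion calls repr() and whose %p
 * prints the object address (the address rendering differs cosmetically
 * from formatting id(self) with 0x%x):
 */
static PyObject *memoryview_repr_sketch(PyObject *self, PyObject *base) {
    /* The source formats repr(self.base.__class__.__name__). */
    PyObject *name = PyObject_GetAttrString((PyObject *)Py_TYPE(base), "__name__");
    PyObject *result;
    if (!name) return NULL;
    result = PyUnicode_FromFormat("<MemoryView of %R at %p>", name, (void *)self);
    Py_DECREF(name);
    return result;
}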
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
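/* [Editorial note -- illustrative sketch, not part of Cython's output; the
 * function name is hypothetical.]
 * slice_is_contig checks that the strides describe one packed block: for
 * 'C' order the expected stride starts at the itemsize and is multiplied
 * by each extent while walking dimensions from last to first ('F' order is
 * the mirror image, walking first to last). This sketch exempts extent-1
 * dimensions, a common relaxation; the generated helper may compare every
 * dimension strictly:
 */
static int is_c_contig_sketch(const Py_ssize_t *shape,
                              const Py_ssize_t *strides,
                              Py_ssize_t itemsize, int ndim) {
    Py_ssize_t expected = itemsize;
    int i;
    for (i = ndim - 1; i >= 0; i--) {
        if (shape[i] != 1 && strides[i] != expected)
            return 0;          /* a gap or transposition breaks contiguity */
        expected *= shape[i];
    }
    return 1;
}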
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":645
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":647
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":648
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":653
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
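/* [Editorial note -- illustrative sketch, not part of Cython's output; the
 * function name is hypothetical.]
 * copy() and copy_fortran() above differ only in which contiguity request
 * survives: each clears the opposite PyBUF_*_CONTIGUOUS bit from the
 * stored flags and then ORs in the layout it wants before handing the
 * slice to slice_copy_contig. The flag arithmetic in isolation:
 */
static int copy_flags_sketch(int flags, int want_fortran) {
    if (want_fortran)  /* copy_fortran(): drop C-contiguity, request F */
        return (flags & ~PyBUF_C_CONTIGUOUS) | PyBUF_F_CONTIGUOUS;
    /* copy(): drop F-contiguity, request C */
    return (flags & ~PyBUF_F_CONTIGUOUS) | PyBUF_C_CONTIGUOUS;
}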
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":658
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":659
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":660
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
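/* [Editorial note -- illustrative sketch, not part of Cython's output; the
 * function name is hypothetical.]
 * memoryview_cwrapper simply calls the extension type with three arguments
 * and then pokes the typeinfo pointer into the fresh instance. The tuple
 * packing above could be collapsed with PyObject_CallFunction; note the
 * generated code passes a real bool for dtype_is_object, while the "i"
 * format unit here passes an int that the constructor coerces:
 */
static PyObject *memoryview_new_sketch(PyTypeObject *mv_type, PyObject *o,
                                       int flags, int dtype_is_object) {
    /* Equivalent to the Python call mv_type(o, flags, dtype_is_object). */
    return PyObject_CallFunction((PyObject *)mv_type, "Oii",
                                 o, flags, dtype_is_object);
}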
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":664
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
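/* [Editorial note -- illustrative sketch, not part of Cython's output; the
 * function name is hypothetical.]
 * Because the target of isinstance() is a known extension type, Cython
 * compiles the check down to __Pyx_TypeCheck rather than a Python-level
 * isinstance call; the public-API spelling of the same test:
 */
static int memoryview_check_sketch(PyObject *o, PyTypeObject *mv_type) {
    /* Reduces to a subtype test on the type pointer; no Python call. */
    return PyObject_TypeCheck(o, mv_type);
}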
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":672
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":674
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":676
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":677
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":678
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 679, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":683
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":685
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":686
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":689
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(1, 689, __pyx_L1_error)
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":691
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":692
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":694
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":696
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":698
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
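/* assert_direct_dimensions below rejects any dimension whose suboffset is
 * non-negative. In the PEP 3118 buffer model a suboffset >= 0 marks an
 * indirect (pointer-chasing, PIL-style) dimension, which the slicing code
 * in this module does not support; -1 means the dimension is direct.
 */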
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":701
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(1, 703, __pyx_L1_error)
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
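/* memview_slice below drives one slicing operation end to end: it copies
 * the source slice descriptor, then walks the (already unellipsified)
 * index tuple and dispatches per entry -- an integer index drops a
 * dimension, None inserts a length-1 axis with stride 0, and a slice
 * object is normalized via slice_memviewslice. The result is wrapped in a
 * fresh memoryview, preserving the to_object/to_dtype converters when the
 * source was itself a _memoryviewslice.
 */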
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(1, 722, __pyx_L1_error)
}
}
#endif
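/* The assert above is compiled out entirely when the extension is built
 * with CYTHON_WITHOUT_ASSERTIONS, and skipped at runtime when the
 * interpreter runs in optimized mode (Py_OptimizeFlag set by python -O).
 */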
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":728
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(1, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error)
/* "View.MemoryView":748
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error)
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":760
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
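/* Note on the "index.start or 0" idiom quoted above: the `or` collapses
 * both None and an explicit 0 to 0, so the numeric value alone cannot
 * distinguish "no bound given" from "bound of 0". The separate
 * have_start/have_stop/have_step flags computed a few lines further down
 * ("... is not None") carry that distinction into slice_memviewslice.
 */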
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":764
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error)
/* "View.MemoryView":774
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) }
/* "View.MemoryView":779
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) }
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
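/* slice_memviewslice below is the nogil worker invoked once per source
 * dimension. For a plain integer index it only bounds-checks (after one
 * negative-index wraparound); for a slice it clamps start/stop to the
 * shape, fills in defaults that depend on the sign of step, and writes the
 * resulting shape/stride/suboffset into dst at position new_ndim. It
 * returns 0 on success and -1 on error, re-acquiring the GIL just long
 * enough to record a traceback.
 */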
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error)
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":835
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error)
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":855
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":868
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
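/* Slice-length rule applied below, under cdivision(True): the true value
 * is ceil((stop - start) / step), obtained from C truncating division plus
 * a remainder fix-up, then clamped at zero. A minimal standalone sketch of
 * the same arithmetic (not part of the generated module):
 *
 *   static Py_ssize_t slice_len(Py_ssize_t start, Py_ssize_t stop,
 *                               Py_ssize_t step) {
 *       Py_ssize_t n = (stop - start) / step;   // truncates toward zero
 *       if ((stop - start) - step * n) n += 1;  // remainder => round up
 *       return n < 0 ? 0 : n;                   // empty slice clamps to 0
 *   }
 *
 * Worked example: shape = 10, slice 8:1:-2 gives start = 8, stop = 1,
 * step = -2; (1 - 8) / -2 = 3 with a remainder, so new_shape = 4
 * (elements 8, 6, 4, 2).
 */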
/* "View.MemoryView":875
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":884
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":892
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":899
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":900
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":902
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":904
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
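/* pybuffer_index below turns (buffer pointer, index, dim) into the address
 * of a single element: a 0-dim buffer is treated as a flat array of
 * view.len / itemsize items with stride == itemsize; a negative index is
 * wrapped once by the shape; anything still outside [0, shape) raises
 * IndexError; and a non-negative suboffset means the computed slot holds a
 * pointer that must be dereferenced and offset to reach the data.
 */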
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(1, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(1, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
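/* transpose_memslice below reverses the axes of a slice in place by
 * swapping shape[i]/shape[j] and strides[i]/strides[j] for i in
 * range(ndim / 2), j = ndim - 1 - i -- e.g. strides (s0, s1, s2) become
 * (s2, s1, s0) without touching the data. Indirect dimensions cannot be
 * transposed this way, hence the ValueError. Per its `except 0`
 * declaration it returns 1 on success and 0 on error.
 */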
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
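/* The _memoryviewslice methods that follow specialize the base memoryview
 * behaviour for views created from an existing slice: __dealloc__ releases
 * the retained from_slice, and the convert/assign item hooks prefer the
 * dtype-specific to_object_func/to_dtype_func pointers, falling back to
 * the generic path in the base memoryview class when they are NULL.
 */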
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
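/* assign_item_from_object below is the write-side mirror of
 * convert_item_to_object: note that its error check compares against 0
 * rather than -1, matching the `except 0` convention of the to_dtype_func
 * pointer quoted at the top of this class fragment.
 */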
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
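/* The base property below exposes from_object -- the original object the
 * slice chain started from -- so user code can recover the owning object
 * from any derived slice, in the spirit of memoryview.base.
 */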
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
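/* __reduce_cython__ / __setstate_cython__ below exist only to fail loudly:
 * because the type has a non-trivial __cinit__, the default pickle
 * protocol cannot reconstruct it, so both raise TypeError and
 * _memoryviewslice objects are effectively unpicklable.
 */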
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
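/* Both __reduce_cython__ and __setstate_cython__ unconditionally raise, so
 * _memoryviewslice objects are deliberately unpicklable -- presumably because
 * their state lives in C pointers established in __cinit__, which pickle
 * cannot reconstruct. Calling pickle.dumps() on such an object therefore
 * raises TypeError("no default __reduce__ due to non-trivial __cinit__"),
 * the message interned as __pyx_tuple__18/__pyx_tuple__19 above.
 */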
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
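/* Summary of the constructor above: it wraps an existing __Pyx_memviewslice
 * in a fresh _memoryviewslice object, bumps the slice's acquisition count
 * (__PYX_INC_MEMVIEW), records the parent memoryview's `base` as
 * from_object, copies the parent's Py_buffer, then repoints
 * buf/shape/strides/suboffsets at the slice's own arrays and picks
 * PyBUF_RECORDS or PyBUF_RECORDS_RO from the parent's writability.
 * view.len is recomputed as itemsize * prod(shape[:ndim]); the detour
 * through Python integers (PyInt_FromSsize_t / PyNumber_InPlaceMultiply) is
 * generated because `length` is an untyped Python object in the Cython
 * source. A minimal plain-C equivalent, assuming no overflow:
 *
 *     Py_ssize_t len = view->itemsize;
 *     for (int i = 0; i < ndim; i++)
 *         len *= view->shape[i];
 *     view->len = len;
 */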
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1059
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
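/* Two cases above: a _memoryviewslice already owns a __Pyx_memviewslice, so
 * a pointer straight into the object is returned and the caller-supplied
 * `mslice` is left untouched; a plain memoryview has its header copied into
 * `mslice` first via slice_copy. Either way the returned pointer appears
 * valid only while `memview` stays alive, since it aliases either the
 * object itself or a header whose memview field merely borrows it.
 */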
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1067
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1068
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1069
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1071
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1072
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1074
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1075
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1076
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1077
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
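/* slice_copy flattens a memoryview's Py_buffer into the fixed-size
 * __Pyx_memviewslice header. Note the suboffsets handling: PEP 3118 allows
 * view.suboffsets to be NULL when no dimension is indirect, so each missing
 * entry is recorded as -1, the buffer protocol's "no indirection" marker.
 * dst.memview is stored without taking a reference; callers that need the
 * header to outlive `memview` must acquire one themselves, as
 * memoryview_fromslice does via __PYX_INC_MEMVIEW.
 */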
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1098
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1103
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
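/* The copy above preserves dtype conversion behavior: if the source is
 * itself a _memoryviewslice, its to_object_func/to_dtype_func pointers are
 * carried over to the new object; for a plain memoryview they are NULL and
 * item access falls back to the generic base-class paths. Repeated copies
 * of a typed slice therefore keep converting items the same way.
 */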
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1111
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1113
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
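/* A hand-rolled absolute value is used here rather than labs()/llabs(),
 * presumably to sidestep Py_ssize_t width portability (it is wider than
 * long on LLP64 platforms) and to keep the helper trivially nogil.
 * Behavior sketch: abs_py_ssize_t(-3) == 3, abs_py_ssize_t(7) == 7.
 */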
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1121
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1122
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1124
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1129
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1132
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1135
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1137
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
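/* The heuristic above compares the innermost non-degenerate strides:
 * c_stride is the stride of the last dimension with extent > 1 (scanning
 * backwards), f_stride that of the first (scanning forwards). Whichever is
 * smaller in magnitude indicates where neighboring elements sit closest in
 * memory. Worked example for an 8-byte dtype with shape {3, 4}:
 *
 *     strides {32, 8}  ->  c_stride = 8,  f_stride = 32  ->  'C'
 *     strides {8, 24}  ->  c_stride = 24, f_stride = 8   ->  'F'
 *
 * Ties (e.g. a fully degenerate slice, where both strides stay 0) fall
 * through to 'C' because the comparison is <=.
 */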
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1147
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1149
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1150
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1154
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1155
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1157
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1159
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1160
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1162
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1163
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1167
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1168
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
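/* Structure of the recursive copy above: each call peels off the leading
 * dimension. The 1-D base case has a fast path -- when both strides are
 * positive and equal to itemsize, the whole row is a single contiguous run:
 *
 *     memcpy(dst_data, src_data, itemsize * dst_extent);
 *
 * Otherwise it copies item by item (or sub-array by sub-array in the N-D
 * case, recursing with strides + 1 / shape + 1 and ndim - 1), advancing
 * src_data and dst_data by their respective strides after each step. The
 * destination extent drives every loop; extent agreement with the source
 * is validated by the caller (see _err_extents further down), which is why
 * src_extent is CYTHON_UNUSED here.
 */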
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1173
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
/* "View.MemoryView":1179
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for shape in src.shape[:ndim]:
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1181
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*
* for shape in src.shape[:ndim]: # <<<<<<<<<<<<<<
* size *= shape
*
*/
__pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_shape = (__pyx_t_2[0]);
/* "View.MemoryView":1182
*
* for shape in src.shape[:ndim]:
* size *= shape # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * __pyx_v_shape);
}
/* "View.MemoryView":1184
* size *= shape
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
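/* slice_get_size folds the extents into a byte count:
 * size = itemsize * prod(shape[:ndim]). E.g. an 8-byte dtype with shape
 * {3, 4, 5} gives 8 * 60 = 480 bytes. Note this is the logical payload
 * size, independent of strides, so it is also the right allocation size
 * for the contiguous scratch buffer in copy_data_to_temp below.
 */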
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1197
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1198
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1201
* stride *= shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1202
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1203
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1205
* stride *= shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
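/* fill_contig_strides_array writes the strides of a contiguous layout and
 * returns the total extent in bytes. 'F' order accumulates from the first
 * axis outward; any other order ('C') accumulates from the last axis
 * backward. Worked example for shape {3, 4, 5} with an initial stride
 * (itemsize) of 8:
 *
 *     order 'C': strides = {160, 40, 8},  return 480
 *     order 'F': strides = {8, 24, 96},   return 480
 *
 * The 480 returned equals slice_get_size for the same shape, which is how
 * copy_data_to_temp below keeps its malloc() and its strides consistent.
 */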
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1219
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1220
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1222
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1224
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1227
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1228
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1229
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1230
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1231
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1233
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1237
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1244
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1246
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
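/* Ownership notes for copy_data_to_temp: the returned pointer comes from a
 * bare malloc(size) and the caller -- memoryview_copy_contents below -- is
 * expected to free() it once the copy completes. tmpslice borrows
 * src.memview rather than acquiring it, so the scratch slice must not
 * outlive the source. Dimensions of extent 1 get stride 0, mirroring how
 * broadcast dimensions are represented. The error path re-acquires the GIL
 * (#ifdef WITH_THREAD) only to record the traceback, since the function
 * itself runs nogil.
 */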
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1254
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1253
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(1, 1253, __pyx_L1_error)
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(1, 1258, __pyx_L1_error)
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1263
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(1, 1263, __pyx_L1_error)
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1265
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(1, 1265, __pyx_L1_error)
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
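/* memoryview_copy_contents is the workhorse behind memoryview assignment.
 * The steps visible below: (1) broadcast leading dimensions so both slices
 * share ndim = max(src_ndim, dst_ndim); (2) verify extents, treating a src
 * extent of 1 as broadcastable by forcing its stride to 0; (3) reject
 * indirect (suboffset >= 0) dimensions; (4) if src and dst overlap in
 * memory, copy src into a temporary buffer first; (5) fast path: when
 * nothing was broadcast and both slices are C-contiguous, or both
 * F-contiguous, a single memcpy suffices; (6) otherwise fall back to an
 * element-wise strided copy, transposing both slices first when Fortran
 * order is the better traversal. Returns 0 on success, -1 after raising.
 */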
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1276
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1277
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1279
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1280
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1281
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1285
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1287
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1289
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1291
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1294
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1295
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1297
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error)
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1305
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1307
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1308
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1314
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1320
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1321
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1322
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1323
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1324
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1329
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error)
/* "View.MemoryView":1330
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error)
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1332
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1333
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1334
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1336
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1337
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
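/* broadcast_leading shifts an ndim-slice rightwards inside its fixed-size
 * shape/strides/suboffsets arrays and fills the freed leading slots with
 * length-1 dimensions. As a hypothetical example, a 2-D slice of shape
 * (4, 5) broadcast against a 4-D destination becomes shape (1, 1, 4, 5); the
 * leading strides are set to strides[0] (any value works for an extent of 1)
 * and the leading suboffsets to -1, i.e. "direct".
 */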
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1344
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1346
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1347
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1348
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1349
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1351
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1352
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1353
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1354
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
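/* refcount_copying brackets raw byte copies over object-typed memoryviews:
 * called with inc=False before a copy it DECREFs the PyObject pointers about
 * to be overwritten in dst, and with inc=True afterwards it INCREFs the
 * pointers that were just byte-copied in. For non-object dtypes it is a
 * no-op, which is why the copy routines can call it unconditionally.
 */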
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1367
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
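/* refcount_objects_in_slice walks the slice recursively: at ndim == 1 each
 * element is reinterpreted as a PyObject* and INCREF'd or DECREF'd; for
 * higher dimensions it recurses with shape + 1 and strides + 1, advancing
 * data by strides[0] after every iteration of the outermost extent.
 */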
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1381
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1384
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1386
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1388
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1389
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1391
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
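/* slice_assign_scalar is the nogil core of scalar assignment to a
 * memoryview: it wraps the fill in the same refcount bracketing used for
 * copies, then _slice_assign_scalar recurses over the dimensions and
 * memcpy's the single `item` of `itemsize` bytes into every innermost
 * position, stepping by strides[0] at each level.
 */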
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1400
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1401
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1403
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1411
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1412
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1415
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1417
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1419
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1420
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1422
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
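/* The "(tree fragment)" functions below implement pickling for the
 * module-level Enum singletons. __pyx_unpickle_Enum is the reconstructor:
 * it validates a checksum baked in at code-generation time (0xb068931 here),
 * allocates via Enum.__new__(type), then applies the saved state.
 * Conceptually, __reduce__ on an Enum returns something like
 * (__pyx_unpickle_Enum, (Enum, 0xb068931, (name, dict_or_None))).
 */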
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(1, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(1, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(1, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
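/* From here on the file is per-type slot boilerplate: tp_new/tp_dealloc
 * plus method, getset, sequence, mapping and buffer tables for the array,
 * Enum and memoryview extension types, with preprocessor guards so the same
 * generated file builds across CPython (and PyPy) versions.
 */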
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_array___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if (!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
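/* Enum participates in the cycle GC (note Py_TPFLAGS_HAVE_GC in tp_flags
 * below): tp_traverse visits its single `name` reference and tp_clear swaps
 * it for Py_None, the standard pattern for breaking reference cycles.
 */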
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
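/* memoryview's tp_new mirrors array's but additionally NULLs view.obj before
 * __cinit__ runs, presumably so traversal and cleanup never see garbage in
 * the Py_buffer's owner field; tp_traverse/tp_clear accordingly visit and
 * clear view.obj alongside the Python-level attributes.
 */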
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryview___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
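/* sq_item thunk: integer indexing is routed through the mapping protocol by
 * boxing the index and delegating to mp_subscript (memoryview.__getitem__). */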
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
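/* Property thunks: each memoryview property declared in the .pyx source (T,
 * base, shape, strides, suboffsets, ndim, itemsize, nbytes, size) gets a small
 * getter that forwards to the generated __get__ wrapper; they are collected in
 * __pyx_getsets_memoryview below. */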
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
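/* _memoryviewslice extends memoryview: it additionally carries the object the
 * slice was taken from (from_object) and the raw C slice (from_slice), both
 * initialised to empty in tp_new and filled in when a slice object is built. */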
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryviewslice___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"monotonic_align.core._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0},
{0, 0, 0, 0}
};
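/* Module method table: maximum_path_c is the only statically registered
 * function (__pyx_unpickle_Enum is added to the module dict later, during the
 * exec phase). With CYTHON_PEP489_MULTI_PHASE_INIT the module is created via
 * PEP 489 multi-phase initialisation (Py_mod_create / Py_mod_exec slots);
 * otherwise a single-phase PyModuleDef with m_size = -1 is used. */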
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_core(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_core},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"core",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
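/* Interned-string table: one row per string literal used by the module, laid
 * out as {&slot, bytes, byte length, encoding, is_unicode, is_str, intern}
 * per Cython's __Pyx_StringTabEntry layout. __Pyx_InitStrings() walks this
 * table once at import time and fills every slot with the interned object. */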
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1},
{&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
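/* Constant-tuple cache: the argument tuple of every `raise XxxError("...")`
 * in the Cython sources is built once here and reused at each raise site. The
 * "View.MemoryView":NNN banners echo the originating .pyx/.pxd lines. */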
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__16);
__Pyx_GIVEREF(__pyx_slice__16);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__22);
__Pyx_GIVEREF(__pyx_tuple__22);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__23);
__Pyx_GIVEREF(__pyx_tuple__23);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__25);
__Pyx_GIVEREF(__pyx_tuple__25);
__pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
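/* __Pyx_InitGlobals(): makes sure threading is initialised on Pythons that
 * still need an explicit PyEval_InitThreads() call, interns the string table
 * above, and caches the small integer constants (0, 1, -1 and the pickle
 * checksum 184977713) used elsewhere in the module. */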
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
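/* Module start-up is split into uniform phases (global init, variable and
 * function export/import, type init); several are empty for this module but
 * are kept so every Cython-generated module shares the same skeleton. */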
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
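/* Entry points: Python 2 builds call initcore(), Python 3 builds call
 * PyInit_core(). Under multi-phase init, PyInit_core() only hands back the
 * module definition via PyModuleDef_Init(); the actual module body runs later
 * in __pyx_pymod_exec_core(). */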
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initcore(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_core(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
static PyThread_type_lock __pyx_t_2[8];
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
PyEval_InitThreads();
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_monotonic_align__core) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "monotonic_align.core")) {
if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "monotonic_align/core.pyx":7
* @cython.boundscheck(False)
* @cython.wraparound(False)
* cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
* cdef int x
* cdef int y
*/
__pyx_k_ = (-1e9);
/* "monotonic_align/core.pyx":1
* cimport cython # <<<<<<<<<<<<<<
* from cython.parallel import prange
*
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
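  /* A small pool of THREAD_LOCKS_PREALLOCATED (= 8) locks is allocated
   * eagerly; freshly created memoryviews take one of these to guard their
   * acquisition count before falling back to allocating a lock on demand. */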
__pyx_t_2[0] = PyThread_allocate_lock();
__pyx_t_2[1] = PyThread_allocate_lock();
__pyx_t_2[2] = PyThread_allocate_lock();
__pyx_t_2[3] = PyThread_allocate_lock();
__pyx_t_2[4] = PyThread_allocate_lock();
__pyx_t_2[5] = PyThread_allocate_lock();
__pyx_t_2[6] = PyThread_allocate_lock();
__pyx_t_2[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init monotonic_align.core");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* MemviewSliceInit */
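/* __Pyx_init_memviewslice() copies shape/strides/suboffsets out of the
 * exporter's Py_buffer into the flat __Pyx_memviewslice struct, synthesising
 * C-contiguous strides when the exporter leaves `strides` NULL, and bumps the
 * owning memoryview's acquisition count (taking a strong reference unless the
 * caller already holds a fresh one). */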
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None))
return;
if (unlikely(__pyx_get_slice_count(memview) < 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (unlikely(first_time)) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None)) {
memslice->memview = NULL;
return;
}
if (unlikely(__pyx_get_slice_count(memview) <= 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (unlikely(last_time)) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* None */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = Py_TYPE(func)->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyFunctionFastCall */
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1  /* always taken: this fallback is compiled unconditionally */
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
/* function called with no arguments, but all parameters have
a default value: use default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (__Pyx_PyFastCFunction_Check(func)) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* BytesEquals */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* DivInt[Py_ssize_t] */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* DivInt[long] */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
(void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = NULL;
PyObject *py_funcname = NULL;
#if PY_MAJOR_VERSION < 3
PyObject *py_srcfile = NULL;
py_srcfile = PyString_FromString(filename);
if (!py_srcfile) goto bad;
#endif
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
if (!py_funcname) goto bad;
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
if (!py_funcname) goto bad;
funcname = PyUnicode_AsUTF8(py_funcname);
if (!funcname) goto bad;
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
if (!py_funcname) goto bad;
#endif
}
#if PY_MAJOR_VERSION < 3
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
#else
py_code = PyCode_NewEmpty(filename, funcname, py_line);
#endif
Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline
return py_code;
bad:
Py_XDECREF(py_funcname);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_srcfile);
#endif
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
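/* Note on the cache key used above: C line numbers are stored negated
(c_line ? -c_line : py_line), so a C line and a Python line with the same
number cannot collide in the shared code-object cache. A key of 0 is rejected
by __pyx_insert_code_object, matching the convention that c_line == 0 means
"no C line information available". */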
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
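/* Worked example for the test above: a C-contiguous 2x3 slice of 4-byte items
has strides {12, 4}. With order != 'F' the scan starts at the last dimension:
itemsize (4) must equal strides[1], is then multiplied by shape[1] (3) to 12,
which must equal strides[0]. Any mismatch, or a suboffset >= 0 (an indirect
dimension), makes the slice non-contiguous. */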
/* OverlappingSlices */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
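/* The return expression above is the standard half-open interval check: two
ranges [start1, end1) and [start2, end2) intersect iff
start1 < end2 && start2 < end1. __pyx_get_array_memory_extents accounts for
negative strides by moving `start` down rather than `end` up, and adds one
itemsize at the end so that `end` is exclusive. */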
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
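/* How the probe above works: the union stores 0x01020304 as a 32-bit integer
and reads back its lowest-addressed byte. On a little-endian machine the least
significant byte (0x04) comes first, so u8[0] == 4 and the function returns 1;
on a big-endian machine u8[0] would be 1 and it returns 0. */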
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
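/* The probe structs above use the classic alignment trick:
sizeof(struct { char c; T x; }) - sizeof(T) equals the alignment requirement
of T, because the compiler must insert padding after `c` to align `x`. On a
typical LP64 ABI, for example, sizeof(__Pyx_st_double) == 16, so the computed
alignment of double is 16 - 8 == 8. The __Pyx_pad_* variants place the char
*after* the member to measure the char plus the tail padding of the struct
instead; as the comment above notes, the two results usually, but not
necessarily, agree. */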
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if (!((type->typegroup == 'H' || group == 'H') && type->size == size)) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ndim = ctx->head->field->type->ndim;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* consume the whitespace; without ++ts this loop would never terminate */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
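/* Example of what the parser above consumes: for a dtype declared as
struct { int a; float b; }, the PEP 3118 format string is "T{i:a:f:b:}". The
loop reads 'T{', checks each member's type character against the
__Pyx_TypeInfo tree via __Pyx_BufFmt_ProcessTypeChunk, skips the ":name:"
annotations in the ':' case, and lets the '@', '=', '<', '>' and '^' prefixes
select between native and standard sizes and alignment. */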
/* TypeInfoCompare */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i > -1; i--) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
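/* Worked example for the C-contiguity branch above: for a 2x3 buffer with
itemsize 4, the loop walks dimensions from last to first with a running
element stride of 1, then 3, requiring strides[1] == 1*4 and
strides[0] == 3*4. Dimensions with extent <= 1 are exempt because their
stride cannot affect the memory layout. */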
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->len > 0) {
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
}
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3,
&__Pyx_TypeInfo_float, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG,
(PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
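/* Usage sketch (hypothetical; Cython emits its own call sites): these
converters turn a buffer-exporting object into a validated C-level slice.
The caller owns the resulting memview reference and must release it, e.g.:

__Pyx_memviewslice s = __Pyx_PyObject_to_MemoryviewSlice_dc_int(obj, PyBUF_WRITABLE);
if (!s.memview) goto error;            // conversion failed, exception is set
consume_ints(s.data, s.shape[0]);      // hypothetical consumer
__PYX_XDEC_MEMVIEW(&s, 1);             // drop the reference when done

axes_specs holds one spec per dimension: the "_dc_" variant requires a 1-D,
direct, C-contiguous buffer, while the "_d_d_dc_" variants above require 3-D
input whose innermost dimension is contiguous. */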
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
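/* How the macro above detects truncation: casting the value to the narrower
target type and back must round-trip. For instance, with target_type = char
and func_value = 300L, (long)(char)300 == 44 != 300, so the macro jumps to
raise_overflow instead of silently truncating. The `exc` flag additionally
propagates an already-set Python error when the conversion helper returns -1. */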
/* MemviewSliceCopyTemplate */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
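/* The digit-by-digit fast paths above rebuild small CPython longs directly
from their internal base-2^PyLong_SHIFT representation. With the common
PyLong_SHIFT == 30, a two-digit value decodes as
(digits[1] << 30) | digits[0]; e.g. digits {0, 1} yield 1 << 30. The
sizeof/shift guards ensure each reconstruction is compiled only when the
result provably fits the C type; anything larger falls through to
PyLong_AsLong and friends and, as a last resort, _PyLong_AsByteArray. */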
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
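/* Note: comparisons like `sizeof(long) < sizeof(long)` above are intentionally
constant-false. The body is instantiated from one template shared by every
integer type, and the compiler folds the impossible branches away at compile
time; they are not bugs. */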
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const char neg_one = (char) -1, const_zero = (char) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
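/* Caveat for the check above: only the major digit and the first minor digit
are compared (ctversion[0] and ctversion[2]), and the 4-byte buffers truncate
"%d.%d". That is adequate for the single-digit minor versions this code
targets, but it cannot tell 3.1 from 3.10; later Cython releases reworked
this check for exactly that reason. */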
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
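/* Note on the CYTHON_USE_PYLONG_INTERNALS fast path above: CPython stores a
 * long as sign-magnitude base-2^PyLong_SHIFT digits, with Py_SIZE(b) giving
 * the digit count (negative for negative values). For example, with
 * Py_SIZE(b) == 2 the value is
 *     ((size_t)digits[1] << PyLong_SHIFT) | (size_t)digits[0]
 * and with Py_SIZE(b) == -2 the same magnitude negated. The
 * "8 * sizeof(Py_ssize_t) > k * PyLong_SHIFT" guards ensure the shifted
 * magnitude fits in Py_ssize_t before taking the inline path; otherwise the
 * code falls back to PyLong_AsSsize_t(). */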
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) {
if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) {
return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o);
#if PY_MAJOR_VERSION < 3
} else if (likely(PyInt_CheckExact(o))) {
return PyInt_AS_LONG(o);
#endif
} else {
Py_ssize_t ival;
PyObject *x;
x = PyNumber_Index(o);
if (!x) return -1;
ival = PyInt_AsLong(x);
Py_DECREF(x);
return ival;
}
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
dpado.202001280935.single_push_and_check.h | //
// Created by Zhen Peng on 1/6/20.
//
#ifndef PADO_DPADO_H
#define PADO_DPADO_H
#include <vector>
//#include <unordered_map>
#include <map>
#include <algorithm>
#include <iostream>
#include <limits.h>
//#include <xmmintrin.h>
#include <immintrin.h>
#include <bitset>
#include <math.h>
#include <fstream>
#include <omp.h>
#include <cassert>  // assert
#include <cstring>  // memcpy
#include <cstdio>   // printf
#include <clocale>  // setlocale
#include "globals.h"
#include "dglobals.h"
#include "dgraph.h"
namespace PADO {
template <VertexID BATCH_SIZE = 1024>
class DistBVCPLL {
private:
static const VertexID BITPARALLEL_SIZE = 50;
const inti THRESHOLD_PARALLEL = 1 << 30;
// Structure for the type of label
struct IndexType {
struct Batch {
// VertexID batch_id; // Batch ID
VertexID start_index; // Index to the array distances where the batch starts
VertexID size; // Number of distance elements in this batch
Batch() = default;
Batch(VertexID start_index_, VertexID size_):
start_index(start_index_), size(size_)
{ }
// Batch(VertexID batch_id_, VertexID start_index_, VertexID size_):
// batch_id(batch_id_), start_index(start_index_), size(size_)
// { }
};
struct DistanceIndexType {
VertexID start_index; // Index to the array vertices where the same-distance vertices start
VertexID size; // Number of the same-distance vertices
UnweightedDist dist; // The real distance
DistanceIndexType() = default;
DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_):
start_index(start_index_), size(size_), dist(dist_)
{ }
};
// Bit-parallel Labels
UnweightedDist bp_dist[BITPARALLEL_SIZE];
uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0}
std::vector<Batch> batches; // Batch info
std::vector<DistanceIndexType> distances; // Distance info
std::vector<VertexID> vertices; // Vertices in the label, represented as temporary IDs
size_t get_size_in_bytes() const
{
return sizeof(bp_dist) +
sizeof(bp_sets) +
// batches.size() * sizeof(Batch) +
distances.size() * sizeof(DistanceIndexType) +
vertices.size() * sizeof(VertexID);
}
void clean_all_indices()
{
std::vector<Batch>().swap(batches);
std::vector<DistanceIndexType>().swap(distances);
std::vector<VertexID>().swap(vertices);
}
}; //__attribute__((aligned(64)));
struct ShortIndex {
// The indicator bit array uses BATCH_SIZE + 1 bits.
// indicator[BATCH_SIZE] is set if v has already received any new labels in the current batch;
// this helps update_label_indices() and lets the flag be reset along with the other indicator elements.
// std::bitset<BATCH_SIZE + 1> indicator; // Global indicator: indicator[r] (0 <= r < BATCH_SIZE) is set if root r has ever been selected as a candidate.
// If the Batch structure were not used, the indicator could be only BATCH_SIZE bits long.
// std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0);
std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0);
// Use a queue to store candidates
std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE);
VertexID end_candidates_que = 0;
std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0);
void indicator_reset()
{
std::fill(indicator.begin(), indicator.end(), 0);
}
}; //__attribute__((aligned(64)));
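// Note on ShortIndex: candidates_que plus the is_candidate byte array form the
// usual sparse-set pattern -- O(1) membership tests plus iteration over only
// the set entries. A minimal usage sketch (r is a hypothetical root offset
// within the current batch, not code from this file):
//   ShortIndex &si = short_index[v_local];
//   if (!si.is_candidate[r]) {
//       si.is_candidate[r] = 1;
//       si.candidates_que[si.end_candidates_que++] = r;
//   }
//   for (VertexID i = 0; i < si.end_candidates_que; ++i) {
//       VertexID root = si.candidates_que[i];
//       // ... process candidate root, then clear is_candidate[root]
//   }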
// Type of Bit-Parallel Label
struct BPLabelType {
UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 };
uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0}
};
// Type of Label Message Unit, for initializing distance table
struct LabelTableUnit {
VertexID root_id;
VertexID label_global_id;
UnweightedDist dist;
LabelTableUnit() = default;
LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
root_id(r), label_global_id(l), dist(d) {}
};
// Type of BitParallel Label Message Unit for initializing bit-parallel labels
struct MsgBPLabel {
VertexID r_root_id;
UnweightedDist bp_dist[BITPARALLEL_SIZE];
uint64_t bp_sets[BITPARALLEL_SIZE][2];
MsgBPLabel() = default;
MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
: r_root_id(r)
{
memcpy(bp_dist, dist, sizeof(bp_dist));
memcpy(bp_sets, sets, sizeof(bp_sets));
}
};
VertexID num_v = 0;
VertexID num_masters = 0;
// VertexID BATCH_SIZE = 0;
int host_id = 0;
int num_hosts = 0;
MPI_Datatype V_ID_Type;
std::vector<IndexType> L;
inline void bit_parallel_push_labels(
const DistGraph &G,
VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
std::vector<VertexID> &tmp_q,
VertexID &size_tmp_q,
std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
VertexID &size_tmp_sibling_es,
std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
VertexID &size_tmp_child_es,
const VertexID &offset_tmp_q,
std::vector<UnweightedDist> &dists,
UnweightedDist iter);
inline void bit_parallel_labeling(
const DistGraph &G,
std::vector<uint8_t> &used_bp_roots);
// inline void bit_parallel_push_labels(
// const DistGraph &G,
// VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
// std::vector<UnweightedDist> &dists,
// UnweightedDist iter);
// inline void bit_parallel_labeling(
// const DistGraph &G,
//// std::vector<IndexType> &L,
// std::vector<uint8_t> &used_bp_roots);
inline void batch_process(
const DistGraph &G,
// const VertexID b_id,
const VertexID roots_start,
const VertexID roots_size,
const std::vector<uint8_t> &used_bp_roots,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<uint8_t> &is_active,
// std::vector<bool> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated);
// std::vector<bool> &once_candidated);
inline VertexID initialization(
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// std::vector<bool> &once_candidated,
// VertexID b_id,
VertexID roots_start,
VertexID roots_size,
// std::vector<VertexID> &roots_master_local,
const std::vector<uint8_t> &used_bp_roots);
// inline void push_single_label(
// VertexID v_head_global,
// VertexID label_root_id,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter);
inline void schedule_label_pushing_para(
const DistGraph &G,
const VertexID roots_start,
const std::vector<uint8_t> &used_bp_roots,
const std::vector<VertexID> &active_queue,
const VertexID global_start,
const VertexID global_size,
const VertexID local_size,
// const VertexID start_active_queue,
// const VertexID size_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
const std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
std::vector<uint8_t> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const UnweightedDist iter);
inline void local_push_labels_seq(
VertexID v_head_global,
EdgeID start_index,
EdgeID bound_index,
VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// std::vector<bool> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter);
inline void local_push_labels_para(
const VertexID v_head_global,
const EdgeID start_index,
const EdgeID bound_index,
const VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
std::vector<VertexID> &tmp_got_candidates_queue,
VertexID &size_tmp_got_candidates_queue,
const VertexID offset_tmp_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
std::vector<VertexID> &tmp_once_candidated_queue,
VertexID &size_tmp_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter);
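// Note on the *_para variants above: instead of appending to the shared
// got_candidates_queue, each task writes into its own disjoint slice of a
// temporary queue starting at offset_tmp_queue and records how many entries
// it produced in size_tmp_got_candidates_queue. The slices are later
// compacted into the real queue (see the prefix_sum_for_offsets /
// collect_into_queue calls in bit_parallel_labeling for the same two-pass
// pattern), which avoids per-push synchronization.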
// inline void local_push_labels(
// VertexID v_head_local,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter);
inline void schedule_label_inserting_para(
const DistGraph &G,
const VertexID roots_start,
const VertexID roots_size,
std::vector<ShortIndex> &short_index,
const std::vector< std::vector<UnweightedDist> > &dist_table,
const std::vector<VertexID> &got_candidates_queue,
const VertexID start_got_candidates_queue,
const VertexID size_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<uint8_t> &is_active,
std::vector< std::pair<VertexID, VertexID> > &buffer_send,
const VertexID iter);
inline bool distance_query(
VertexID cand_root_id,
VertexID v_id,
VertexID roots_start,
// const std::vector<IndexType> &L,
const std::vector< std::vector<UnweightedDist> > &dist_table,
UnweightedDist iter);
inline void insert_label_only_seq(
VertexID cand_root_id,
// VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::pair<VertexID, VertexID> > &buffer_send);
// UnweightedDist iter);
inline void insert_label_only_para(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send)
std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
EdgeID &size_tmp_buffer_send,
const EdgeID offset_tmp_buffer_send);
inline void update_label_indices(
const VertexID v_id,
const VertexID inserted_count,
// std::vector<IndexType> &L,
std::vector<ShortIndex> &short_index,
// VertexID b_id,
const UnweightedDist iter);
inline void reset_at_end(
const DistGraph &G,
// VertexID roots_start,
// const std::vector<VertexID> &roots_master_local,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
const std::vector<VertexID> &once_candidated_queue,
const VertexID end_once_candidated_queue);
// template <typename E_T, typename F>
// inline void every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun);
template <typename E_T>
inline void one_host_bcasts_buffer_to_buffer(
int root,
std::vector<E_T> &buffer_send,
std::vector<E_T> &buffer_recv);
// // Function: get the destination host id which is i hop from this host.
// // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
// // -1 hop from host 0 is host 2.
// int hop_2_me_host_id(int hop) const
// {
// assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
// return (host_id + hop + num_hosts) % num_hosts;
// }
// // Function: get the destination host id which is i hop from the root.
// // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
// // -1 hop from host 0 is host 2.
// int hop_2_root_host_id(int hop, int root) const
// {
// assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
// assert(root >= 0 && root < num_hosts);
// return (root + hop + num_hosts) % num_hosts;
// }
size_t get_index_size()
{
size_t bytes = 0;
for (VertexID v_i = 0; v_i < num_masters; ++v_i) {
bytes += L[v_i].get_size_in_bytes();
}
return bytes;
}
// Test only
// uint64_t normal_hit_count = 0;
// uint64_t bp_hit_count = 0;
// uint64_t total_check_count = 0;
// uint64_t normal_check_count = 0;
// uint64_t total_candidates_num = 0;
// uint64_t set_candidates_num = 0;
// double initializing_time = 0;
// double candidating_time = 0;
// double adding_time = 0;
// double distance_query_time = 0;
// double init_index_time = 0;
// double init_dist_matrix_time = 0;
// double init_start_reset_time = 0;
// double init_indicators_time = 0;
//L2CacheMissRate cache_miss;
// double message_time = 0;
// double bp_labeling_time = 0;
// double initializing_time = 0;
// double scatter_time = 0;
// double gather_time = 0;
// double clearup_time = 0;
// TotalInstructsExe candidating_ins_count;
// TotalInstructsExe adding_ins_count;
// TotalInstructsExe bp_labeling_ins_count;
// TotalInstructsExe bp_checking_ins_count;
// TotalInstructsExe dist_query_ins_count;
// uint64_t caller_line = 0;
// End test
public:
// std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0);
DistBVCPLL() = default;
explicit DistBVCPLL(
const DistGraph &G);
// UnweightedDist dist_distance_query_pair(
// VertexID a_global,
// VertexID b_global,
// const DistGraph &G);
}; // class DistBVCPLL
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::
DistBVCPLL(
const DistGraph &G)
{
num_v = G.num_v;
assert(num_v >= BATCH_SIZE);
num_masters = G.num_masters;
host_id = G.host_id;
// {
// if (1 == host_id) {
// volatile int i = 0;
// while (i == 0) {
// sleep(5);
// }
// }
// }
num_hosts = G.num_hosts;
V_ID_Type = G.V_ID_Type;
// L.resize(num_v);
L.resize(num_masters);
VertexID remainder = num_v % BATCH_SIZE;
VertexID b_i_bound = num_v / BATCH_SIZE;
std::vector<uint8_t> used_bp_roots(num_v, 0);
//cache_miss.measure_start();
double time_labeling = -WallTimer::get_time_mark();
// bp_labeling_time -= WallTimer::get_time_mark();
bit_parallel_labeling(G,
used_bp_roots);
// bp_labeling_time += WallTimer::get_time_mark();
{//test
//#ifdef DEBUG_MESSAGES_ON
if (0 == host_id) {
printf("host_id: %u bp_labeling_finished.\n", host_id);
}
//#endif
}
std::vector<VertexID> active_queue(num_masters); // Every vertex v that is active should be put into this queue.
VertexID end_active_queue = 0;
std::vector<uint8_t> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
// std::vector<bool> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
std::vector<VertexID> got_candidates_queue(num_masters); // Every vertex v that has got candidates should be put into this queue.
VertexID end_got_candidates_queue = 0;
std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
// std::vector<bool> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
std::vector<ShortIndex> short_index(num_masters);
std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));
std::vector<VertexID> once_candidated_queue(num_masters); // If any indicator bit of short_index[v] is set, v is in this queue.
// Used mainly for resetting short_index[v].indicator.
VertexID end_once_candidated_queue = 0;
std::vector<uint8_t> once_candidated(num_masters, false);
// std::vector<bool> once_candidated(num_masters, false);
std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table.
std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels
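// Note: dist_table is the batch-local distance table -- dist_table[r][v] holds
// the tentative distance from batch root r (0 <= r < BATCH_SIZE) to global
// vertex v, initialized to MAX_UNWEIGHTED_DIST. Entries set from other hosts
// are remembered in recved_dist_table so that reset_at_end() can restore only
// the touched cells instead of wiping all BATCH_SIZE * num_v entries.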
//printf("b_i_bound: %u\n", b_i_bound);//test
for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//// {
//////#ifdef DEBUG_MESSAGES_ON
// if (0 == host_id) {
// printf("b_i: %u\n", b_i);//test
// }
//////#endif
//// }
batch_process(
G,
// b_i,
b_i * BATCH_SIZE,
BATCH_SIZE,
// L,
used_bp_roots,
active_queue,
end_active_queue,
got_candidates_queue,
end_got_candidates_queue,
short_index,
dist_table,
recved_dist_table,
bp_labels_table,
got_candidates,
is_active,
once_candidated_queue,
end_once_candidated_queue,
once_candidated);
// exit(EXIT_SUCCESS); //test
}
if (remainder != 0) {
// {
////#ifdef DEBUG_MESSAGES_ON
// if (0 == host_id) {
// printf("b_i: %u\n", b_i_bound);//test
// }
////#endif
// }
batch_process(
G,
// b_i_bound,
b_i_bound * BATCH_SIZE,
remainder,
// L,
used_bp_roots,
active_queue,
end_active_queue,
got_candidates_queue,
end_got_candidates_queue,
short_index,
dist_table,
recved_dist_table,
bp_labels_table,
got_candidates,
is_active,
once_candidated_queue,
end_once_candidated_queue,
once_candidated);
}
time_labeling += WallTimer::get_time_mark();
//cache_miss.measure_stop();
// Test
setlocale(LC_NUMERIC, "");
if (0 == host_id) {
printf("BATCH_SIZE: %u ", BATCH_SIZE);
printf("BP_Size: %u THRESHOLD_PARALLEL: %u\n", BITPARALLEL_SIZE, THRESHOLD_PARALLEL);
}
{// Total Number of Labels
EdgeID local_num_labels = 0;
for (VertexID v_global = 0; v_global < num_v; ++v_global) {
if (G.get_master_host_id(v_global) != host_id) {
continue;
}
local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
}
EdgeID global_num_labels;
MPI_Allreduce(&local_num_labels,
&global_num_labels,
1,
MPI_Instance::get_mpi_datatype<EdgeID>(),
MPI_SUM,
MPI_COMM_WORLD);
// printf("host_id: %u local_num_labels: %lu %.2f%%\n", host_id, local_num_labels, 100.0 * local_num_labels / global_num_labels);
MPI_Barrier(MPI_COMM_WORLD);
if (0 == host_id) {
printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v);
}
// VertexID local_num_batches = 0;
// VertexID local_num_distances = 0;
//// double local_avg_distances_per_batches = 0;
// for (VertexID v_global = 0; v_global < num_v; ++v_global) {
// if (G.get_master_host_id(v_global) != host_id) {
// continue;
// }
// VertexID v_local = G.get_local_vertex_id(v_global);
// local_num_batches += L[v_local].batches.size();
// local_num_distances += L[v_local].distances.size();
//// double avg_d_p_b = 0;
//// for (VertexID i_b = 0; i_b < L[v_local].batches.size(); ++i_b) {
//// avg_d_p_b += L[v_local].batches[i_b].size;
//// }
//// avg_d_p_b /= L[v_local].batches.size();
//// local_avg_distances_per_batches += avg_d_p_b;
// }
//// local_avg_distances_per_batches /= num_masters;
//// double local_avg_batches = local_num_batches * 1.0 / num_masters;
//// double local_avg_distances = local_num_distances * 1.0 / num_masters;
// uint64_t global_num_batches = 0;
// uint64_t global_num_distances = 0;
// MPI_Allreduce(
// &local_num_batches,
// &global_num_batches,
// 1,
// MPI_UINT64_T,
// MPI_SUM,
// MPI_COMM_WORLD);
//// global_avg_batches /= num_hosts;
// MPI_Allreduce(
// &local_num_distances,
// &global_num_distances,
// 1,
// MPI_UINT64_T,
// MPI_SUM,
// MPI_COMM_WORLD);
//// global_avg_distances /= num_hosts;
// double global_avg_d_p_b = global_num_distances * 1.0 / global_num_batches;
// double global_avg_l_p_d = global_num_labels * 1.0 / global_num_distances;
// double global_avg_batches = global_num_batches / num_v;
// double global_avg_distances = global_num_distances / num_v;
//// MPI_Allreduce(
//// &local_avg_distances_per_batches,
//// &global_avg_d_p_b,
//// 1,
//// MPI_DOUBLE,
//// MPI_SUM,
//// MPI_COMM_WORLD);
//// global_avg_d_p_b /= num_hosts;
// MPI_Barrier(MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("global_avg_batches: %f "
// "global_avg_distances: %f "
// "global_avg_distances_per_batch: %f "
// "global_avg_labels_per_distance: %f\n",
// global_avg_batches,
// global_avg_distances,
// global_avg_d_p_b,
// global_avg_l_p_d);
// }
}
// printf("BP_labeling: %f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100);
// printf("Initializing: %f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100);
// printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100);
// printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100);
// printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100);
// printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100);
// printf("Candidating: %f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100);
// printf("Adding: %f %.2f%%\n", adding_time, adding_time / time_labeling * 100);
// printf("distance_query_time: %f %.2f%%\n", distance_query_time, distance_query_time / time_labeling * 100);
// uint64_t total_check_count = bp_hit_count + normal_check_count;
// printf("total_check_count: %'llu\n", total_check_count);
// printf("bp_hit_count: %'llu %.2f%%\n",
// bp_hit_count,
// bp_hit_count * 100.0 / total_check_count);
// printf("normal_check_count: %'llu %.2f%%\n", normal_check_count, normal_check_count * 100.0 / total_check_count);
// printf("total_candidates_num: %'llu set_candidates_num: %'llu %.2f%%\n",
// total_candidates_num,
// set_candidates_num,
// set_candidates_num * 100.0 / total_candidates_num);
// printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n",
// normal_hit_count,
// normal_hit_count * 100.0 / total_check_count,
// normal_hit_count * 100.0 / (total_check_count - bp_hit_count));
//cache_miss.print();
// printf("Candidating: "); candidating_ins_count.print();
// printf("Adding: "); adding_ins_count.print();
// printf("BP_Labeling: "); bp_labeling_ins_count.print();
// printf("BP_Checking: "); bp_checking_ins_count.print();
// printf("distance_query: "); dist_query_ins_count.print();
// if (0 == host_id) {
// printf("num_hosts: %u host_id: %u\n"
// "Local_labeling_time: %.2f seconds\n"
// "bp_labeling_time: %.2f %.2f%%\n"
// "initializing_time: %.2f %.2f%%\n"
// "scatter_time: %.2f %.2f%%\n"
// "gather_time: %.2f %.2f%%\n"
// "clearup_time: %.2f %.2f%%\n"
// "message_time: %.2f %.2f%%\n",
// num_hosts, host_id,
// time_labeling,
// bp_labeling_time, 100.0 * bp_labeling_time / time_labeling,
// initializing_time, 100.0 * initializing_time / time_labeling,
// scatter_time, 100.0 * scatter_time / time_labeling,
// gather_time, 100.0 * gather_time / time_labeling,
// clearup_time, 100.0 * clearup_time / time_labeling,
// message_time, 100.0 * message_time / time_labeling);
// }
double global_time_labeling;
MPI_Allreduce(&time_labeling,
&global_time_labeling,
1,
MPI_DOUBLE,
MPI_MAX,
MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
if (0 == host_id) {
printf("num_hosts: %d "
"num_threads: %d "
"Global_labeling_time: %.2f seconds\n",
num_hosts,
NUM_THREADS,
global_time_labeling);
}
// End test
}
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_labeling(
// const DistGraph &G,
// std::vector<uint8_t> &used_bp_roots)
//{
//// VertexID num_v = G.num_v;
// EdgeID num_e = G.num_e;
//
// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
// std::vector<VertexID> que(num_v); // active queue
// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
//
// VertexID r = 0; // root r
// for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// while (r < num_v && used_bp_roots[r]) {
// ++r;
// }
// if (r == num_v) {
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
// }
// continue;
// }
// used_bp_roots[r] = true;
//
// fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
// fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
// que[que_h++] = r;
// tmp_d[r] = 0;
// que_t1 = que_h;
//
// int ns = 0; // number of selected neighbor, default 64
// // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward
// // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF.
//// VertexID i_bound = G.vertices[r] - 1;
//// VertexID i_start = i_bound + G.out_degrees[r];
//// for (VertexID i = i_start; i > i_bound; --i) {
// //int i_bound = G.vertices[r];
// //int i_start = i_bound + G.out_degrees[r] - 1;
// //for (int i = i_start; i >= i_bound; --i) {
// VertexID d_i_bound = G.local_out_degrees[r];
// EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1;
// for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) {
// EdgeID i = i_start - d_i;
// VertexID v = G.out_edges[i];
// if (!used_bp_roots[v]) {
// used_bp_roots[v] = true;
// // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set)
// que[que_h++] = v;
// tmp_d[v] = 1;
// tmp_s[v].first = 1ULL << ns;
// if (++ns == 64) break;
// }
// }
// //}
//// }
//
// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
// VertexID num_sibling_es = 0, num_child_es = 0;
//
// for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) {
// VertexID v = que[que_i];
//// bit_parallel_push_labels(G,
//// v,
//// que,
//// que_h,
//// sibling_es,
//// num_sibling_es,
//// child_es,
//// num_child_es,
//// tmp_d,
//// d);
// EdgeID i_start = G.vertices_idx[v];
// EdgeID i_bound = i_start + G.local_out_degrees[v];
// for (EdgeID i = i_start; i < i_bound; ++i) {
// VertexID tv = G.out_edges[i];
// UnweightedDist td = d + 1;
//
// if (d > tmp_d[tv]) {
// ;
// }
// else if (d == tmp_d[tv]) {
// if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
// sibling_es[num_sibling_es].first = v;
// sibling_es[num_sibling_es].second = tv;
// ++num_sibling_es;
// }
// } else { // d < tmp_d[tv]
// if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) {
// que[que_h++] = tv;
// tmp_d[tv] = td;
// }
// child_es[num_child_es].first = v;
// child_es[num_child_es].second = tv;
// ++num_child_es;
// }
// }
// }
//
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first, w = sibling_es[i].second;
// tmp_s[v].second |= tmp_s[w].first;
// tmp_s[w].second |= tmp_s[v].first;
// }
// for (VertexID i = 0; i < num_child_es; ++i) {
// VertexID v = child_es[i].first, c = child_es[i].second;
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
// }
//
// {// test
// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
//// if (4 == d) {
//// exit(EXIT_SUCCESS);
//// }
// }
//
// que_t0 = que_t1;
// que_t1 = que_h;
// }
//
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = tmp_d[v];
// L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
// L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
// }
// }
//
//}
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_push_labels(
const DistGraph &G,
const VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
std::vector<VertexID> &tmp_q,
VertexID &size_tmp_q,
std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
VertexID &size_tmp_sibling_es,
std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
VertexID &size_tmp_child_es,
const VertexID &offset_tmp_q,
std::vector<UnweightedDist> &dists,
const UnweightedDist iter)
{
EdgeID i_start = G.vertices_idx[v_global];
EdgeID i_bound = i_start + G.local_out_degrees[v_global];
// {//test
// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
// }
for (EdgeID i = i_start; i < i_bound; ++i) {
VertexID tv_global = G.out_edges[i];
VertexID tv_local = G.get_local_vertex_id(tv_global);
UnweightedDist td = iter + 1;
if (iter > dists[tv_local]) {
;
} else if (iter == dists[tv_local]) {
if (v_global < tv_global) { // Record each sibling pair only once (v < tv), since the graph is undirected.
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global;
tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global;
++size_tmp_sibling_es;
// sibling_es[num_sibling_es].first = v_global;
// sibling_es[num_sibling_es].second = tv_global;
// ++num_sibling_es;
}
} else { // iter < dists[tv]
if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) {
tmp_q[offset_tmp_q + size_tmp_q++] = tv_global;
}
}
// if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// tmp_que[end_tmp_que++] = tv_global;
// dists[tv_local] = td;
// }
tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global;
tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global;
++size_tmp_child_es;
// child_es[num_child_es].first = v_global;
// child_es[num_child_es].second = tv_global;
// ++num_child_es;
}
}
}
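// Note on bit_parallel_push_labels: with iter being the current BFS level,
// the three branches classify each edge (v, tv) of the shortest-path DAG:
//   iter >  dists[tv]: tv is an already-settled ancestor -- nothing to do;
//   iter == dists[tv]: v and tv are siblings on the same level (recorded once,
//                      v_global < tv_global, since the graph is undirected);
//   iter <  dists[tv]: tv is a child one level below.
// The CAS on dists ensures that, under the enclosing "#pragma omp parallel
// for", exactly one task enqueues each newly discovered vertex, while every
// discovering edge is still recorded as a child edge.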
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_labeling(
const DistGraph &G,
// std::vector<IndexType> &L,
std::vector<uint8_t> &used_bp_roots)
{
// Class type of Bit-Parallel label message unit.
struct MsgUnitBP {
VertexID v_global;
uint64_t S_n1;
uint64_t S_0;
MsgUnitBP() = default;
// MsgUnitBP(MsgUnitBP&& other) = default;
// MsgUnitBP(MsgUnitBP& other) = default;
// MsgUnitBP& operator=(const MsgUnitBP& other) = default;
// MsgUnitBP& operator=(MsgUnitBP&& other) = default;
MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
: v_global(v), S_n1(sn1), S_0(s0) { }
};
// VertexID num_v = G.num_v;
// EdgeID num_e = G.num_e;
EdgeID local_num_edges = G.num_edges_local;
std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v
std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
std::vector<VertexID> que(num_masters); // active queue
VertexID end_que = 0;
std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que
VertexID end_tmp_que = 0;
std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.
VertexID r_global = 0; // root r
for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// {// test
// if (0 == host_id) {
// printf("i_bpsp: %u\n", i_bpspt);
// }
// }
// Select the root r_global
if (0 == host_id) {
while (r_global < num_v && used_bp_roots[r_global]) {
++r_global;
}
}
// Broadcast r_global here (r_global == num_v means the roots are exhausted).
// Broadcasting before the exhaustion check keeps all hosts in lockstep;
// otherwise host 0 could `continue` while the other hosts block in MPI_Bcast.
// message_time -= WallTimer::get_time_mark();
MPI_Bcast(&r_global,
1,
V_ID_Type,
0,
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
if (r_global == num_v) {
// No unused root remains: mark this bit-parallel slot unreachable for the local masters.
// (L holds only the num_masters local vertices, so index it by local ID, not global ID.)
for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
L[v_local].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
}
continue;
}
used_bp_roots[r_global] = 1;
//#ifdef DEBUG_MESSAGES_ON
// {//test
// if (0 == host_id) {
// printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt);
// }
// }
//#endif
// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
// Mark the r_global
if (G.get_master_host_id(r_global) == host_id) {
tmp_d[G.get_local_vertex_id(r_global)] = 0;
que[end_que++] = r_global;
}
// Select the r_global's 64 neighbors
{
// Get r_global's neighbors into buffer_send, rank from high to low.
VertexID local_degree = G.local_out_degrees[r_global];
std::vector<VertexID> buffer_send(local_degree);
if (local_degree) {
EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
EdgeID e_i = e_i_start - d_i;
buffer_send[d_i] = G.out_edges[e_i];
}
}
// Get selected neighbors (up to 64)
std::vector<VertexID> selected_nbrs;
if (0 != host_id) {
// Every host other than 0 sends neighbors to host 0
// message_time -= WallTimer::get_time_mark();
MPI_Instance::send_buffer_2_dst(buffer_send,
0,
SENDING_ROOT_NEIGHBORS,
SENDING_SIZE_ROOT_NEIGHBORS);
// Receive selected neighbors from host 0
MPI_Instance::recv_buffer_from_src(selected_nbrs,
0,
SENDING_SELECTED_NEIGHBORS,
SENDING_SIZE_SELETED_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
} else {
// Host 0
// Host 0 receives neighbors from others
std::vector<VertexID> all_nbrs(buffer_send);
std::vector<VertexID > buffer_recv;
for (int loc = 0; loc < num_hosts - 1; ++loc) {
// message_time -= WallTimer::get_time_mark();
MPI_Instance::recv_buffer_from_any(buffer_recv,
SENDING_ROOT_NEIGHBORS,
SENDING_SIZE_ROOT_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
if (buffer_recv.empty()) {
continue;
}
buffer_send.resize(buffer_send.size() + buffer_recv.size());
std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
all_nbrs.resize(buffer_send.size());
all_nbrs.assign(buffer_send.begin(), buffer_send.end());
}
assert(all_nbrs.size() == G.get_global_out_degree(r_global));
// Select 64 (or less) neighbors
VertexID ns = 0; // number of selected neighbors, at most 64
for (VertexID v_global : all_nbrs) {
if (used_bp_roots[v_global]) {
continue;
}
used_bp_roots[v_global] = 1;
selected_nbrs.push_back(v_global);
if (++ns == 64) {
break;
}
}
// Send selected neighbors to other hosts
// message_time -= WallTimer::get_time_mark();
for (int dest = 1; dest < num_hosts; ++dest) {
MPI_Instance::send_buffer_2_dst(selected_nbrs,
dest,
SENDING_SELECTED_NEIGHBORS,
SENDING_SIZE_SELETED_NEIGHBORS);
}
// message_time += WallTimer::get_time_mark();
}
// {//test
// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size());
// }
// Synchronize the used_bp_roots.
for (VertexID v_global : selected_nbrs) {
used_bp_roots[v_global] = 1;
}
// Mark selected neighbors
for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
VertexID v_global = selected_nbrs[v_i];
if (host_id != G.get_master_host_id(v_global)) {
continue;
}
tmp_que[end_tmp_que++] = v_global;
tmp_d[G.get_local_vertex_id(v_global)] = 1;
tmp_s[v_global].first = 1ULL << v_i;
}
}
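// Note on the selection above: every host contributes its local slice of
// r_global's adjacency list (traversed backward, so the buffer comes out
// ascending -- std::merge requires both of its inputs to be sorted); host 0
// folds the slices into all_nbrs, picks up to 64 not-yet-used neighbors in
// ascending ID order, and sends the same selection back so that every host
// assigns identical bit positions 0..63 (the 1ULL << v_i masks) to those
// neighbors.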
// Reduce the global number of active vertices
VertexID global_num_actives = 1;
UnweightedDist d = 0;
while (global_num_actives) {
{// Limit the distance
if (d > 7) {
break;
}
}
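// Note: capping this BFS at depth 7 is a heuristic trade-off, and it stays
// safe. Vertices never reached keep MAX_UNWEIGHTED_DIST, so the bit-parallel
// distance check only ever derives valid upper bounds from this root; the
// truncation merely weakens its pruning power while bounding the number of
// (relatively expensive) distributed BFS rounds per bit-parallel root.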
//#ifdef DEBUG_MESSAGES_ON
// {//test
// if (0 == host_id) {
// printf("d: %u que_size: %u\n", d, global_num_actives);
// }
// }
//#endif
// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
VertexID num_sibling_es = 0, num_child_es = 0;
// Send active masters to mirrors
{
std::vector<MsgUnitBP> buffer_send(end_que);
for (VertexID que_i = 0; que_i < end_que; ++que_i) {
VertexID v_global = que[que_i];
buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
}
// {// test
// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size());
// }
for (int root = 0; root < num_hosts; ++root) {
std::vector<MsgUnitBP> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
// For parallel adding to queue
VertexID size_buffer_recv = buffer_recv.size();
std::vector<VertexID> offsets_tmp_q(size_buffer_recv);
#pragma omp parallel for
for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) {
offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global];
}
VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q);
std::vector<VertexID> tmp_q(num_neighbors);
std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0);
// For parallel adding to sibling_es
std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors);
std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0);
// For parallel adding to child_es
std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors);
std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0);
#pragma omp parallel for
// for (const MsgUnitBP &m : buffer_recv) {
for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
const MsgUnitBP &m = buffer_recv[i_m];
VertexID v_global = m.v_global;
if (!G.local_out_degrees[v_global]) {
continue;
}
tmp_s[v_global].first = m.S_n1;
tmp_s[v_global].second = m.S_0;
// Push labels
bit_parallel_push_labels(
G,
v_global,
tmp_q,
sizes_tmp_q[i_m],
tmp_sibling_es,
sizes_tmp_sibling_es[i_m],
tmp_child_es,
sizes_tmp_child_es[i_m],
offsets_tmp_q[i_m],
// tmp_que,
// end_tmp_que,
// sibling_es,
// num_sibling_es,
// child_es,
// num_child_es,
tmp_d,
d);
}
{// From tmp_sibling_es to sibling_es
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es);
PADO::collect_into_queue(
tmp_sibling_es,
offsets_tmp_q,
sizes_tmp_sibling_es,
total_size_tmp,
sibling_es,
num_sibling_es);
}
{// From tmp_child_es to child_es
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es);
PADO::collect_into_queue(
tmp_child_es,
offsets_tmp_q,
sizes_tmp_child_es,
total_size_tmp,
child_es,
num_child_es);
}
{// From tmp_q to tmp_que
idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q);
PADO::collect_into_queue(
tmp_q,
offsets_tmp_q,
sizes_tmp_q,
total_size_tmp,
tmp_que,
end_tmp_que);
}
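// Note on the three blocks above: this is a two-pass parallel compaction.
// First, each message's output region is sized by its local out-degree, and
// prefix_sum_for_offsets() turns those sizes into starting offsets (returning
// the total), so every task in the parallel loop writes into a private,
// disjoint slice. Then collect_into_queue() squeezes the partially filled
// slices into the destination queue and advances its end counter. A sketch of
// the offset computation under these assumptions:
//   sizes:   [3, 0, 2, 1]
//   offsets: [0, 3, 3, 5]   total = 6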
// {// test
// printf("host_id: %u root: %u done push.\n", host_id, root);
// }
}
}
// Update the sets in tmp_s
{
#pragma omp parallel for
for (VertexID i = 0; i < num_sibling_es; ++i) {
VertexID v = sibling_es[i].first, w = sibling_es[i].second;
__atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST);
__atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST);
// tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!!
// tmp_s[w].second |= tmp_s[v].first;
}
// Put into the buffer sending to others
std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
#pragma omp parallel for
for (VertexID i = 0; i < num_sibling_es; ++i) {
VertexID v = sibling_es[i].first;
VertexID w = sibling_es[i].second;
buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
}
// Send the messages
for (int root = 0; root < num_hosts; ++root) {
std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
size_t i_m_bound = buffer_recv.size();
#pragma omp parallel for
for (size_t i_m = 0; i_m < i_m_bound; ++i_m) {
const auto &m = buffer_recv[i_m];
__atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST);
}
// for (const std::pair<VertexID, uint64_t> &m : buffer_recv) {
// tmp_s[m.first].second |= m.second;
// }
}
#pragma omp parallel for
for (VertexID i = 0; i < num_child_es; ++i) {
VertexID v = child_es[i].first, c = child_es[i].second;
__atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST);
__atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST);
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
}
}
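// Note on the set updates above: for a sibling edge (v, w) on the same BFS
// level, each endpoint's S^{-1} set is folded into the other's S^{0} set
// (tmp_s[v].second |= tmp_s[w].first, and symmetrically); for a child edge
// (v, c), the child inherits both of the parent's sets. The __atomic_or_fetch
// calls make the ORs safe when several edges in the parallel loops touch the
// same endpoint, and the sibling results are re-broadcast because the updated
// S^{0} values are needed on every host that owns edges of those vertices.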
//#ifdef DEBUG_MESSAGES_ON
// {// test
// VertexID global_num_sibling_es;
// VertexID global_num_child_es;
// MPI_Allreduce(&num_sibling_es,
// &global_num_sibling_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// MPI_Allreduce(&num_child_es,
// &global_num_child_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es);
// }
//
//// printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
//// if (0 == d) {
//// exit(EXIT_SUCCESS);
//// }
// }
//#endif
// Swap que and tmp_que
tmp_que.swap(que);
end_que = end_tmp_que;
end_tmp_que = 0;
// message_time -= WallTimer::get_time_mark();
MPI_Allreduce(&end_que,
&global_num_actives,
1,
V_ID_Type,
MPI_MAX,
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// }
++d;
}
#pragma omp parallel for
for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
VertexID v_global = G.get_global_vertex_id(v_local);
L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
}
}
}
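// Note on the final copy above: bp_sets[i][0] holds S_r^{-1} (the selected
// neighbors of root r that lie at distance d(r, v) - 1 from v) and
// bp_sets[i][1] holds S_r^{0} (those at distance d(r, v)), with the mask
// "second & ~first" dropping any neighbor already counted in S_r^{-1} --
// membership in the closer set subsumes the other. The distance check later
// combines these as d(v, w) <= bp_dist_v + bp_dist_w - 2 when the S^{-1}
// sets intersect, or - 1 when an S^{-1} set meets an S^{0} set (see the
// commented-out bit_parallel_checking below).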
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_push_labels(
// const DistGraph &G,
// const VertexID v_global,
// std::vector<VertexID> &tmp_que,
// VertexID &end_tmp_que,
// std::vector< std::pair<VertexID, VertexID> > &sibling_es,
// VertexID &num_sibling_es,
// std::vector< std::pair<VertexID, VertexID> > &child_es,
// VertexID &num_child_es,
// std::vector<UnweightedDist> &dists,
// const UnweightedDist iter)
//{
// EdgeID i_start = G.vertices_idx[v_global];
// EdgeID i_bound = i_start + G.local_out_degrees[v_global];
//// {//test
//// printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
//// }
// for (EdgeID i = i_start; i < i_bound; ++i) {
// VertexID tv_global = G.out_edges[i];
// VertexID tv_local = G.get_local_vertex_id(tv_global);
// UnweightedDist td = iter + 1;
//
// if (iter > dists[tv_local]) {
// ;
// } else if (iter == dists[tv_local]) {
// if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph.
// sibling_es[num_sibling_es].first = v_global;
// sibling_es[num_sibling_es].second = tv_global;
// ++num_sibling_es;
// }
// } else { // iter < dists[tv]
// if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
// tmp_que[end_tmp_que++] = tv_global;
// dists[tv_local] = td;
// }
// child_es[num_child_es].first = v_global;
// child_es[num_child_es].second = tv_global;
// ++num_child_es;
//// {
//// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test
//// }
// }
// }
//
//}
//
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_labeling(
// const DistGraph &G,
//// std::vector<IndexType> &L,
// std::vector<uint8_t> &used_bp_roots)
//{
// // Class type of Bit-Parallel label message unit.
// struct MsgUnitBP {
// VertexID v_global;
// uint64_t S_n1;
// uint64_t S_0;
//
// MsgUnitBP() = default;
//// MsgUnitBP(MsgUnitBP&& other) = default;
//// MsgUnitBP(MsgUnitBP& other) = default;
//// MsgUnitBP& operator=(const MsgUnitBP& other) = default;
//// MsgUnitBP& operator=(MsgUnitBP&& other) = default;
// MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
// : v_global(v), S_n1(sn1), S_0(s0) { }
// };
//// VertexID num_v = G.num_v;
//// EdgeID num_e = G.num_e;
// EdgeID local_num_edges = G.num_edges_local;
//
// std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v
// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
// std::vector<VertexID> que(num_masters); // active queue
// VertexID end_que = 0;
// std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que
// VertexID end_tmp_que = 0;
// std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
// std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.
//
//// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v
//// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
//// std::vector<VertexID> que(num_v); // active queue
//// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0)
//// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1.
//
// VertexID r_global = 0; // root r
// for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
// // Select the root r_global
// if (0 == host_id) {
// while (r_global < num_v && used_bp_roots[r_global]) {
// ++r_global;
// }
// if (r_global == num_v) {
// for (VertexID v = 0; v < num_v; ++v) {
// L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
// }
// continue;
// }
// }
// // Broadcast the r here.
// message_time -= WallTimer::get_time_mark();
// MPI_Bcast(&r_global,
// 1,
// V_ID_Type,
// 0,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// used_bp_roots[r_global] = 1;
//#ifdef DEBUG_MESSAGES_ON
// {//test
// if (0 == host_id) {
// printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt);
// }
// }
//#endif
//
//// VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
// fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
// fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));
//
// // Mark the r_global
// if (G.get_master_host_id(r_global) == host_id) {
// tmp_d[G.get_local_vertex_id(r_global)] = 0;
// que[end_que++] = r_global;
// }
// // Select the r_global's 64 neighbors
// {
// // Get r_global's neighbors into buffer_send, rank from low to high.
// VertexID local_degree = G.local_out_degrees[r_global];
// std::vector<VertexID> buffer_send(local_degree);
// if (local_degree) {
// EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
// for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
// EdgeID e_i = e_i_start - d_i;
// buffer_send[d_i] = G.out_edges[e_i];
// }
// }
//
// // Get selected neighbors (up to 64)
// std::vector<VertexID> selected_nbrs;
// if (0 != host_id) {
// // Every host other than 0 sends neighbors to host 0
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// 0,
// SENDING_ROOT_NEIGHBORS,
// SENDING_SIZE_ROOT_NEIGHBORS);
// // Receive selected neighbors from host 0
// MPI_Instance::recv_buffer_from_src(selected_nbrs,
// 0,
// SENDING_SELECTED_NEIGHBORS,
// SENDING_SIZE_SELETED_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
// } else {
// // Host 0
// // Host 0 receives neighbors from others
// std::vector<VertexID> all_nbrs(buffer_send);
// std::vector<VertexID > buffer_recv;
// for (int loc = 0; loc < num_hosts - 1; ++loc) {
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::recv_buffer_from_any(buffer_recv,
// SENDING_ROOT_NEIGHBORS,
// SENDING_SIZE_ROOT_NEIGHBORS);
//// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv,
//// num_hosts,
//// SENDING_ROOT_NEIGHBORS);
// message_time += WallTimer::get_time_mark();
// if (buffer_recv.empty()) {
// continue;
// }
//
// buffer_send.resize(buffer_send.size() + buffer_recv.size());
// std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
// all_nbrs.resize(buffer_send.size());
// all_nbrs.assign(buffer_send.begin(), buffer_send.end());
// }
// assert(all_nbrs.size() == G.get_global_out_degree(r_global));
// // Select 64 (or less) neighbors
// VertexID ns = 0; // number of selected neighbor, default 64
// for (VertexID v_global : all_nbrs) {
// if (used_bp_roots[v_global]) {
// continue;
// }
// used_bp_roots[v_global] = 1;
// selected_nbrs.push_back(v_global);
// if (++ns == 64) {
// break;
// }
// }
// // Send selected neighbors to other hosts
// message_time -= WallTimer::get_time_mark();
// for (int dest = 1; dest < num_hosts; ++dest) {
// MPI_Instance::send_buffer_2_dst(selected_nbrs,
// dest,
// SENDING_SELECTED_NEIGHBORS,
// SENDING_SIZE_SELETED_NEIGHBORS);
// }
// message_time += WallTimer::get_time_mark();
// }
//// {//test
//// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size());
//// }
//
// // Synchronize the used_bp_roots.
// for (VertexID v_global : selected_nbrs) {
// used_bp_roots[v_global] = 1;
// }
//
// // Mark selected neighbors
// for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
// VertexID v_global = selected_nbrs[v_i];
// if (host_id != G.get_master_host_id(v_global)) {
// continue;
// }
// tmp_que[end_tmp_que++] = v_global;
// tmp_d[G.get_local_vertex_id(v_global)] = 1;
// tmp_s[v_global].first = 1ULL << v_i;
// }
// }
//
// // Reduce the global number of active vertices
// VertexID global_num_actives = 1;
// UnweightedDist d = 0;
// while (global_num_actives) {
//// for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
// VertexID num_sibling_es = 0, num_child_es = 0;
//
//
// // Send active masters to mirrors
// {
// std::vector<MsgUnitBP> buffer_send(end_que);
// for (VertexID que_i = 0; que_i < end_que; ++que_i) {
// VertexID v_global = que[que_i];
// buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
// }
//// {// test
//// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size());
//// }
//
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<MsgUnitBP> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const MsgUnitBP &m : buffer_recv) {
// VertexID v_global = m.v_global;
// if (!G.local_out_degrees[v_global]) {
// continue;
// }
// tmp_s[v_global].first = m.S_n1;
// tmp_s[v_global].second = m.S_0;
// // Push labels
// bit_parallel_push_labels(G,
// v_global,
// tmp_que,
// end_tmp_que,
// sibling_es,
// num_sibling_es,
// child_es,
// num_child_es,
// tmp_d,
// d);
// }
//// {// test
//// printf("host_id: %u root: %u done push.\n", host_id, root);
//// }
// }
// }
//
// // Update the sets in tmp_s
// {
//
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first, w = sibling_es[i].second;
// tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!!
// tmp_s[w].second |= tmp_s[v].first;
//
// }
// // Put into the buffer sending to others
// std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
//// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1);
// for (VertexID i = 0; i < num_sibling_es; ++i) {
// VertexID v = sibling_es[i].first;
// VertexID w = sibling_es[i].second;
//// buffer_send.emplace_back(v, tmp_s[v].second);
//// buffer_send.emplace_back(w, tmp_s[w].second);
// buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
// buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
// }
// // Send the messages
// for (int root = 0; root < num_hosts; ++root) {
// std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const std::pair<VertexID, uint64_t> &m : buffer_recv) {
// tmp_s[m.first].second |= m.second;
// }
// }
// for (VertexID i = 0; i < num_child_es; ++i) {
// VertexID v = child_es[i].first, c = child_es[i].second;
// tmp_s[c].first |= tmp_s[v].first;
// tmp_s[c].second |= tmp_s[v].second;
// }
// }
////#ifdef DEBUG_MESSAGES_ON
// {// test
// VertexID global_num_sibling_es;
// VertexID global_num_child_es;
// MPI_Allreduce(&num_sibling_es,
// &global_num_sibling_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// MPI_Allreduce(&num_child_es,
// &global_num_child_es,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// if (0 == host_id) {
// printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es);
// }
// }
////#endif
//
// // Swap que and tmp_que
// tmp_que.swap(que);
// end_que = end_tmp_que;
// end_tmp_que = 0;
// MPI_Allreduce(&end_que,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
//
//// }
// ++d;
// }
//
// for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
// VertexID v_global = G.get_global_vertex_id(v_local);
// L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
// L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
// L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
// }
// }
//}
//// Function bit parallel checking:
//// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking(
// VertexID v_id,
// VertexID w_id,
// const std::vector<IndexType> &L,
// UnweightedDist iter)
//{
// // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already
// const IndexType &Lv = L[v_id];
// const IndexType &Lw = L[w_id];
//
// _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0);
// _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0);
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF.
// if (td - 2 <= iter) {
// td +=
// (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 :
// ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) |
// (Lv.bp_sets[i][1] & Lw.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
//// ++bp_hit_count;
// return false;
// }
// }
// }
// return true;
//}
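// Illustrative sketch (not part of the original build) of the bit-parallel pruning rule
// used in the checks above and below: for each bit-parallel root i, bp_sets[i][0] holds
// the set S^{-1} (vertices one BFS level before the endpoint) and bp_sets[i][1] holds
// S^{0} (vertices on the endpoint's own level). A shared bit in S^{-1} on both endpoints
// shortens the summed distance by 2; a shared bit across S^{-1}/S^{0} shortens it by 1.
static inline int bp_dist_adjustment_sketch(uint64_t v_set_m1, uint64_t v_set_0,
                                            uint64_t w_set_m1, uint64_t w_set_0)
{
    if (v_set_m1 & w_set_m1) {
        return -2; // both endpoints reach a common vertex one level early
    }
    if ((v_set_m1 & w_set_0) | (v_set_0 & w_set_m1)) {
        return -1; // only one endpoint is one level early at a common vertex
    }
    return 0;
}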
// Function for initializing at the beginning of a batch
// For a batch, initialize the temporary labels and real labels of roots;
// traverse roots' labels to initialize distance buffer;
// unset flag arrays is_active and got_labels
template <VertexID BATCH_SIZE>
inline VertexID DistBVCPLL<BATCH_SIZE>::
initialization(
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
// VertexID b_id,
VertexID roots_start,
VertexID roots_size,
// std::vector<VertexID> &roots_master_local,
const std::vector<uint8_t> &used_bp_roots)
{
// Get the roots_master_local, containing all local roots.
std::vector<VertexID> roots_master_local;
VertexID size_roots_master_local;
VertexID roots_bound = roots_start + roots_size;
try {
for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
roots_master_local.push_back(G.get_local_vertex_id(r_global));
// {//test
// if (1024 == roots_start && 7 == host_id && 31600 == *roots_master_local.rbegin()) {
// printf("S0.0 host_id: %d "
// "31600 YES!\n",
// host_id);
// }
// }
}
}
size_roots_master_local = roots_master_local.size();
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("initialization_roots_master_local: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Short_index
{
if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
VertexID v_local = once_candidated_queue[v_i];
short_index[v_local].indicator_reset();
once_candidated[v_local] = 0;
}
} else {
for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
VertexID v_local = once_candidated_queue[v_i];
short_index[v_local].indicator_reset();
once_candidated[v_local] = 0;
}
}
end_once_candidated_queue = 0;
if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
// short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
}
} else {
for (VertexID r_local : roots_master_local) {
short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
// short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
}
}
}
//
// Real Index
try
{
if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
IndexType &Lr = L[r_local];
Lr.batches.emplace_back(
// b_id, // Batch ID
Lr.distances.size(), // start_index
1); // size
Lr.distances.emplace_back(
Lr.vertices.size(), // start_index
1, // size
0); // dist
Lr.vertices.push_back(G.get_global_vertex_id(r_local));
// Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
}
} else {
for (VertexID r_local : roots_master_local) {
IndexType &Lr = L[r_local];
Lr.batches.emplace_back(
// b_id, // Batch ID
Lr.distances.size(), // start_index
1); // size
Lr.distances.emplace_back(
Lr.vertices.size(), // start_index
1, // size
0); // dist
Lr.vertices.push_back(G.get_global_vertex_id(r_local));
// Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
}
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("initialization_real_index: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Dist Table
try
{
// struct LabelTableUnit {
// VertexID root_id;
// VertexID label_global_id;
// UnweightedDist dist;
//
// LabelTableUnit() = default;
//
// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
// root_id(r), label_global_id(l), dist(d) {}
// };
std::vector<LabelTableUnit> buffer_send; // buffer for sending
// Dist_matrix
{
            // Old method (once deprecated, now back in use): unpack the IndexType structure before sending.
if (size_roots_master_local >= THRESHOLD_PARALLEL) {
// Offsets for adding labels to buffer_send in parallel
                std::vector<VertexID> offsets_buffer_send(size_roots_master_local);
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
                    offsets_buffer_send[i_r] = L[r_local].vertices.size();
}
                EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_buffer_send);
buffer_send.resize(size_labels);
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
VertexID top_location = 0;
IndexType &Lr = L[r_local];
VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
VertexID b_i_bound = Lr.batches.size();
_mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
_mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
_mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// Traverse batches array
for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
VertexID dist_start_index = Lr.batches[b_i].start_index;
VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// Traverse distances array
for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
// VertexID dist_bound_index = Lr.distances.size();
// for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
VertexID v_start_index = Lr.distances[dist_i].start_index;
VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
UnweightedDist dist = Lr.distances[dist_i].dist;
// Traverse vertices array
for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// Write into the dist_table
//                                buffer_send[offsets_buffer_send[i_r] + top_location++] =
//                                        LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist);
                                buffer_send[offsets_buffer_send[i_r] + top_location++] =
                                        LabelTableUnit(r_root_id, Lr.vertices[v_i], dist);
}
}
}
}
} else {
for (VertexID r_local : roots_master_local) {
// The distance table.
IndexType &Lr = L[r_local];
VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
VertexID b_i_bound = Lr.batches.size();
_mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
_mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
_mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// Traverse batches array
for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
VertexID dist_start_index = Lr.batches[b_i].start_index;
VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// Traverse distances array
for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
// VertexID dist_bound_index = Lr.distances.size();
// for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
VertexID v_start_index = Lr.distances[dist_i].start_index;
VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
UnweightedDist dist = Lr.distances[dist_i].dist;
// Traverse vertices array
for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// Write into the dist_table
buffer_send.emplace_back(r_root_id, Lr.vertices[v_i],
dist); // buffer for sending
// buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
// dist); // buffer for sending
}
}
}
}
}
}
// Broadcast local roots labels
for (int root = 0; root < num_hosts; ++root) {
std::vector<LabelTableUnit> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
EdgeID size_buffer_recv = buffer_recv.size();
if (size_buffer_recv >= THRESHOLD_PARALLEL) {
std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
const LabelTableUnit &l = buffer_recv[i_l];
VertexID root_id = l.root_id;
VertexID label_global_id = l.label_global_id;
UnweightedDist dist = l.dist;
dist_table[root_id][label_global_id] = dist;
                        // Count root_id's received labels, for later insertion into recved_dist_table
__atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
// recved_dist_table[root_id].push_back(label_global_id);
}
// Record the received label in recved_dist_table, for later reset
#pragma omp parallel for
for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
VertexID &size = sizes_recved_root_labels[root_id];
if (size) {
recved_dist_table[root_id].resize(size);
size = 0;
}
}
#pragma omp parallel for
for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
const LabelTableUnit &l = buffer_recv[i_l];
VertexID root_id = l.root_id;
VertexID label_global_id = l.label_global_id;
PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id);
}
} else {
for (const LabelTableUnit &l : buffer_recv) {
VertexID root_id = l.root_id;
VertexID label_global_id = l.label_global_id;
UnweightedDist dist = l.dist;
dist_table[root_id][label_global_id] = dist;
// Record the received label in recved_dist_table, for later reset
recved_dist_table[root_id].push_back(label_global_id);
}
}
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("initialization_dist_table: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Build the Bit-Parallel Labels Table
try
{
// struct MsgBPLabel {
// VertexID r_root_id;
// UnweightedDist bp_dist[BITPARALLEL_SIZE];
// uint64_t bp_sets[BITPARALLEL_SIZE][2];
//
// MsgBPLabel() = default;
// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
// : r_root_id(r)
// {
// memcpy(bp_dist, dist, sizeof(bp_dist));
// memcpy(bp_sets, sets, sizeof(bp_sets));
// }
// };
// std::vector<MPI_Request> requests_send(num_hosts - 1);
std::vector<MsgBPLabel> buffer_send;
std::vector<VertexID> roots_queue;
for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
if (G.get_master_host_id(r_global) != host_id) {
continue;
}
roots_queue.push_back(r_global);
}
VertexID size_roots_queue = roots_queue.size();
if (size_roots_queue >= THRESHOLD_PARALLEL) {
buffer_send.resize(size_roots_queue);
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) {
VertexID r_global = roots_queue[i_r];
VertexID r_local = G.get_local_vertex_id(r_global);
VertexID r_root = r_global - roots_start;
// Prepare for sending
// buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
}
} else {
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) != host_id) {
// continue;
// }
for (VertexID r_global : roots_queue) {
VertexID r_local = G.get_local_vertex_id(r_global);
VertexID r_root = r_global - roots_start;
// Local roots
// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// Prepare for sending
buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
}
}
for (int root = 0; root < num_hosts; ++root) {
std::vector<MsgBPLabel> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
VertexID size_buffer_recv = buffer_recv.size();
if (size_buffer_recv >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
const MsgBPLabel &m = buffer_recv[i_m];
VertexID r_root = m.r_root_id;
memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
}
} else {
for (const MsgBPLabel &m : buffer_recv) {
VertexID r_root = m.r_root_id;
memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
}
}
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("initialization_bp_labels_table: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Active_queue
VertexID global_num_actives = 0; // global number of active vertices.
{
if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
VertexID r_local = roots_master_local[i_r];
active_queue[i_r] = r_local;
}
end_active_queue = size_roots_master_local;
} else {
for (VertexID r_local : roots_master_local) {
active_queue[end_active_queue++] = r_local;
}
}
// Get the global number of active vertices;
// message_time -= WallTimer::get_time_mark();
MPI_Allreduce(&end_active_queue,
&global_num_actives,
1,
V_ID_Type,
// MPI_SUM,
                      MPI_MAX, // a positive max means at least one host still has active vertices
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
}
return global_num_actives;
}
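// A minimal sketch of the behavior this file assumes for one_host_bcasts_buffer_to_buffer,
// inferred from its call sites (this stand-in is hypothetical, not the class's actual
// implementation): host `root` broadcasts its buffer_send, and every host, the root
// included, ends up with a copy in buffer_recv; the surrounding loops then give each
// host one turn as broadcaster.
template <typename E>
static void one_host_bcasts_sketch(const int root,
                                   const int host_id,
                                   const std::vector<E> &buffer_send,
                                   std::vector<E> &buffer_recv)
{
    uint64_t num_elements = (host_id == root) ? buffer_send.size() : 0;
    MPI_Bcast(&num_elements, 1, MPI_UINT64_T, root, MPI_COMM_WORLD);
    buffer_recv.resize(num_elements);
    if (host_id == root) {
        buffer_recv = buffer_send; // the root keeps its own copy
    }
    // E must be trivially copyable; very large buffers would need chunked broadcasts
    // because MPI_Bcast takes an int count.
    MPI_Bcast(buffer_recv.data(),
              static_cast<int>(num_elements * sizeof(E)),
              MPI_CHAR,
              root,
              MPI_COMM_WORLD);
}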
// Sequential Version
//// Function for initializing at the beginning of a batch
//// For a batch, initialize the temporary labels and real labels of roots;
//// traverse roots' labels to initialize distance buffer;
//// unset flag arrays is_active and got_labels
//template <VertexID BATCH_SIZE>
//inline VertexID DistBVCPLL<BATCH_SIZE>::
//initialization(
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector< std::vector<UnweightedDist> > &dist_table,
// std::vector< std::vector<VertexID> > &recved_dist_table,
// std::vector<BPLabelType> &bp_labels_table,
// std::vector<VertexID> &active_queue,
// VertexID &end_active_queue,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<uint8_t> &once_candidated,
// VertexID b_id,
// VertexID roots_start,
// VertexID roots_size,
//// std::vector<VertexID> &roots_master_local,
// const std::vector<uint8_t> &used_bp_roots)
//{
// // Get the roots_master_local, containing all local roots.
// std::vector<VertexID> roots_master_local;
// VertexID roots_bound = roots_start + roots_size;
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
// roots_master_local.push_back(G.get_local_vertex_id(r_global));
// }
// }
// // Short_index
// {
// for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
// VertexID v_local = once_candidated_queue[v_i];
// short_index[v_local].indicator_reset();
// once_candidated[v_local] = 0;
// }
// end_once_candidated_queue = 0;
// for (VertexID r_local : roots_master_local) {
// short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
// short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
//// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself
//// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels
// }
// }
////
// // Real Index
// {
// for (VertexID r_local : roots_master_local) {
// IndexType &Lr = L[r_local];
// Lr.batches.emplace_back(
// b_id, // Batch ID
// Lr.distances.size(), // start_index
// 1); // size
// Lr.distances.emplace_back(
// Lr.vertices.size(), // start_index
// 1, // size
// 0); // dist
// Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
// }
// }
//
// // Dist Table
// {
//// struct LabelTableUnit {
//// VertexID root_id;
//// VertexID label_global_id;
//// UnweightedDist dist;
////
//// LabelTableUnit() = default;
////
//// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//// root_id(r), label_global_id(l), dist(d) {}
//// };
// std::vector<LabelTableUnit> buffer_send; // buffer for sending
// // Dist_matrix
// {
// // Deprecated Old method: unpack the IndexType structure before sending.
// for (VertexID r_local : roots_master_local) {
// // The distance table.
// IndexType &Lr = L[r_local];
// VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
// VertexID b_i_bound = Lr.batches.size();
// _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// // Traverse batches array
// for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
// VertexID dist_start_index = Lr.batches[b_i].start_index;
// VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// // Traverse distances array
// for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
// VertexID v_start_index = Lr.distances[dist_i].start_index;
// VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
// UnweightedDist dist = Lr.distances[dist_i].dist;
// // Traverse vertices array
// for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// // Write into the dist_table
//// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table
// buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
// dist); // buffer for sending
// }
// }
// }
// }
// }
// // Broadcast local roots labels
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<LabelTableUnit> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const LabelTableUnit &l : buffer_recv) {
// VertexID root_id = l.root_id;
// VertexID label_global_id = l.label_global_id;
// UnweightedDist dist = l.dist;
// dist_table[root_id][label_global_id] = dist;
// // Record the received label in recved_dist_table, for later reset
// recved_dist_table[root_id].push_back(label_global_id);
// }
// }
// }
//
// // Build the Bit-Parallel Labels Table
// {
//// struct MsgBPLabel {
//// VertexID r_root_id;
//// UnweightedDist bp_dist[BITPARALLEL_SIZE];
//// uint64_t bp_sets[BITPARALLEL_SIZE][2];
////
//// MsgBPLabel() = default;
//// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
//// : r_root_id(r)
//// {
//// memcpy(bp_dist, dist, sizeof(bp_dist));
//// memcpy(bp_sets, sets, sizeof(bp_sets));
//// }
//// };
//// std::vector<MPI_Request> requests_send(num_hosts - 1);
// std::vector<MsgBPLabel> buffer_send;
// for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
// if (G.get_master_host_id(r_global) != host_id) {
// continue;
// }
// VertexID r_local = G.get_local_vertex_id(r_global);
// VertexID r_root = r_global - roots_start;
// // Local roots
//// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// // Prepare for sending
// buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
// }
//
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<MsgBPLabel> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const MsgBPLabel &m : buffer_recv) {
// VertexID r_root = m.r_root_id;
// memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
// memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
// }
// }
// }
//
// // TODO: parallel enqueue
// // Active_queue
// VertexID global_num_actives = 0; // global number of active vertices.
// {
// for (VertexID r_local : roots_master_local) {
// active_queue[end_active_queue++] = r_local;
// }
// // Get the global number of active vertices;
// message_time -= WallTimer::get_time_mark();
// MPI_Allreduce(&end_active_queue,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// }
//
// return global_num_actives;
//}
//// Function: push v_head_global's newly added labels to its all neighbors.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//push_single_label(
// VertexID v_head_global,
// VertexID label_root_id,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter)
//{
// const BPLabelType &L_label = bp_labels_table[label_root_id];
// VertexID label_global_id = label_root_id + roots_start;
// EdgeID e_i_start = G.vertices_idx[v_head_global];
// EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
// for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
// VertexID v_tail_global = G.out_edges[e_i];
// if (used_bp_roots[v_tail_global]) {
// continue;
// }
// if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it.
// return;
// }
//
// VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
// const IndexType &L_tail = L[v_tail_local];
// if (v_tail_global <= label_global_id) {
// // remaining v_tail_global has higher rank than the label
// return;
// }
// ShortIndex &SI_v_tail = short_index[v_tail_local];
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator.set(label_root_id);
// // Add into once_candidated_queue
//
// if (!once_candidated[v_tail_local]) {
// // If v_tail_global is not in the once_candidated_queue yet, add it in
// once_candidated[v_tail_local] = true;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
// }
// // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// // ++total_check_count;
//// const IndexType &L_label = L[label_global_id];
//// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//// bp_checking_ins_count.measure_start();
// bool no_need_add = false;
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
// if (td - 2 <= iter) {
// td +=
// (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
// ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
// (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
// no_need_add = true;
//// ++bp_hit_count;
// break;
// }
// }
// }
// if (no_need_add) {
//// bp_checking_ins_count.measure_stop();
// continue;
// }
//// bp_checking_ins_count.measure_stop();
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = true;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = true;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
// }
//// {// Just for the complain from the compiler
//// assert(iter >= iter);
//// }
//}
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
const DistGraph &G,
const VertexID roots_start,
const std::vector<uint8_t> &used_bp_roots,
const std::vector<VertexID> &active_queue,
const VertexID global_start,
const VertexID global_size,
const VertexID local_size,
// const VertexID start_active_queue,
// const VertexID size_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
const std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
std::vector<uint8_t> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const UnweightedDist iter)
{
std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
//.first: Vertex ID
//.second: size of labels
std::vector<VertexID> buffer_send_labels;
if (local_size) {
const VertexID start_active_queue = global_start;
const VertexID size_active_queue = global_size <= local_size ?
global_size :
local_size;
const VertexID bound_active_queue = start_active_queue + size_active_queue;
buffer_send_indices.resize(size_active_queue);
// Prepare offset for inserting
std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
VertexID v_head_local = active_queue[i_q];
is_active[v_head_local] = 0; // reset is_active
const IndexType &Lv = L[v_head_local];
offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
}
EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
try {
buffer_send_labels.resize(size_buffer_send_labels);
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("schedule_label_pushing_para.buffer_send_labels: bad_alloc "
"host_id: %d "
"size_buffer_send_labels: %lu "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
size_buffer_send_labels,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// Build buffer_send_labels by parallel inserting
#pragma omp parallel for
for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
VertexID v_head_local = active_queue[i_q];
is_active[v_head_local] = 0; // reset is_active
VertexID v_head_global = G.get_global_vertex_id(v_head_local);
const IndexType &Lv = L[v_head_local];
// Prepare the buffer_send_indices
VertexID tmp_i_q = i_q - start_active_queue;
buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
            // These 2 indices are used for traversing v_head's last inserted labels
VertexID l_i_start = Lv.distances.rbegin()->start_index;
VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
VertexID top_labels = offsets_buffer_locs[tmp_i_q];
for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
VertexID label_root_id = Lv.vertices[l_i] - roots_start;
buffer_send_labels[top_labels++] = label_root_id;
// buffer_send_labels.push_back(label_root_id);
}
}
}
////////////////////////////////////////////////
////
// const VertexID bound_active_queue = start_active_queue + size_active_queue;
// std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(size_active_queue);
// //.first: Vertex ID
// //.second: size of labels
// std::vector<VertexID> buffer_send_labels;
// // Prepare masters' newly added labels for sending
// // Parallel Version
// // Prepare offset for inserting
// std::vector<VertexID> offsets_buffer_locs(size_active_queue);
//#pragma omp parallel for
// for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// const IndexType &Lv = L[v_head_local];
// offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
// }
// EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
//// {// test
//// if (0 == host_id) {
//// double memtotal = 0;
//// double memfree = 0;
//// double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
//// PADO::Utils::system_memory(memtotal, memfree);
//// printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
//// bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
//// }
//// }
// buffer_send_labels.resize(size_buffer_send_labels);
//// {// test
//// if (0 == host_id) {
//// printf("buffer_send_labels created.\n");
//// }
//// }
//
// // Build buffer_send_labels by parallel inserting
//#pragma omp parallel for
// for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
// VertexID tmp_i_q = i_q - start_active_queue;
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // Prepare the buffer_send_indices
// buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// VertexID top_labels = offsets_buffer_locs[tmp_i_q];
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send_labels[top_labels++] = label_root_id;
//// buffer_send_labels.push_back(label_root_id);
// }
// }
//// end_active_queue = 0;
////
////////////////////////////////////////////////
for (int root = 0; root < num_hosts; ++root) {
// Get the indices
std::vector<std::pair<VertexID, VertexID> > indices_buffer;
one_host_bcasts_buffer_to_buffer(root,
buffer_send_indices,
indices_buffer);
if (indices_buffer.empty()) {
continue;
}
// Get the labels
std::vector<VertexID> labels_buffer;
one_host_bcasts_buffer_to_buffer(root,
buffer_send_labels,
labels_buffer);
VertexID size_indices_buffer = indices_buffer.size();
// Prepare the offsets for reading indices_buffer
std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
starts_locs_index[i_i] = e.second;
}
EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
// Prepare the offsets for inserting v_tails into queue
std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
}
EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
std::vector<VertexID> tmp_got_candidates_queue;
std::vector<VertexID> sizes_tmp_got_candidates_queue;
std::vector<VertexID> tmp_once_candidated_queue;
std::vector<VertexID> sizes_tmp_once_candidated_queue;
try {
tmp_got_candidates_queue.resize(num_ngbrs);
sizes_tmp_got_candidates_queue.resize(size_indices_buffer, 0);
tmp_once_candidated_queue.resize(num_ngbrs);
sizes_tmp_once_candidated_queue.resize(size_indices_buffer, 0);
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("schedule_label_pushing_para.tmp_queues: bad_alloc "
"host_id: %d "
"num_ngbrs: %lu "
"size_indices_buffer: %u "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
num_ngbrs,
size_indices_buffer,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
#pragma omp parallel for
for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
VertexID v_head_global = indices_buffer[i_i].first;
EdgeID start_index = starts_locs_index[i_i];
EdgeID bound_index = i_i != size_indices_buffer - 1 ?
starts_locs_index[i_i + 1] : total_recved_labels;
if (G.local_out_degrees[v_head_global]) {
local_push_labels_para(
v_head_global,
start_index,
bound_index,
roots_start,
labels_buffer,
G,
short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
tmp_got_candidates_queue,
sizes_tmp_got_candidates_queue[i_i],
offsets_tmp_queue[i_i],
got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
tmp_once_candidated_queue,
sizes_tmp_once_candidated_queue[i_i],
once_candidated,
bp_labels_table,
used_bp_roots,
iter);
}
}
{// Collect elements from tmp_got_candidates_queue to got_candidates_queue
VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
PADO::collect_into_queue(
tmp_got_candidates_queue,
                    offsets_tmp_queue, // the locations for reading tmp_got_candidates_queue
                    sizes_tmp_got_candidates_queue, // the locations for writing got_candidates_queue
total_new,
got_candidates_queue,
end_got_candidates_queue);
}
{// Collect elements from tmp_once_candidated_queue to once_candidated_queue
VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
PADO::collect_into_queue(
tmp_once_candidated_queue,
                    offsets_tmp_queue, // the locations for reading tmp_once_candidated_queue
                    sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
total_new,
once_candidated_queue,
end_once_candidated_queue);
}
}
}
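// Minimal serial sketches of the PADO helper semantics assumed by the pattern above
// (per-bucket sizes, turned into exclusive-prefix offsets, then a parallel scatter,
// then a collect). These are illustrations inferred from the call sites, not the
// library's actual code.
template <typename T>
static T prefix_sum_for_offsets_sketch(std::vector<T> &sizes_to_offsets)
{
    T total = 0;
    for (T &s : sizes_to_offsets) {
        T count = s;
        s = total; // each entry becomes its bucket's exclusive prefix sum
        total += count;
    }
    return total; // total number of elements across all buckets
}
// Copies each bucket's valid elements from tmp_queue into queue. Both offset arrays
// are assumed to have been produced by prefix_sum_for_offsets (as done above).
template <typename T, typename I>
static void collect_into_queue_sketch(const std::vector<T> &tmp_queue,
                                      const std::vector<I> &read_offsets,
                                      const std::vector<I> &write_offsets,
                                      const I num_new,
                                      std::vector<T> &queue,
                                      I &end_queue)
{
    const size_t num_buckets = read_offsets.size();
    for (size_t b = 0; b < num_buckets; ++b) {
        I length = (b + 1 < num_buckets ? write_offsets[b + 1] : num_new)
                   - write_offsets[b];
        for (I i = 0; i < length; ++i) {
            queue[end_queue + write_offsets[b] + i] = tmp_queue[read_offsets[b] + i];
        }
    }
    end_queue += num_new;
}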
// Function: pushes v_head's newly added labels to every (master) neighbor of v_head
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
const VertexID v_head_global,
const EdgeID start_index,
const EdgeID bound_index,
const VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
std::vector<VertexID> &tmp_got_candidates_queue,
VertexID &size_tmp_got_candidates_queue,
const VertexID offset_tmp_queue,
std::vector<uint8_t> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
std::vector<VertexID> &tmp_once_candidated_queue,
VertexID &size_tmp_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter)
{
// Traverse v_head's every neighbor v_tail
EdgeID e_i_start = G.vertices_idx[v_head_global];
EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
VertexID v_tail_global = G.out_edges[e_i];
if (used_bp_roots[v_tail_global]) {
continue;
}
        if (v_tail_global < roots_start) { // all remaining v_tail_global have higher rank than any root, so no root can push new labels to them.
return;
}
VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
const IndexType &L_tail = L[v_tail_local];
ShortIndex &SI_v_tail = short_index[v_tail_local];
// Traverse v_head's last inserted labels
for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
VertexID label_root_id = labels_buffer[l_i];
VertexID label_global_id = label_root_id + roots_start;
if (v_tail_global <= label_global_id) {
// v_tail_global has higher rank than the label
continue;
}
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator[label_root_id] = 1;
{// Deal with race condition
if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id, static_cast<uint8_t>(0),
static_cast<uint8_t>(1))) {
// The label is already selected before
continue;
}
}
// Add into once_candidated_queue
if (!once_candidated[v_tail_local]) {
// If v_tail_global is not in the once_candidated_queue yet, add it in
if (PADO::CAS(once_candidated.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
}
// once_candidated[v_tail_local] = 1;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
}
// Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// const IndexType &L_label = L[label_global_id];
// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
const BPLabelType &L_label = bp_labels_table[label_root_id];
bool no_need_add = false;
for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
if (td - 2 <= iter) {
td +=
(L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
(L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
? -1 : 0;
if (td <= iter) {
no_need_add = true;
break;
}
}
}
if (no_need_add) {
continue;
}
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = 1;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
if (!SI_v_tail.is_candidate[label_root_id]) {
if (CAS(SI_v_tail.is_candidate.data() + label_root_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id);
}
}
// Add into got_candidates queue
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = 1;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
if (!got_candidates[v_tail_local]) {
if (CAS(got_candidates.data() + v_tail_local, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) {
tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
}
}
}
}
// {
// assert(iter >= iter);
// }
}
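// Minimal sketches of the atomic helpers used above (PADO::CAS and PADO::TS_enqueue
// are library utilities; these hypothetical stand-ins use GCC/Clang builtins).
template <typename T>
static inline bool cas_sketch(T *ptr, T expected, T desired)
{
    // single-shot compare-and-swap: true iff *ptr was `expected` and is now `desired`,
    // which is what lets exactly one thread claim a flag such as once_candidated[v]
    return __atomic_compare_exchange_n(ptr, &expected, desired,
                                       false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
template <typename T, typename I>
static inline void ts_enqueue_sketch(std::vector<T> &queue, I &end_queue, const T &element)
{
    // thread-safe append: atomically claim a slot index, then write into it
    I loc = __atomic_fetch_add(&end_queue, static_cast<I>(1), __ATOMIC_SEQ_CST);
    queue[loc] = element;
}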
// Function: pushes v_head's newly added labels to every (master) neighbor of v_head
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
VertexID v_head_global,
EdgeID start_index,
EdgeID bound_index,
VertexID roots_start,
const std::vector<VertexID> &labels_buffer,
const DistGraph &G,
std::vector<ShortIndex> &short_index,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated,
const std::vector<BPLabelType> &bp_labels_table,
const std::vector<uint8_t> &used_bp_roots,
const UnweightedDist iter)
{
// Traverse v_head's every neighbor v_tail
EdgeID e_i_start = G.vertices_idx[v_head_global];
EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
VertexID v_tail_global = G.out_edges[e_i];
if (used_bp_roots[v_tail_global]) {
continue;
}
        if (v_tail_global < roots_start) { // all remaining v_tail_global have higher rank than any root, so no root can push new labels to them.
return;
}
// Traverse v_head's last inserted labels
for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
VertexID label_root_id = labels_buffer[l_i];
VertexID label_global_id = label_root_id + roots_start;
if (v_tail_global <= label_global_id) {
// v_tail_global has higher rank than the label
continue;
}
VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
const IndexType &L_tail = L[v_tail_local];
ShortIndex &SI_v_tail = short_index[v_tail_local];
if (SI_v_tail.indicator[label_root_id]) {
// The label is already selected before
continue;
}
// Record label_root_id as once selected by v_tail_global
SI_v_tail.indicator[label_root_id] = 1;
// SI_v_tail.indicator.set(label_root_id);
// Add into once_candidated_queue
if (!once_candidated[v_tail_local]) {
// If v_tail_global is not in the once_candidated_queue yet, add it in
once_candidated[v_tail_local] = 1;
once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
}
// Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// const IndexType &L_label = L[label_global_id];
// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
const BPLabelType &L_label = bp_labels_table[label_root_id];
bool no_need_add = false;
for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
if (td - 2 <= iter) {
td +=
(L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
(L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
? -1 : 0;
if (td <= iter) {
no_need_add = true;
break;
}
}
}
if (no_need_add) {
continue;
}
if (SI_v_tail.is_candidate[label_root_id]) {
continue;
}
SI_v_tail.is_candidate[label_root_id] = 1;
SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
if (!got_candidates[v_tail_local]) {
// If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
got_candidates[v_tail_local] = 1;
got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
}
}
}
// {
// assert(iter >= iter);
// }
}
//// Function: pushes v_head's labels to v_head's every (master) neighbor
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//local_push_labels(
// VertexID v_head_local,
// VertexID roots_start,
// const DistGraph &G,
// std::vector<ShortIndex> &short_index,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<bool> &got_candidates,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<bool> &once_candidated,
// const std::vector<BPLabelType> &bp_labels_table,
// const std::vector<uint8_t> &used_bp_roots,
// UnweightedDist iter)
//{
// // The data structure of a message
//// std::vector< LabelUnitType > buffer_recv;
// const IndexType &Lv = L[v_head_local];
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin() -> start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size;
// // Traverse v_head's every neighbor v_tail
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// EdgeID e_i_start = G.vertices_idx[v_head_global];
// EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
// for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
// VertexID v_tail_global = G.out_edges[e_i];
// if (used_bp_roots[v_tail_global]) {
// continue;
// }
// if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
// return;
// }
//
// // Traverse v_head's last inserted labels
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// VertexID label_global_id = label_root_id + roots_start;
// if (v_tail_global <= label_global_id) {
// // v_tail_global has higher rank than the label
// continue;
// }
// VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
// const IndexType &L_tail = L[v_tail_local];
// ShortIndex &SI_v_tail = short_index[v_tail_local];
// if (SI_v_tail.indicator[label_root_id]) {
// // The label is already selected before
// continue;
// }
// // Record label_root_id as once selected by v_tail_global
// SI_v_tail.indicator.set(label_root_id);
// // Add into once_candidated_queue
//
// if (!once_candidated[v_tail_local]) {
// // If v_tail_global is not in the once_candidated_queue yet, add it in
// once_candidated[v_tail_local] = true;
// once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
// }
//
// // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
// // ++total_check_count;
//// const IndexType &L_label = L[label_global_id];
//// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
//// bp_checking_ins_count.measure_start();
// const BPLabelType &L_label = bp_labels_table[label_root_id];
// bool no_need_add = false;
// for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
// VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
// if (td - 2 <= iter) {
// td +=
// (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
// ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
// (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0]))
// ? -1 : 0;
// if (td <= iter) {
// no_need_add = true;
//// ++bp_hit_count;
// break;
// }
// }
// }
// if (no_need_add) {
//// bp_checking_ins_count.measure_stop();
// continue;
// }
//// bp_checking_ins_count.measure_stop();
// if (SI_v_tail.is_candidate[label_root_id]) {
// continue;
// }
// SI_v_tail.is_candidate[label_root_id] = true;
// SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
// if (!got_candidates[v_tail_local]) {
// // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
// got_candidates[v_tail_local] = true;
// got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
// }
// }
// }
//
// {
// assert(iter >= iter);
// }
//}
//// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts.
//// Has a mysterious problem: when this function is called, some hosts receive wrong messages; when all of
//// its code is copied inline into the caller, the messages become correct.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//sync_masters_2_mirrors(
// const DistGraph &G,
// const std::vector<VertexID> &active_queue,
// VertexID end_active_queue,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send,
// std::vector<MPI_Request> &requests_send
//)
//{
//// std::vector< std::pair<VertexID, VertexID> > buffer_send;
//    // pair.first: owner vertex ID of the label
//    // pair.second: label vertex ID of the label
// // Prepare masters' newly added labels for sending
// for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send.emplace_back(v_head_global, label_root_id);
//// {//test
//// if (1 == host_id) {
//// printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);//
//// }
//// }
// }
// }
// {
// if (!buffer_send.empty()) {
// printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
// }
// assert(!requests_send.empty());
// }
//
// // Send messages
// for (int loc = 0; loc < num_hosts - 1; ++loc) {
// int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc);
// MPI_Isend(buffer_send.data(),
// MPI_Instance::get_sending_size(buffer_send),
// MPI_CHAR,
// dest_host_id,
// SENDING_MASTERS_TO_MIRRORS,
// MPI_COMM_WORLD,
// &requests_send[loc]);
// {
// if (!buffer_send.empty()) {
// printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
// }
// }
// }
//}
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_inserting_para(
const DistGraph &G,
const VertexID roots_start,
const VertexID roots_size,
std::vector<ShortIndex> &short_index,
const std::vector< std::vector<UnweightedDist> > &dist_table,
const std::vector<VertexID> &got_candidates_queue,
const VertexID start_got_candidates_queue,
const VertexID size_got_candidates_queue,
std::vector<uint8_t> &got_candidates,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<uint8_t> &is_active,
std::vector< std::pair<VertexID, VertexID> > &buffer_send,
const VertexID iter)
{
const VertexID bound_got_candidates_queue = start_got_candidates_queue + size_got_candidates_queue;
std::vector<VertexID> offsets_tmp_active_queue;
std::vector<VertexID> tmp_active_queue;
std::vector<VertexID> sizes_tmp_active_queue;
std::vector<EdgeID> offsets_tmp_buffer_send;
std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send;
std::vector<EdgeID> sizes_tmp_buffer_send;
EdgeID total_send_labels;
try {
offsets_tmp_active_queue.resize(size_got_candidates_queue);
#pragma omp parallel for
for (VertexID i_q = 0; i_q < size_got_candidates_queue; ++i_q) {
offsets_tmp_active_queue[i_q] = i_q;
}
tmp_active_queue.resize(size_got_candidates_queue);
sizes_tmp_active_queue.resize(size_got_candidates_queue,
0); // Size will only be 0 or 1, but it will become offsets eventually.
// Prepare for parallel buffer_send
// std::vector<EdgeID> offsets_tmp_buffer_send(size_got_candidates_queue);
offsets_tmp_buffer_send.resize(size_got_candidates_queue);
#pragma omp parallel for
for (VertexID i_q = start_got_candidates_queue; i_q < bound_got_candidates_queue; ++i_q) {
VertexID v_id_local = got_candidates_queue[i_q];
VertexID v_global_id = G.get_global_vertex_id(v_id_local);
VertexID tmp_i_q = i_q - start_got_candidates_queue;
if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
                // If v_global_id is a root, its new labels should be put into buffer_send
offsets_tmp_buffer_send[tmp_i_q] = short_index[v_id_local].end_candidates_que;
} else {
offsets_tmp_buffer_send[tmp_i_q] = 0;
}
}
total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
tmp_buffer_send.resize(total_send_labels);
sizes_tmp_buffer_send.resize(size_got_candidates_queue, 0);
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("L%u_tmp_buffer_send: bad_alloc "
"host_id: %d "
"iter: %u "
"size_got_candidates_queue: %u "
"total_send_labels: %lu "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
__LINE__,
host_id,
iter,
size_got_candidates_queue,
total_send_labels,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
#pragma omp parallel for
for (VertexID i_queue = start_got_candidates_queue; i_queue < bound_got_candidates_queue; ++i_queue) {
VertexID v_id_local = got_candidates_queue[i_queue];
        VertexID inserted_count = 0; // number of v_id's candidates that actually get inserted
got_candidates[v_id_local] = 0; // reset got_candidates
// Traverse v_id's all candidates
VertexID tmp_i_queue = i_queue - start_got_candidates_queue;
VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
short_index[v_id_local].is_candidate[cand_root_id] = 0;
// Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
if (distance_query(
cand_root_id,
v_id_local,
roots_start,
// L,
dist_table,
iter)) {
if (!is_active[v_id_local]) {
is_active[v_id_local] = 1;
// active_queue[end_active_queue++] = v_id_local;
tmp_active_queue[tmp_i_queue + sizes_tmp_active_queue[tmp_i_queue]++] = v_id_local;
}
++inserted_count;
// The candidate cand_root_id needs to be added into v_id's label
insert_label_only_para(
cand_root_id,
v_id_local,
roots_start,
roots_size,
G,
tmp_buffer_send,
sizes_tmp_buffer_send[tmp_i_queue],
offsets_tmp_buffer_send[tmp_i_queue]);
// buffer_send);
}
}
short_index[v_id_local].end_candidates_que = 0;
if (0 != inserted_count) {
// Update other arrays in L[v_id] if new labels were inserted in this iteration
update_label_indices(
v_id_local,
inserted_count,
// L,
short_index,
// b_id,
iter);
}
}
{// Collect elements from tmp_active_queue to active_queue
VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
PADO::collect_into_queue(
tmp_active_queue,
offsets_tmp_active_queue,
sizes_tmp_active_queue,
total_new,
active_queue,
end_active_queue);
}
{// Collect elements from tmp_buffer_send to buffer_send
EdgeID old_size_buffer_send = buffer_send.size();
EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
try {
buffer_send.resize(total_new + old_size_buffer_send);
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("L%u_buffer_send: bad_alloc "
"iter: %u "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
__LINE__,
iter,
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// EdgeID zero_size = 0;
PADO::collect_into_queue(
tmp_buffer_send,
offsets_tmp_buffer_send,
sizes_tmp_buffer_send,
total_new,
buffer_send,
old_size_buffer_send);
// zero_size);
}
}
// Function for distance query;
// traverse vertex v_id's labels;
// return false if a shorter distance already exists; return true if cand_root_id can be added into v_id's labels.
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
// const std::vector<IndexType> &L,
const std::vector< std::vector<UnweightedDist> > &dist_table,
UnweightedDist iter)
{
VertexID cand_real_id = cand_root_id + roots_start;
const IndexType &Lv = L[v_id_local];
// Traverse v_id's all existing labels
VertexID b_i_bound = Lv.batches.size();
_mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
_mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
_mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
//_mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
VertexID dist_start_index = Lv.batches[b_i].start_index;
VertexID dist_bound_index = dist_start_index + Lv.batches[b_i].size;
// Traverse dist_table
for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
// VertexID dist_bound_index = Lv.distances.size();
// for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
UnweightedDist dist = Lv.distances[dist_i].dist;
            // Distances are only ordered within a batch (no batch_id anymore, so no global order across batches); the break below therefore only skips to the next batch.
if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered.
// If the half path distance is already greater than their targeted distance, jump to next batch
break;
}
VertexID v_start_index = Lv.distances[dist_i].start_index;
VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size;
// _mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
_mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0);
for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// VertexID v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
VertexID v = Lv.vertices[v_i]; // v is a label hub of v_id
if (v >= cand_real_id) {
// Vertex cand_real_id cannot have labels whose ranks are lower than it,
// in which case dist_table[cand_root_id][v] does not exist.
continue;
}
VertexID d_tmp = dist + dist_table[cand_root_id][v];
if (d_tmp <= iter) {
return false;
}
}
}
}
return true;
}
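// The test above is the standard 2-hop-cover pruning: candidate root r is redundant for
// v when some already-indexed hub h gives d(v,h) + d(h,r) <= iter. For reference, a
// self-contained query over flat, hub-sorted (hub, dist) label lists, an illustrative
// simplification of the batches/distances/vertices layout traversed above:
static UnweightedDist query_2hop_sketch(
        const std::vector< std::pair<VertexID, UnweightedDist> > &Lu,
        const std::vector< std::pair<VertexID, UnweightedDist> > &Lv)
{
    UnweightedDist best = static_cast<UnweightedDist>(-1); // INF, assuming unsigned dist
    size_t i = 0, j = 0;
    while (i < Lu.size() && j < Lv.size()) {
        if (Lu[i].first == Lv[j].first) { // common hub
            // (a full query would also guard against INF + INF overflow here)
            UnweightedDist d = Lu[i].second + Lv[j].second;
            if (d < best) {
                best = d;
            }
            ++i;
            ++j;
        } else if (Lu[i].first < Lv[j].first) {
            ++i;
        } else {
            ++j;
        }
    }
    return best;
}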
//// Sequential version
// Function inserts candidate cand_root_id into vertex v_id's labels;
// queues the distance-buffer (dist_table) update into buffer_send for later sending;
// it only updates the vertices array of v_id's labels.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_seq(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::pair<VertexID, VertexID> > &buffer_send)
// UnweightedDist iter)
{
try {
VertexID cand_real_id = cand_root_id + roots_start;
L[v_id_local].vertices.push_back(cand_real_id);
// L[v_id_local].vertices.push_back(cand_root_id);
// Update the distance buffer if v_id is a root
VertexID v_id_global = G.get_global_vertex_id(v_id_local);
VertexID v_root_id = v_id_global - roots_start;
if (v_id_global >= roots_start && v_root_id < roots_size) {
// VertexID cand_real_id = cand_root_id + roots_start;
// dist_table[v_root_id][cand_real_id] = iter;
// Put the update into the buffer_send for later sending
buffer_send.emplace_back(v_root_id, cand_real_id);
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("insert_label_only_seq: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
}
//// Parallel Version
// Function inserts candidate cand_root_id into vertex v_id's labels and,
// if v_id is a root, records the corresponding dist_table update into tmp_buffer_send;
// it only updates the vertices array of v_id's label index.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
insert_label_only_para(
VertexID cand_root_id,
VertexID v_id_local,
VertexID roots_start,
VertexID roots_size,
const DistGraph &G,
// std::vector< std::pair<VertexID, VertexID> > &buffer_send)
std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
EdgeID &size_tmp_buffer_send,
const EdgeID offset_tmp_buffer_send)
{
try {
VertexID cand_real_id = cand_root_id + roots_start;
L[v_id_local].vertices.push_back(cand_real_id);
// L[v_id_local].vertices.push_back(cand_root_id);
// Update the distance buffer if v_id is a root
VertexID v_id_global = G.get_global_vertex_id(v_id_local);
VertexID v_root_id = v_id_global - roots_start;
if (v_id_global >= roots_start && v_root_id < roots_size) {
// VertexID cand_real_id = cand_root_id + roots_start;
// Put the update into the buffer_send for later sending
tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] = std::make_pair(v_root_id, cand_real_id);
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("insert_label_only_para: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
}
// Function updates the index arrays of v_id's label only if new labels have been inserted for v_id
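// Label index layout (three parallel arrays per vertex):
//   batches[b]   = {start_index, size}:       a run of entries in `distances`
//   distances[d] = {start_index, size, dist}: a run of entries in `vertices`
//   vertices[v]  = label hub IDs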
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
update_label_indices(
const VertexID v_id_local,
const VertexID inserted_count,
// std::vector<IndexType> &L,
std::vector<ShortIndex> &short_index,
// VertexID b_id,
const UnweightedDist iter)
{
try {
IndexType &Lv = L[v_id_local];
        // indicator[BATCH_SIZE] being true means v already got some labels in this batch
        if (short_index[v_id_local].indicator[BATCH_SIZE]) {
            // Increase the last batch's size because a new distance element needs to be added
++(Lv.batches.rbegin() -> size);
} else {
short_index[v_id_local].indicator[BATCH_SIZE] = 1;
// short_index[v_id_local].indicator.set(BATCH_SIZE);
            // Insert a new batch with start_index and size because a new distance element needs to be added
Lv.batches.emplace_back(
// b_id, // batch id
Lv.distances.size(), // start index
1); // size
}
// Insert a new distance element with start_index, size, and dist
Lv.distances.emplace_back(
Lv.vertices.size() - inserted_count, // start index
inserted_count, // size
iter); // distance
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("update_label_indices: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
}
// Function resets the distance buffer dist_table to INF (and clears the other per-batch tables).
// It traverses every root's received labels and resets only those elements,
// which reduces the initialization cost of the next batch.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
const DistGraph &G,
// VertexID roots_start,
// const std::vector<VertexID> &roots_master_local,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
const std::vector<VertexID> &once_candidated_queue,
const VertexID end_once_candidated_queue)
{
// // Reset dist_table according to local masters' labels
// for (VertexID r_local_id : roots_master_local) {
// IndexType &Lr = L[r_local_id];
// VertexID r_root_id = G.get_global_vertex_id(r_local_id) - roots_start;
// VertexID b_i_bound = Lr.batches.size();
// _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
// _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
// for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
// VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
// VertexID dist_start_index = Lr.batches[b_i].start_index;
// VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
// // Traverse dist_table
// for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
// VertexID v_start_index = Lr.distances[dist_i].start_index;
// VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
// for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = MAX_UNWEIGHTED_DIST;
// }
// }
// }
// }
// Reset dist_table according to received masters' labels from other hosts
for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
for (VertexID cand_real_id : recved_dist_table[r_root_id]) {
dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST;
}
recved_dist_table[r_root_id].clear();
}
// Reset bit-parallel labels table
for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
memset(bp_labels_table[r_root_id].bp_dist, 0, sizeof(bp_labels_table[r_root_id].bp_dist));
memset(bp_labels_table[r_root_id].bp_sets, 0, sizeof(bp_labels_table[r_root_id].bp_sets));
}
// Remove labels of local minimum set
for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
VertexID v_local_id = once_candidated_queue[v_i];
if (!G.is_local_minimum[v_local_id]) {
continue;
}
L[v_local_id].clean_all_indices();
}
}
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
const DistGraph &G,
// const VertexID b_id,
const VertexID roots_start, // start id of roots
const VertexID roots_size, // how many roots in the batch
const std::vector<uint8_t> &used_bp_roots,
std::vector<VertexID> &active_queue,
VertexID &end_active_queue,
std::vector<VertexID> &got_candidates_queue,
VertexID &end_got_candidates_queue,
std::vector<ShortIndex> &short_index,
std::vector< std::vector<UnweightedDist> > &dist_table,
std::vector< std::vector<VertexID> > &recved_dist_table,
std::vector<BPLabelType> &bp_labels_table,
std::vector<uint8_t> &got_candidates,
// std::vector<bool> &got_candidates,
std::vector<uint8_t> &is_active,
// std::vector<bool> &is_active,
std::vector<VertexID> &once_candidated_queue,
VertexID &end_once_candidated_queue,
std::vector<uint8_t> &once_candidated)
// std::vector<bool> &once_candidated)
{
// At the beginning of a batch, initialize the labels L and distance buffer dist_table;
// initializing_time -= WallTimer::get_time_mark();
    // The maximum number of active vertices among all hosts.
VertexID global_num_actives = initialization(G,
short_index,
dist_table,
recved_dist_table,
bp_labels_table,
active_queue,
end_active_queue,
once_candidated_queue,
end_once_candidated_queue,
once_candidated,
// b_id,
roots_start,
roots_size,
// roots_master_local,
used_bp_roots);
// initializing_time += WallTimer::get_time_mark();
    UnweightedDist iter = 0; // The iteration counter; it is also the label distance of the current iteration
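    // Each iteration runs three phases:
    //   1) Scatter: active vertices push their newest labels as candidates
    //      (schedule_label_pushing_para), exchanging masters' labels across hosts.
    //   2) Gather: candidates are distance-checked and inserted
    //      (schedule_label_inserting_para, or the sequential loop below).
    //   3) Sync: dist_table updates are broadcast and global_num_actives is reduced.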
// {//test
// if (0 == host_id) {
// printf("host_id: %u initialization finished.\n", host_id);
// }
// }
while (global_num_actives) {
++iter;
{// Limit the distance
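            // Hard-coded distance cap: once iter exceeds 7, remaining active vertices are
            // deactivated and the batch ends early. The constant is undocumented here;
            // presumably longer distances are expected to be covered by other labels
            // (e.g., the bit-parallel ones).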
            if (iter > 7) {
if (end_active_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
VertexID v_id_local = active_queue[i_q];
is_active[v_id_local] = 0;
}
} else {
for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
VertexID v_id_local = active_queue[i_q];
is_active[v_id_local] = 0;
}
}
end_active_queue = 0;
break;
}
}
//#ifdef DEBUG_MESSAGES_ON
// {//test
//// if (0 == host_id) {
// double memtotal = 0;
// double memfree = 0;
// PADO::Utils::system_memory(memtotal, memfree);
// printf("iter: %u "
// "host_id: %d "
// "global_num_actives: %u "
// "L.size(): %.2fGB "
// "memtotal: %.2fGB "
// "memfree: %.2fGB\n",
// iter,
// host_id,
// global_num_actives,
// get_index_size() * 1.0 / (1 << 30),
// memtotal / 1024,
// memfree / 1024);
//// }
// }
//#endif
// Traverse active vertices to push their labels as candidates
// Send masters' newly added labels to other hosts
try
{
// scatter_time -= WallTimer::get_time_mark();
////// Multiple pushing
// // Divide the pushing into many-time runs, to reduce the peak memory footprint.
// const VertexID chunk_size = 1 << 20;
// VertexID remainder = global_num_actives % chunk_size;
// VertexID bound_global_i = global_num_actives - remainder;
//// VertexID remainder = end_active_queue % chunk_size;
//// VertexID bound_active_queue = end_active_queue - remainder;
// VertexID local_size;
// for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) {
// if (global_i < end_active_queue) {
// local_size = end_active_queue - global_i;
// } else {
// local_size = 0;
// }
//// {//test
//// if (1024 == roots_start && 7 == host_id) {
//// printf("S0 host_id: %d global_i: %u bound_global_i: %u local_size: %u\n",
//// host_id, global_i, bound_global_i, local_size);
//// }
//// }
// schedule_label_pushing_para(
// G,
// roots_start,
// used_bp_roots,
// active_queue,
// global_i,
// chunk_size,
// local_size,
// got_candidates_queue,
// end_got_candidates_queue,
// short_index,
// bp_labels_table,
// got_candidates,
// is_active,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// iter);
// }
// if (remainder) {
// if (bound_global_i < end_active_queue) {
// local_size = end_active_queue - bound_global_i;
// } else {
// local_size = 0;
// }
// schedule_label_pushing_para(
// G,
// roots_start,
// used_bp_roots,
// active_queue,
// bound_global_i,
// remainder,
// local_size,
// got_candidates_queue,
// end_got_candidates_queue,
// short_index,
// bp_labels_table,
// got_candidates,
// is_active,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// iter);
// }
//// Single pushing
schedule_label_pushing_para(
G,
roots_start,
used_bp_roots,
active_queue,
0,
global_num_actives,
end_active_queue,
got_candidates_queue,
end_got_candidates_queue,
short_index,
bp_labels_table,
got_candidates,
is_active,
once_candidated_queue,
end_once_candidated_queue,
once_candidated,
iter);
end_active_queue = 0;
// scatter_time += WallTimer::get_time_mark();
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("pushing: bad_alloc "
"iter: %u "
"host_id: %d "
"global_num_actives: %u "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
iter,
host_id,
global_num_actives,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
// {//test
// if (0 == host_id) {
// printf("host_id: %u pushing finished...\n", host_id);
// }
// }
// Traverse vertices in the got_candidates_queue to insert labels
{
// gather_time -= WallTimer::get_time_mark();
                std::vector< std::pair<VertexID, VertexID> > buffer_send; // Elements to be synced into dist_table
// pair.first: root id
// pair.second: label (global) id of the root
if (end_got_candidates_queue >= THRESHOLD_PARALLEL) {
////// Multiple checking/inserting
// const VertexID chunk_size = 1 << 20;
// VertexID remainder = end_got_candidates_queue % chunk_size;
// VertexID bound_i_q = end_got_candidates_queue - remainder;
// for (VertexID i_q = 0; i_q < bound_i_q; i_q += chunk_size) {
// schedule_label_inserting_para(
// G,
// roots_start,
// roots_size,
// short_index,
// dist_table,
// got_candidates_queue,
// i_q,
// chunk_size,
// got_candidates,
// active_queue,
// end_active_queue,
// is_active,
// buffer_send,
// iter);
// }
// if (remainder) {
// schedule_label_inserting_para(
// G,
// roots_start,
// roots_size,
// short_index,
// dist_table,
// got_candidates_queue,
// bound_i_q,
// remainder,
// got_candidates,
// active_queue,
// end_active_queue,
// is_active,
// buffer_send,
// iter);
// }
//// Single checking/inserting
schedule_label_inserting_para(
G,
roots_start,
roots_size,
short_index,
dist_table,
got_candidates_queue,
0,
end_got_candidates_queue,
got_candidates,
active_queue,
end_active_queue,
is_active,
buffer_send,
iter);
////// Backup
// // Prepare for parallel active_queue
// // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already.
// // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it.
// std::vector<VertexID> offsets_tmp_active_queue;
// std::vector<VertexID> tmp_active_queue;
// std::vector<VertexID> sizes_tmp_active_queue;
// std::vector<EdgeID> offsets_tmp_buffer_send;
// std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send;
// std::vector<EdgeID> sizes_tmp_buffer_send;
// EdgeID total_send_labels;
//
// try {
// offsets_tmp_active_queue.resize(end_got_candidates_queue);
//#pragma omp parallel for
// for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
// offsets_tmp_active_queue[i_q] = i_q;
// }
// tmp_active_queue.resize(end_got_candidates_queue);
// sizes_tmp_active_queue.resize(end_got_candidates_queue,
// 0); // Size will only be 0 or 1, but it will become offsets eventually.
//
// // Prepare for parallel buffer_send
//// std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue);
// offsets_tmp_buffer_send.resize(end_got_candidates_queue);
//#pragma omp parallel for
// for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) {
// VertexID v_id_local = got_candidates_queue[i_q];
// VertexID v_global_id = G.get_global_vertex_id(v_id_local);
// if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
// // If v_global_id is root, its new labels should be put into buffer_send
// offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que;
// } else {
// offsets_tmp_buffer_send[i_q] = 0;
// }
// }
// total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
// tmp_buffer_send.resize(total_send_labels);
// sizes_tmp_buffer_send.resize(end_got_candidates_queue, 0);
// }
// catch (const std::bad_alloc &) {
// double memtotal = 0;
// double memfree = 0;
// PADO::Utils::system_memory(memtotal, memfree);
// printf("L%u_tmp_buffer_send: bad_alloc "
// "host_id: %d "
// "iter: %u "
// "end_got_candidates_queue: %u "
// "total_send_labels: %u "
// "L.size(): %.2fGB "
// "memtotal: %.2fGB "
// "memfree: %.2fGB\n",
// __LINE__,
// host_id,
// iter,
// end_got_candidates_queue,
// total_send_labels,
// get_index_size() * 1.0 / (1 << 30),
// memtotal / 1024,
// memfree / 1024);
// exit(1);
// }
//
//#pragma omp parallel for
// for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
// VertexID v_id_local = got_candidates_queue[i_queue];
// VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
// got_candidates[v_id_local] = 0; // reset got_candidates
// // Traverse v_id's all candidates
// VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
// for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
// VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
// short_index[v_id_local].is_candidate[cand_root_id] = 0;
// // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
// if (distance_query(
// cand_root_id,
// v_id_local,
// roots_start,
// // L,
// dist_table,
// iter)) {
// if (!is_active[v_id_local]) {
// is_active[v_id_local] = 1;
//// active_queue[end_active_queue++] = v_id_local;
// tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local;
// }
// ++inserted_count;
// // The candidate cand_root_id needs to be added into v_id's label
// insert_label_only_para(
// cand_root_id,
// v_id_local,
// roots_start,
// roots_size,
// G,
// tmp_buffer_send,
// sizes_tmp_buffer_send[i_queue],
// offsets_tmp_buffer_send[i_queue]);
//// buffer_send);
// }
// }
// short_index[v_id_local].end_candidates_que = 0;
// if (0 != inserted_count) {
// // Update other arrays in L[v_id] if new labels were inserted in this iteration
// update_label_indices(
// v_id_local,
// inserted_count,
// // L,
//// short_index,
//// b_id,
// iter);
// }
// }
//
// {// Collect elements from tmp_active_queue to active_queue
// VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
// PADO::collect_into_queue(
// tmp_active_queue,
// offsets_tmp_active_queue,
// sizes_tmp_active_queue,
// total_new,
// active_queue,
// end_active_queue);
// }
// {// Collect elements from tmp_buffer_send to buffer_send
// EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
// try {
// buffer_send.resize(total_new);
// }
// catch (const std::bad_alloc &) {
// double memtotal = 0;
// double memfree = 0;
// PADO::Utils::system_memory(memtotal, memfree);
// printf("L%u_buffer_send: bad_alloc "
// "iter: %u "
// "host_id: %d "
// "L.size(): %.2fGB "
// "memtotal: %.2fGB "
// "memfree: %.2fGB\n",
// __LINE__,
// iter,
// host_id,
// get_index_size() * 1.0 / (1 << 30),
// memtotal / 1024,
// memfree / 1024);
// exit(1);
// }
// EdgeID zero_size = 0;
// PADO::collect_into_queue(
// tmp_buffer_send,
// offsets_tmp_buffer_send,
// sizes_tmp_buffer_send,
// total_new,
// buffer_send,
// zero_size);
// }
} else {
for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
VertexID v_id_local = got_candidates_queue[i_queue];
                    VertexID inserted_count = 0; // number of v_id's candidates actually inserted
got_candidates[v_id_local] = 0; // reset got_candidates
// Traverse v_id's all candidates
VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
short_index[v_id_local].is_candidate[cand_root_id] = 0;
// Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
if (distance_query(
cand_root_id,
v_id_local,
roots_start,
// L,
dist_table,
iter)) {
if (!is_active[v_id_local]) {
is_active[v_id_local] = 1;
active_queue[end_active_queue++] = v_id_local;
}
++inserted_count;
// The candidate cand_root_id needs to be added into v_id's label
insert_label_only_seq(
cand_root_id,
v_id_local,
roots_start,
roots_size,
G,
// dist_table,
buffer_send);
// iter);
}
}
short_index[v_id_local].end_candidates_que = 0;
if (0 != inserted_count) {
// Update other arrays in L[v_id] if new labels were inserted in this iteration
update_label_indices(
v_id_local,
inserted_count,
// L,
short_index,
// b_id,
iter);
}
}
}
// {//test
// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
// }
end_got_candidates_queue = 0; // Set the got_candidates_queue empty
// Sync the dist_table
for (int root = 0; root < num_hosts; ++root) {
std::vector<std::pair<VertexID, VertexID>> buffer_recv;
one_host_bcasts_buffer_to_buffer(root,
buffer_send,
buffer_recv);
if (buffer_recv.empty()) {
continue;
}
EdgeID size_buffer_recv = buffer_recv.size();
try {
if (size_buffer_recv >= THRESHOLD_PARALLEL) {
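                        // Two-pass parallel ingest: (1) atomically count incoming labels per root,
                        // (2) grow each root's vector once, (3) append concurrently via TS_enqueue.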
                        // Count the received labels for every root
std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
VertexID root_id = e.first;
__atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
}
// Resize the recved_dist_table for every root
#pragma omp parallel for
for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
VertexID old_size = recved_dist_table[root_id].size();
VertexID tmp_size = sizes_recved_root_labels[root_id];
if (tmp_size) {
recved_dist_table[root_id].resize(old_size + tmp_size);
sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size
}
                            // If tmp_size == 0, root_id received no labels, so its entry is never read.
}
                        // Record received labels in recved_dist_table
#pragma omp parallel for
for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
const std::pair<VertexID, VertexID> &e = buffer_recv[i_l];
VertexID root_id = e.first;
VertexID cand_real_id = e.second;
dist_table[root_id][cand_real_id] = iter;
PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id],
cand_real_id);
}
} else {
for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
VertexID root_id = e.first;
VertexID cand_real_id = e.second;
dist_table[root_id][cand_real_id] = iter;
// Record the received element, for future reset
recved_dist_table[root_id].push_back(cand_real_id);
}
}
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("recved_dist_table: bad_alloc "
"host_id: %d "
"iter: %u "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
iter,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
}
// Sync the global_num_actives
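            // MPI_MAX (rather than the commented-out MPI_SUM) makes global_num_actives the
            // largest per-host active count; it serves both as the loop guard and as the
            // global bound passed to schedule_label_pushing_para.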
// message_time -= WallTimer::get_time_mark();
MPI_Allreduce(&end_active_queue,
&global_num_actives,
1,
V_ID_Type,
MPI_MAX,
// MPI_SUM,
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// gather_time += WallTimer::get_time_mark();
}
// {//test
// if (0 == host_id) {
// printf("iter: %u inserting labels finished.\n", iter);
// }
// }
}
// Reset the dist_table
// clearup_time -= WallTimer::get_time_mark();
reset_at_end(
G,
// roots_start,
// roots_master_local,
dist_table,
recved_dist_table,
bp_labels_table,
once_candidated_queue,
end_once_candidated_queue);
// clearup_time += WallTimer::get_time_mark();
// {//test
// if (0 == host_id) {
// printf("host_id: %u resetting finished.\n", host_id);
// }
// }
}
//// Sequential Version
//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//batch_process(
// const DistGraph &G,
// VertexID b_id,
// VertexID roots_start, // start id of roots
// VertexID roots_size, // how many roots in the batch
// const std::vector<uint8_t> &used_bp_roots,
// std::vector<VertexID> &active_queue,
// VertexID &end_active_queue,
// std::vector<VertexID> &got_candidates_queue,
// VertexID &end_got_candidates_queue,
// std::vector<ShortIndex> &short_index,
// std::vector< std::vector<UnweightedDist> > &dist_table,
// std::vector< std::vector<VertexID> > &recved_dist_table,
// std::vector<BPLabelType> &bp_labels_table,
// std::vector<uint8_t> &got_candidates,
//// std::vector<bool> &got_candidates,
// std::vector<uint8_t> &is_active,
//// std::vector<bool> &is_active,
// std::vector<VertexID> &once_candidated_queue,
// VertexID &end_once_candidated_queue,
// std::vector<uint8_t> &once_candidated)
//// std::vector<bool> &once_candidated)
//{
// // At the beginning of a batch, initialize the labels L and distance buffer dist_table;
// initializing_time -= WallTimer::get_time_mark();
// VertexID global_num_actives = initialization(G,
// short_index,
// dist_table,
// recved_dist_table,
// bp_labels_table,
// active_queue,
// end_active_queue,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// b_id,
// roots_start,
// roots_size,
//// roots_master_local,
// used_bp_roots);
// initializing_time += WallTimer::get_time_mark();
// UnweightedDist iter = 0; // The iterator, also the distance for current iteration
//// {//test
//// printf("host_id: %u initialization finished.\n", host_id);
//// }
//
//
// while (global_num_actives) {
////#ifdef DEBUG_MESSAGES_ON
//// {//
//// if (0 == host_id) {
//// printf("iter: %u global_num_actives: %u\n", iter, global_num_actives);
//// }
//// }
////#endif
// ++iter;
// // Traverse active vertices to push their labels as candidates
// // Send masters' newly added labels to other hosts
// {
// scatter_time -= WallTimer::get_time_mark();
// std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue);
// //.first: Vertex ID
// //.second: size of labels
// std::vector<VertexID> buffer_send_labels;
// // Prepare masters' newly added labels for sending
// for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) {
// VertexID v_head_local = active_queue[i_q];
// is_active[v_head_local] = 0; // reset is_active
// VertexID v_head_global = G.get_global_vertex_id(v_head_local);
// const IndexType &Lv = L[v_head_local];
// // Prepare the buffer_send_indices
// buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
// // These 2 index are used for traversing v_head's last inserted labels
// VertexID l_i_start = Lv.distances.rbegin()->start_index;
// VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
// for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
// VertexID label_root_id = Lv.vertices[l_i];
// buffer_send_labels.push_back(label_root_id);
// }
// }
// end_active_queue = 0;
//
// for (int root = 0; root < num_hosts; ++root) {
// // Get the indices
// std::vector< std::pair<VertexID, VertexID> > indices_buffer;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send_indices,
// indices_buffer);
// if (indices_buffer.empty()) {
// continue;
// }
// // Get the labels
// std::vector<VertexID> labels_buffer;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send_labels,
// labels_buffer);
// // Push those labels
// EdgeID start_index = 0;
// for (const std::pair<VertexID, VertexID> e : indices_buffer) {
// VertexID v_head_global = e.first;
// EdgeID bound_index = start_index + e.second;
// if (G.local_out_degrees[v_head_global]) {
// local_push_labels(
// v_head_global,
// start_index,
// bound_index,
// roots_start,
// labels_buffer,
// G,
// short_index,
// got_candidates_queue,
// end_got_candidates_queue,
// got_candidates,
// once_candidated_queue,
// end_once_candidated_queue,
// once_candidated,
// bp_labels_table,
// used_bp_roots,
// iter);
// }
// start_index = bound_index;
// }
// }
// scatter_time += WallTimer::get_time_mark();
// }
//
// // Traverse vertices in the got_candidates_queue to insert labels
// {
// gather_time -= WallTimer::get_time_mark();
// std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table
// // pair.first: root id
// // pair.second: label (global) id of the root
// for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) {
// VertexID v_id_local = got_candidates_queue[i_queue];
// VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
// got_candidates[v_id_local] = 0; // reset got_candidates
// // Traverse v_id's all candidates
// VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
// for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
// VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
// short_index[v_id_local].is_candidate[cand_root_id] = 0;
// // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
// if ( distance_query(
// cand_root_id,
// v_id_local,
// roots_start,
// // L,
// dist_table,
// iter) ) {
// if (!is_active[v_id_local]) {
// is_active[v_id_local] = 1;
// active_queue[end_active_queue++] = v_id_local;
// }
// ++inserted_count;
// // The candidate cand_root_id needs to be added into v_id's label
// insert_label_only(
// cand_root_id,
// v_id_local,
// roots_start,
// roots_size,
// G,
//// dist_table,
// buffer_send);
//// iter);
// }
// }
// short_index[v_id_local].end_candidates_que = 0;
// if (0 != inserted_count) {
// // Update other arrays in L[v_id] if new labels were inserted in this iteration
// update_label_indices(
// v_id_local,
// inserted_count,
// // L,
// short_index,
// b_id,
// iter);
// }
// }
//// {//test
//// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send));
//// }
// end_got_candidates_queue = 0; // Set the got_candidates_queue empty
// // Sync the dist_table
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<std::pair<VertexID, VertexID>> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
// for (const std::pair<VertexID, VertexID> &e : buffer_recv) {
// VertexID root_id = e.first;
// VertexID cand_real_id = e.second;
// dist_table[root_id][cand_real_id] = iter;
// // Record the received element, for future reset
// recved_dist_table[root_id].push_back(cand_real_id);
// }
// }
//
// // Sync the global_num_actives
// MPI_Allreduce(&end_active_queue,
// &global_num_actives,
// 1,
// V_ID_Type,
// MPI_SUM,
// MPI_COMM_WORLD);
// gather_time += WallTimer::get_time_mark();
// }
// }
//
// // Reset the dist_table
// clearup_time -= WallTimer::get_time_mark();
// reset_at_end(
//// G,
//// roots_start,
//// roots_master_local,
// dist_table,
// recved_dist_table,
// bp_labels_table);
// clearup_time += WallTimer::get_time_mark();
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Every host h_i broadcast to others
// for (int root = 0; root < num_hosts; ++root) {
// std::vector<E_T> buffer_recv;
// one_host_bcasts_buffer_to_buffer(root,
// buffer_send,
// buffer_recv);
// if (buffer_recv.empty()) {
// continue;
// }
//// uint64_t size_buffer_send = buffer_send.size();
//// // Sync the size_buffer_send.
//// message_time -= WallTimer::get_time_mark();
//// MPI_Bcast(&size_buffer_send,
//// 1,
//// MPI_UINT64_T,
//// root,
//// MPI_COMM_WORLD);
//// message_time += WallTimer::get_time_mark();
////// {// test
////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
////// }
//// if (!size_buffer_send) {
//// continue;
//// }
//// message_time -= WallTimer::get_time_mark();
//// std::vector<E_T> buffer_recv(size_buffer_send);
//// if (host_id == root) {
//// buffer_recv.assign(buffer_send.begin(), buffer_send.end());
//// }
//// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
//// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) {
//// // Only need 1 broadcast
////
//// MPI_Bcast(buffer_recv.data(),
//// bytes_buffer_send,
//// MPI_CHAR,
//// root,
//// MPI_COMM_WORLD);
//// } else {
//// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
//// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
//// size_t offset = 0;
//// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
////// size_t offset = b_i * unit_buffer_size;
//// size_t size_unit_buffer = b_i == num_unit_buffers - 1
//// ? size_buffer_send - offset
//// : unit_buffer_size;
//// MPI_Bcast(buffer_recv.data() + offset,
//// size_unit_buffer * ETypeSize,
//// MPI_CHAR,
//// root,
//// MPI_COMM_WORLD);
//// offset += unit_buffer_size;
//// }
//// }
//// message_time += WallTimer::get_time_mark();
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Host processes locally.
// for (const E_T &e : buffer_send) {
// fun(e);
// }
//
// // Every host sends to others
// for (int src = 0; src < num_hosts; ++src) {
// if (host_id == src) {
// // Send from src
// message_time -= WallTimer::get_time_mark();
// for (int hop = 1; hop < num_hosts; ++hop) {
// int dst = hop_2_root_host_id(hop, host_id);
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// }
// message_time += WallTimer::get_time_mark();
// } else {
// // Receive from src
// for (int hop = 1; hop < num_hosts; ++hop) {
// int dst = hop_2_root_host_id(hop, src);
// if (host_id == dst) {
// message_time -= WallTimer::get_time_mark();
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// }
// }
// }
//}
//// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// // Host processes locally.
// for (const E_T &e : buffer_send) {
// fun(e);
// }
// // Every host sends (num_hosts - 1) times
// for (int hop = 1; hop < num_hosts; ++hop) {
// int src = hop_2_me_host_id(-hop);
// int dst = hop_2_me_host_id(hop);
// if (src != dst) { // Normal case
// // When host_id is odd, first receive, then send.
// if (static_cast<uint32_t>(host_id) & 1U) {
// message_time -= WallTimer::get_time_mark();
// // Receive first.
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u recved_from: %u\n", host_id, src);
// }
// // Send then.
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u send_to: %u\n", host_id, dst);
// }
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// } else { // When host_id is even, first send, then receive.
// // Send first.
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u send_to: %u\n", host_id, dst);
// }
// // Receive then.
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// {//test
// printf("host_id: %u recved_from: %u\n", host_id, src);
// }
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// } else { // If host_id is higher than dst, first send, then receive
// // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2.
// if (host_id < dst) {
// // Send
// message_time -= WallTimer::get_time_mark();
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// // Receive
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// } else { // Otherwise, if host_id is lower than dst, first receive, then send
// // Receive
// message_time -= WallTimer::get_time_mark();
// std::vector<E_T> buffer_recv;
// MPI_Instance::recv_buffer_from_src(buffer_recv,
// src,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// // Send
// MPI_Instance::send_buffer_2_dst(buffer_send,
// dst,
// SENDING_BUFFER_SEND,
// SENDING_SIZE_BUFFER_SEND);
// message_time += WallTimer::get_time_mark();
// // Process
// if (buffer_recv.empty()) {
// continue;
// }
// for (const E_T &e : buffer_recv) {
// fun(e);
// }
// }
// }
// }
//}
//// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer.
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE>
//template <typename E_T, typename F>
//inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::
//every_host_bcasts_buffer_and_proc(
// std::vector<E_T> &buffer_send,
// F &fun)
//{
// const uint32_t UNIT_BUFFER_SIZE = 16U << 20U;
// // Every host h_i broadcast to others
// for (int h_i = 0; h_i < num_hosts; ++h_i) {
// uint64_t size_buffer_send = buffer_send.size();
// // Sync the size_buffer_send.
// message_time -= WallTimer::get_time_mark();
// MPI_Bcast(&size_buffer_send,
// 1,
// MPI_UINT64_T,
// h_i,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
//// {// test
//// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send);
//// }
// if (!size_buffer_send) {
// continue;
// }
// uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE;
//
// // Broadcast the buffer_send
// for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
// // Prepare the unit buffer
// message_time -= WallTimer::get_time_mark();
// size_t offset = b_i * UNIT_BUFFER_SIZE;
// size_t size_unit_buffer = b_i == num_unit_buffers - 1
// ? size_buffer_send - offset
// : UNIT_BUFFER_SIZE;
// std::vector<E_T> unit_buffer(size_unit_buffer);
// // Copy the messages from buffer_send to unit buffer.
// if (host_id == h_i) {
// unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer);
// }
// // Broadcast the unit buffer
// MPI_Bcast(unit_buffer.data(),
// MPI_Instance::get_sending_size(unit_buffer),
// MPI_CHAR,
// h_i,
// MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// // Process every element of unit_buffer
// for (const E_T &e : unit_buffer) {
// fun(e);
// }
// }
// }
//}
// Function: Host root broadcasts its sending buffer to a receiving buffer.
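// Protocol sketch: (1) broadcast the element count, (2) every host sizes
// buffer_recv accordingly, (3) broadcast the payload as raw bytes, chunked
// because MPI_Bcast takes an int count (at most INT_MAX bytes per call).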
template <VertexID BATCH_SIZE>
template <typename E_T>
inline void DistBVCPLL<BATCH_SIZE>::
one_host_bcasts_buffer_to_buffer(
int root,
std::vector<E_T> &buffer_send,
std::vector<E_T> &buffer_recv)
{
const size_t ETypeSize = sizeof(E_T);
volatile uint64_t size_buffer_send = 0;
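    // The volatile qualifier (and the cast at the MPI_Bcast below) appears to be a
    // workaround for a size value observed to go stale while debugging; see the
    // disabled test prints around the broadcast.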
if (host_id == root) {
size_buffer_send = buffer_send.size();
}
// Sync the size_buffer_send.
// message_time -= WallTimer::get_time_mark();
// {//test
// if (0 == root && size_buffer_send == 16 && 1024 == caller_line) {
//// if (0 == root && size_buffer_send == 16 && 0 == host_id) {
// printf("before: host_id: %d size_buffer_send: %lu\n",
// host_id,
// size_buffer_send);
// }
// }
MPI_Bcast((void *) &size_buffer_send,
1,
MPI_UINT64_T,
root,
MPI_COMM_WORLD);
// message_time += WallTimer::get_time_mark();
// {//test
//// if (0 == root && size_buffer_send == 16 && 0 == host_id) {
// if (0 == root && size_buffer_send == 16 && 1024 == caller_line) {
// printf("after: host_id: %d size_buffer_send: %lu\n",
// host_id,
// size_buffer_send);
// }
// }
try {
buffer_recv.resize(size_buffer_send);
}
catch (const std::bad_alloc &) {
double memtotal = 0;
double memfree = 0;
PADO::Utils::system_memory(memtotal, memfree);
printf("one_host_bcasts_buffer_to_buffer: bad_alloc "
"host_id: %d "
"L.size(): %.2fGB "
"memtotal: %.2fGB "
"memfree: %.2fGB\n",
host_id,
get_index_size() * 1.0 / (1 << 30),
memtotal / 1024,
memfree / 1024);
exit(1);
}
if (!size_buffer_send) {
return;
}
// Broadcast the buffer_send
// message_time -= WallTimer::get_time_mark();
if (host_id == root) {
// buffer_recv.assign(buffer_send.begin(), buffer_send.end());
buffer_recv.swap(buffer_send);
}
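    // Note: the swap consumes the root's buffer_send, which afterwards holds
    // size_buffer_send default-constructed elements.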
uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) {
// Only need 1 broadcast
MPI_Bcast(buffer_recv.data(),
bytes_buffer_send,
MPI_CHAR,
root,
MPI_COMM_WORLD);
} else {
const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
size_t offset = 0;
for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
size_t size_unit_buffer = b_i == num_unit_buffers - 1
? size_buffer_send - offset
: unit_buffer_size;
MPI_Bcast(buffer_recv.data() + offset,
size_unit_buffer * ETypeSize,
MPI_CHAR,
root,
MPI_COMM_WORLD);
offset += unit_buffer_size;
}
}
// message_time += WallTimer::get_time_mark();
}
}
#endif //PADO_DPADO_H
|
openmp.h | #ifndef _OPENMP_H
#define _OPENMP_H
#include <cmath>
#include <complex>
#if defined(_OPENMP)
#include <omp.h>
#else
typedef int omp_int_t;
inline omp_int_t omp_get_thread_num() { return 0;}
inline omp_int_t omp_get_num_threads() { return 1;}
inline omp_int_t omp_get_max_threads() { return 1;}
#endif
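// Atomically accumulate a complex contribution into *M using two scalar atomic
// adds; the reinterpret_cast to T* relies on std::complex's guaranteed
// array-of-two-scalars layout.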
template<class T>
int inline atomic_add(const std::complex<double> m,std::complex<T> *M){
T * M_v = reinterpret_cast<T*>(M);
const T m_real = m.real();
const T m_imag = m.imag();
#pragma omp atomic
M_v[0] += m_real;
#pragma omp atomic
M_v[1] += m_imag;
return 0;
}
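// Real-valued overload: only accepts contributions whose imaginary part is
// negligible (roughly machine epsilon for double); returns 1 otherwise.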
template<class T>
int inline atomic_add(const std::complex<double> m,T *M){
if(std::abs(m.imag())>1.1e-15){
return 1;
}
else{
const T m_real = m.real();
#pragma omp atomic
M[0] += m_real;
return 0;
}
}
#endif |
hello.c |
#include <stdio.h>
#include <omp.h>
int
main (void)
{
int nthreads, tid;
#pragma omp parallel private(tid)
{
tid = omp_get_thread_num();
printf("Hello OpenMP from thread %d\n", tid);
if (tid == 0) { // main thread
nthreads = omp_get_num_threads();
printf("nthreads = %d\n", nthreads);
}
}
return 0;
}
|
tensor_cpu-inl.h | /*!
* Copyright (c) 2014 by Contributors
* \file tensor_cpu-inl.h
* \brief implementation of CPU host code
* \author Bing Xu, Tianqi Chen
*/
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./dot_engine-inl.h"
#include "./packet-inl.h"
#include "./tensor.h"
namespace mshadow {
template <>
inline void InitTensorEngine<cpu>(int dev_id) {}
template <>
inline void ShutdownTensorEngine<cpu>(void) {}
template <>
inline void SetDevice<cpu>(int devid) {}
template <>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
bool create_dnn_handle, int dev_id) {
return new Stream<cpu>();
}
template <>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
delete stream;
}
template <int ndim>
inline std::ostream &operator<<(std::ostream &os,
const Shape<ndim> &shape) { // NOLINT(*)
os << '(';
for (int i = 0; i < ndim; ++i) {
if (i != 0) os << ',';
os << shape[i];
}
// python style tuple
if (ndim == 1) os << ',';
os << ')';
return os;
}
template <typename xpu>
inline void *AllocHost_(size_t size);
template <typename xpu>
inline void FreeHost_(void *dptr);
#ifdef __CUDACC__
template <>
inline void *AllocHost_<gpu>(size_t size) {
void *dptr;
MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
return dptr;
}
template <>
inline void FreeHost_<gpu>(void *dptr) {
MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif
template <>
inline void *AllocHost_<cpu>(size_t size) {
size_t pitch;
return packet::AlignedMallocPitch(&pitch, size, 1);
}
template <>
inline void FreeHost_<cpu>(void *dptr) {
packet::AlignedFree(dptr);
}
template <typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
obj->stride_ = obj->size(dim - 1);
CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
obj->dptr_ = reinterpret_cast<DType *>(dptr);
}
template <typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
if (obj->dptr_ == NULL) {
LOG(FATAL) << "FreeHost:: double free";
}
FreeHost_<xpu>(obj->dptr_);
obj->dptr_ = NULL;
}
template <int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
size_t pitch;
void *dptr;
if (pad) {
dptr = packet::AlignedMallocPitch(
&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
} else {
obj->stride_ = obj->size(dim - 1);
dptr = packet::AlignedMallocPitch(&pitch,
obj->shape_.Size() * sizeof(DType), 1);
}
obj->dptr_ = reinterpret_cast<DType *>(dptr);
}
template <typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape,
DType initv, bool pad,
Stream<Device> *stream_) {
Tensor<Device, dim, DType> obj(shape);
obj.stream_ = stream_;
AllocSpace(&obj, pad);
MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
return obj;
}
template <int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
packet::AlignedFree(obj->dptr_);
obj->dptr_ = NULL;
}
template <int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
const Tensor<cpu, dim, DType> &_src, Stream<cpu> *stream) {
CHECK_EQ(_dst.shape_, _src.shape_)
<< "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
if (_dst.CheckContiguous() && _src.CheckContiguous()) {
memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
} else {
Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
Tensor<cpu, 2, DType> src = _src.FlatTo2D();
for (index_t y = 0; y < dst.size(0); ++y) {
memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
}
}
}
template <typename Saver, typename R, int dim, typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
const expr::Plan<E, DType> &plan) {
Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
#ifndef __CUDACC__
#pragma omp parallel for
#endif
  // NOTE: OpenMP was temporarily removed here in the past because the default settings throttled the CPU
for (openmp_index_t y = 0; y < shape[0]; ++y) {
for (index_t x = 0; x < shape[1]; ++x) {
// trust your compiler! -_- they will optimize it
Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
}
}
}
// code to handle SSE optimization
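// Dispatch: when both the expression and the destination pass the packet (SIMD)
// alignment check, evaluate through the vectorized MapPacketPlan; otherwise
// fall back to the scalar MapPlan.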
template <bool pass_check, typename Saver, typename R, int dim, typename DType,
typename E, int etype>
struct MapExpCPUEngine {
inline static void Map(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
MapPlan<Saver>(dst, MakePlan(exp.self()));
}
};
template <typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>, dim, DType, E,
etype> {
inline static void Map(Tensor<cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(
exp.self()) &&
expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>,
MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
expr::MapPacketPlan<SV>(
dst->self(),
expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
} else {
MapPlan<SV>(dst, MakePlan(exp.self()));
}
}
};
template <typename Saver, typename R, int dim, typename DType, typename E,
int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
const expr::Exp<E, DType, etype> &exp) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>::
Error_All_Tensor_in_Exp_Must_Have_Same_Type();
Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
CHECK(eshape[0] == 0 || eshape == dshape)
<< "Assignment: Shape of Tensors are not consistent with target, "
<< "eshape: " << eshape << " dshape:" << dshape;
MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass, Saver, R,
dim, DType, E, etype>::Map(dst->ptrself(), exp);
}
template <typename Saver, typename Reducer, typename R, typename DType,
typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>::
Error_TypeCheck_Not_Pass_For_Reduce_Exp();
Shape<2> eshape =
expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>::Check(exp.self()).FlatTo2D();
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[1], dshape[0])
<< "MapReduceKeepLowest::reduction dimension do not match";
CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor";
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
#pragma omp parallel for
#endif
for (openmp_index_t x = 0; x < eshape[1]; ++x) {
DType res = splan.Eval(0, x);
for (index_t y = 1; y < eshape[0]; ++y) {
Reducer::Reduce(res, splan.Eval(y, x));
}
Saver::template Save<DType>(dplan.REval(0, x), res * scale);
}
}
template <typename Saver, typename Reducer, int dimkeep, typename R,
typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
const expr::Exp<E, DType, etype> &exp,
DType scale) {
expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>::
Error_TypeCheck_Not_Pass_For_Reduce_Exp();
typedef Shape<expr::ExpInfo<E>::kDim> EShape;
EShape eshape =
expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>::Check(exp.self());
Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
CHECK_EQ(eshape[dimkeep], dshape[0])
<< "MapReduceKeepHighDim::reduction dimension do not match";
  // use equivalent form
Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep), eshape[dimkeep],
eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
eshape[EShape::kSubdim]);
// execution
expr::Plan<R, DType> dplan = MakePlan(dst->self());
expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
#pragma omp parallel for
#endif
for (openmp_index_t c = 0; c < pshape[1]; ++c) {
DType res;
Reducer::SetInitValue(res);
for (index_t n = 0; n < pshape[0]; ++n) {
DType tres;
Reducer::SetInitValue(tres);
for (index_t y = 0; y < pshape[2]; ++y) {
for (index_t x = 0; x < pshape[3]; ++x) {
Reducer::Reduce(tres,
splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
}
}
Reducer::Reduce(res, tres);
}
Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
}
}
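// Numerically stable softmax: subtract the row maximum before exponentiating,
// so the largest exponent is 0 and std::exp cannot overflow.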
template <typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
const Tensor<cpu, 1, DType> &energy) {
DType mmax = energy[0];
for (index_t x = 1; x < dst.size(0); ++x) {
if (mmax < energy[x]) mmax = energy[x];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(0); ++x) {
dst[x] = std::exp(energy[x] - mmax);
sum += dst[x];
}
for (index_t x = 0; x < dst.size(0); ++x) {
dst[x] /= sum;
}
}
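// Gradient of softmax followed by cross-entropy loss: dL/dz = p - one_hot(label),
// i.e. subtract 1 at the labeled class and copy the probabilities elsewhere.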
template <typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
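// Label-smoothing variant: the target puts (1 - alpha) on the labeled class and
// alpha / (C - 1) on each of the other C - 1 classes, so the gradient is
// src - 1 + alpha at the label and src - alpha / (C - 1) elsewhere.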
template <typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const float alpha) {
const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const index_t k = static_cast<int>(label[y]);
for (index_t x = 0; x < dst.size(1); ++x) {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f + alpha;
} else {
dst[y][x] = src[y][x] - smooth_grad;
}
}
}
}
template <typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const DType &ignore_label) {
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y]);
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (static_cast<int>(ignore_label) == k) {
dst[y][x] = 0.0f;
} else {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f;
} else {
dst[y][x] = src[y][x];
}
}
}
}
}
template <typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &src,
const Tensor<cpu, 1, DType> &label,
const DType &ignore_label, const float alpha) {
const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y]);
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (static_cast<int>(ignore_label) == k) {
dst[y][x] = 0.0f;
} else {
if (x == k) {
dst[y][k] = src[y][k] - 1.0f + alpha;
} else {
dst[y][x] = src[y][x] - smooth_grad;
}
}
}
}
}
template <typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label) {
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y][n]);
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
template <typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const float alpha) {
const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y][n]);
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
} else {
dst[y][x][n] = src[y][x][n] - smooth_grad;
}
}
}
}
}
template <typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const DType &ignore_label) {
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y][n]);
if (k == static_cast<int>(ignore_label)) {
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
dst[y][x][n] = DType(0.0f);
}
} else {
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f;
} else {
dst[y][x][n] = src[y][x][n];
}
}
}
}
}
}
template <typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &src,
const Tensor<cpu, 2, DType> &label,
const DType &ignore_label, const float alpha) {
const float smooth_grad = (alpha / (dst.size(1) - 1));
#pragma omp parallel for
for (openmp_index_t n = 0; n < dst.size(2); ++n) {
for (index_t y = 0; y < dst.size(0); ++y) {
const int k = static_cast<int>(label[y][n]);
if (k == static_cast<int>(ignore_label)) {
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
dst[y][x][n] = DType(0.0f);
}
} else {
for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
if (x == k) {
dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
} else {
dst[y][x][n] = src[y][x][n] - smooth_grad;
}
}
}
}
}
}
template <typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 2, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
Softmax(dst[y], energy[y]);
}
}
template <typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
const Tensor<cpu, 3, DType> &energy) {
CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#pragma omp parallel for
for (openmp_index_t y = 0; y < dst.size(0); ++y) {
for (index_t n = 0; n < dst.size(2); ++n) {
DType mmax = energy[y][0][n];
for (index_t x = 1; x < dst.size(1); ++x) {
if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
}
DType sum = DType(0.0f);
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
sum += dst[y][x][n];
}
for (index_t x = 0; x < dst.size(1); ++x) {
dst[y][x][n] /= sum;
}
}
}
}
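// Scatter-add gradient of an embedding/take lookup: dst[index[y]] += src[y],
// with out-of-range indices clamped into [0, K - 1].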
template <typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType> &index,
const Tensor<cpu, 2, DType> &src) {
const int K = dst.shape_[0];
for (index_t y = 0; y < index.size(0); ++y) {
int j = index[y];
if (j <= 0)
j = 0;
else if (j >= K)
j = K - 1;
dst[j] += src[y];
}
}
template <typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType> &sorted,
const Tensor<cpu, 1, IndexType> &index,
const Tensor<cpu, 2, DType> &src) {
for (index_t y = 0; y < sorted.size(0); ++y) {
dst[sorted[y]] += src[index[y]];
}
}
template <typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
const Tensor<cpu, 1, IndexType> &index,
const Tensor<cpu, 2, DType> &src) {
for (index_t y = 0; y < index.size(0); ++y) {
for (index_t j = 0; j < src.size(1); j++) {
dst[index[y]][j] = src[y][j];
}
}
}
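// Stable key-value sort via an index permutation: sort the indices by key with
// std::stable_sort, then apply the permutation to both keys and values.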
template <typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys,
Tensor<cpu, 1, VDType> values, bool is_ascend) {
CHECK_EQ(keys.CheckContiguous(), true);
CHECK_EQ(values.CheckContiguous(), true);
CHECK_EQ(keys.size(0), values.size(0))
<< "The sizes of key/value are not equal! keys_size: " << keys.size(0)
<< "values_size: " << values.size(0);
std::vector<size_t> idx(keys.size(0));
std::vector<KDType> keys_vec(keys.size(0));
std::vector<VDType> values_vec(values.size(0));
  for (index_t i = 0; i < keys.size(0); i++) {
idx[i] = i;
keys_vec[i] = keys[i];
values_vec[i] = values[i];
}
if (is_ascend) {
std::stable_sort(idx.begin(), idx.end(), [&keys_vec](size_t i1, size_t i2) {
return keys_vec[i1] < keys_vec[i2];
});
} else {
std::stable_sort(idx.begin(), idx.end(), [&keys_vec](size_t i1, size_t i2) {
return keys_vec[i1] > keys_vec[i2];
});
}
for (index_t i = 0; i < values.size(0); i++) {
keys[i] = keys_vec[idx[i]];
values[i] = values_vec[idx[i]];
}
}
template <typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values,
Tensor<Device, 1, SDType> segments) {
  // Sort values within each segment using two stable sorts: first
  // stable-sort the values globally, then stable-sort by segment id;
  // stability preserves the value ordering within each segment.
SortByKey(values, segments, true);
SortByKey(segments, values, true);
}
// blas related
template <typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
const Tensor<Device, 1, DType> &lhs,
const Tensor<Device, 1, DType> &rhs) {
CHECK_EQ(lhs.size(0), rhs.size(0)) << "VectorDot: Shape mismatch";
CHECK_EQ(dst.size(0), 1U) << "VectorDot: expect dst to be scalar";
expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
mshadow::expr::BLASEngine<Device, DType>::dot(
lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}
template <bool transpose_left, bool transpose_right, typename Device,
typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
const Tensor<Device, 3, DType> &lhs,
const Tensor<Device, 3, DType> &rhs, DType alpha,
DType beta, Tensor<Device, 1, DType *> workspace) {
index_t batch_size = dst.shape_[0];
expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
Shape<3> sleft = transpose_left
? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
: lhs.shape_;
Shape<3> sright = transpose_right
? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
: rhs.shape_;
CHECK_EQ(dst.CheckContiguous(), true);
CHECK_EQ(lhs.CheckContiguous(), true);
CHECK_EQ(rhs.CheckContiguous(), true);
CHECK(sleft[0] == batch_size && sright[0] == batch_size)
<< "BatchGEMM: batchsize must be equal."
<< "dst: " << dst.shape_ << "\n"
<< "lhs: " << sleft << "\n"
<< "rhs: " << sright << "\n";
CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] &&
sleft[2] == sright[1])
<< "BatchGEMM: matrix shape mismatch"
<< "dst: " << dst.shape_ << "\n"
<< "lhs: " << sleft << "\n"
<< "rhs: " << sright << "\n";
CHECK(workspace.size(0) >= 3 * batch_size)
<< "Workspace Size must be bigger than " << 3 * batch_size;
CHECK_EQ(workspace.CheckContiguous(), true);
  // Pass arguments in column-major order for compatibility with most BLAS
  // implementations: swapping lhs/rhs and the transpose flags computes
  // dst^T = rhs^T * lhs^T, which is dst in row-major storage.
expr::BLASEngine<Device, DType>::batched_gemm(
dst.stream_, transpose_right, transpose_left,
transpose_right ? rhs.size(1) : rhs.size(2),
transpose_left ? lhs.size(2) : lhs.size(1),
transpose_right ? rhs.size(2) : rhs.size(1), alpha, rhs.dptr_,
rhs.stride_, lhs.dptr_, lhs.stride_, beta, dst.dptr_, dst.stride_,
batch_size, workspace.dptr_);
}
} // namespace mshadow
#endif // MSHADOW_TENSOR_CPU_INL_H_
|
omp_for_collapse.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
/* Utility function to check that i is increasing monotonically
   with each call: the ordered collapse(2) loop must execute its
   iterations in sequence, so i may never decrease and may grow by
   at most 1 between consecutive calls */
static int check_i_islarger (int i)
{
static int last_i;
int islarger;
if (i==1)
last_i=0;
islarger = ((i >= last_i)&&(i - last_i<=1));
last_i = i;
return (islarger);
}
int test_omp_for_collapse()
{
int is_larger = 1;
#pragma omp parallel
{
int i,j;
int my_islarger = 1;
#pragma omp for private(i,j) schedule(static,1) collapse(2) ordered
for (i = 1; i < 100; i++) {
for (j =1; j <100; j++) {
#pragma omp ordered
my_islarger = check_i_islarger(i)&&my_islarger;
}
}
#pragma omp critical
is_larger = is_larger && my_islarger;
}
return (is_larger);
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_for_collapse()) {
num_failed++;
}
}
return num_failed;
}
|
verlet_accelerate2.c |
#include "verlet.h"
#include "cloud.h"
void
verlet_step_accelerate (int Np, double dt, const double *restrict X[3], double *restrict U[3])
{
#pragma omp parallel for
for (int i = 0; i < Np; i++) {
for (int j = i + 1; j < Np; j++) {
double du[3];
force (dt, X[0][i], X[1][i], X[2][i], X[0][j], X[1][j], X[2][j], du);
for (int d = 0; d < 3; d++) {
        /* Atomic updates: different i iterations run on different threads
           but touch overlapping particles through j, so the plain
           increments would race. */
#pragma omp atomic
        U[d][i] += du[d];
#pragma omp atomic
        U[d][j] -= du[d];
}
}
}
}
|
omp50_task_depend_mtx2.c | // RUN: %libomp-compile-and-run
// Tests OMP 5.0 task dependences "mutexinoutset", emulates compiler codegen
// Mutually exclusive tasks get input dependency info array sorted differently
//
// Task tree created:
//         task0    task1
//             \    /   \
//             task2    task5
//             /    \
//         task3    task4
//             \    /
//        task6 <--> task7   (these two are mutually exclusive)
//             \    /
//             task8
//
#include <stdio.h>
#include <omp.h>
#ifdef _WIN32
#include <windows.h>
#define mysleep(n) Sleep(n)
#else
#include <unistd.h>
#define mysleep(n) usleep((n)*1000)
#endif
static int checker = 0; // to check if two tasks run simultaneously
static int err = 0;
#ifndef DELAY
#define DELAY 100
#endif
// ---------------------------------------------------------------------------
// internal data to emulate compiler codegen
typedef int(*entry_t)(int, int**);
typedef struct DEP {
size_t addr;
size_t len;
int flags;
} dep;
typedef struct ID {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
int thunk(int gtid, int** pshareds) {
int t = **pshareds;
int th = omp_get_thread_num();
#pragma omp atomic
++checker;
printf("task __%d, th %d\n", t, th);
if (checker != 1) {
err++;
printf("Error1, checker %d != 1\n", checker);
}
mysleep(DELAY);
if (checker != 1) {
err++;
printf("Error2, checker %d != 1\n", checker);
}
#pragma omp atomic
--checker;
return 0;
}
#ifdef __cplusplus
extern "C" {
#endif
int __kmpc_global_thread_num(id*);
extern int** __kmpc_omp_task_alloc(id *loc, int gtid, int flags,
size_t sz, size_t shar, entry_t rtn);
int
__kmpc_omp_task_with_deps(id *loc, int gtid, int **task, int nd, dep *dep_lst,
int nd_noalias, dep *noalias_dep_lst);
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
#ifdef __cplusplus
} // extern "C"
#endif
// End of internal data
// ---------------------------------------------------------------------------
int main()
{
int i1,i2,i3,i4;
omp_set_num_threads(2);
#pragma omp parallel
{
#pragma omp single nowait
{
dep sdep[2];
int **ptr;
int gtid = __kmpc_global_thread_num(&loc);
int t = omp_get_thread_num();
#pragma omp task depend(in: i1, i2)
{ int th = omp_get_thread_num();
printf("task 0_%d, th %d\n", t, th);
mysleep(DELAY); }
#pragma omp task depend(in: i1, i3)
{ int th = omp_get_thread_num();
printf("task 1_%d, th %d\n", t, th);
mysleep(DELAY); }
#pragma omp task depend(in: i2) depend(out: i1)
{ int th = omp_get_thread_num();
printf("task 2_%d, th %d\n", t, th);
mysleep(DELAY); }
#pragma omp task depend(in: i1)
{ int th = omp_get_thread_num();
printf("task 3_%d, th %d\n", t, th);
mysleep(DELAY); }
#pragma omp task depend(out: i2)
{ int th = omp_get_thread_num();
printf("task 4_%d, th %d\n", t, th);
mysleep(DELAY+5); } // wait a bit longer than task 3
#pragma omp task depend(out: i3)
{ int th = omp_get_thread_num();
printf("task 5_%d, th %d\n", t, th);
mysleep(DELAY); }
// compiler codegen start
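      // The two __kmpc_* sequences below emulate the code a compiler would
      // emit for tasks with depend(mutexinoutset: i1, i4); dependence flags
      // value 4 marks an entry as mutexinoutset ("mx").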
// task1
ptr = __kmpc_omp_task_alloc(&loc, gtid, 0, 28, 16, thunk);
sdep[0].addr = (size_t)&i1;
sdep[0].len = 0; // not used
sdep[0].flags = 4; // mx
sdep[1].addr = (size_t)&i4;
sdep[1].len = 0; // not used
sdep[1].flags = 4; // mx
**ptr = t + 10; // init single shared variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// task2
ptr = __kmpc_omp_task_alloc(&loc, gtid, 0, 28, 16, thunk);
// reverse pointers - library should sort them uniquely
sdep[0].addr = (size_t)&i4;
sdep[1].addr = (size_t)&i1;
**ptr = t + 20; // init single shared variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// compiler codegen end
#pragma omp task depend(in: i1)
{ int th = omp_get_thread_num();
printf("task 8_%d, th %d\n", t, th);
mysleep(DELAY); }
} // single
} // parallel
if (err == 0) {
printf("passed\n");
return 0;
} else {
printf("failed\n");
return 1;
}
}
|
run_sign_verify.c | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
/*
Sign a message and verify the signature
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h> // bzero()
#include <amcl/utils.h>
#include <amcl/randapi.h>
#include <amcl/bls_BLS381.h>
#include <oqs/oqs.h>
#include <pqnist/pqnist.h>
#define NTHREADS 8
#define MAXSIZE 256
#define G2LEN (4*BFS_BLS381)
#define SIGLEN (BFS_BLS381+1)
int main()
{
int i,rc;
// Seed value for CSPRNG
char seed[NTHREADS][PQNIST_SEED_LENGTH];
// Message to be sent to Bob
char p[NTHREADS][MAXSIZE];
octet P[NTHREADS];
// BLS signature
char s[NTHREADS][SIGLEN];
octet S[NTHREADS];
// Initialise seed
for(i=0; i<NTHREADS; i++)
{
for(int j=0; j<PQNIST_SEED_LENGTH; j++)
{
seed[i][j] = i;
}
}
// Generate SIKE and BLS keys
// Bob's SIKE keys (not used)
uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key];
uint8_t SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key];
// Alice's BLS keys
char BLSsk[NTHREADS][BGS_BLS381];
char BLSpk[NTHREADS][G2LEN];
#pragma omp parallel for
for(i=0; i<NTHREADS; i++)
{
rc = pqnist_keys(seed[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]);
if (rc)
{
fprintf(stderr, "ERROR pqnist_keys rc: %d\n", rc);
exit(EXIT_FAILURE);
}
printf("BLS pklen %d pk: ", G2LEN);
amcl_print_hex(BLSpk[i], G2LEN);
printf("BLS sklen %d BLS sk: ", BGS_BLS381);
amcl_print_hex(BLSsk[i], BGS_BLS381);
printf("\n");
}
// Alice
for(i=0; i<NTHREADS; i++)
{
bzero(p[i],sizeof(p[i]));
P[i].max = MAXSIZE;
P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i);
P[i].val = p[i];
printf("Alice Plaintext: ");
OCT_output_string(&P[i]);
printf("\n");
}
for(i=0; i<NTHREADS; i++)
{
bzero(s[i],sizeof(s[i]));
S[i].max = SIGLEN;
S[i].len = SIGLEN;
S[i].val = s[i];
}
#pragma omp parallel for
for(i=0; i<NTHREADS; i++)
{
// Alice signs message
rc = pqnist_sign(P[i].val, BLSsk[i], S[i].val);
if(rc)
{
fprintf(stderr, "ERROR pqnist_sign rc: %d\n", rc);
printf("FAILURE\n");
exit(EXIT_FAILURE);
}
printf("Alice SIGlen %d SIG", S[i].len);
OCT_output(&S[i]);
printf("\n");
}
#pragma omp parallel for
for(i=0; i<NTHREADS; i++)
{
// Bob verifies message
rc = pqnist_verify(P[i].val, BLSpk[i], S[i].val);
if (rc)
{
fprintf(stderr, "ERROR pqnist_verify rc: %d\n", rc);
exit(EXIT_FAILURE);
}
else
{
printf("SUCCESS Test %d pqnist_verify rc: %d\n", i, rc);
OCT_output_string(&P[i]);
printf("\n");
}
}
// clear memory
for(i=0; i<NTHREADS; i++)
{
OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
        OQS_MEM_cleanse(BLSsk[i], BGS_BLS381); // BLS secret key length
OCT_clear(&P[i]);
OCT_clear(&S[i]);
}
printf("TEST PASSED\n");
exit(EXIT_SUCCESS);
}
|
blur-effect-openmp.c | #include <stdlib.h>
#include <stdio.h>
#include <cv.h>
#include <omp.h>
#include <opencv2/highgui/highgui.hpp>
#define NUM_THREADS 4
using namespace cv;
int main (int argc, char *argv[]) {
	//Variables
IplImage* img = 0;
int height,width,step,channels,kernel,hilos;
uchar *data;
double delta,start = omp_get_wtime();
	//Validation
	if(argc<4){
		printf("Missing arguments: image.jpg kernel #threads \n\7");
exit(0);
}
	//Load the image
img=cvLoadImage(argv[1]);
kernel = strtol(argv[2], NULL, 10);
if(!img){
printf("No se pudo cargar el archivo de imagen: %s\n",argv[1]);
exit(0);
}
hilos = strtol(argv[3], NULL, 10);
	//Get the image data
height = img->height;
width = img->width;
step = img->widthStep;
channels = img->nChannels;
data = (uchar *)img->imageData;
#pragma omp parallel num_threads(hilos)
{
int x,xx,y,yy,i,pR,pG,pB,R,G,B;
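		//Box blur: each output pixel is the average of the kernel x kernel
		//window anchored at (xx, yy), clamped at the image borders.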
#pragma omp for schedule(static, width/hilos)
for(xx = 0; xx < width; xx++)
for(yy = 0; yy < height; yy++) {
pB = pG = pR = 0;
i = 0;
for(x = xx; x < width && x < xx + kernel; x++) {
for(y = yy; y < height && y < yy + kernel; y++) {
pR += data[x*3 + y*width*3 + 2];
pG += data[x*3 + y*width*3 + 1];
pB += data[x*3 + y*width*3 + 0];
i++;
}
}
pR = pR/i;
pG = pG/i;
pB = pB/i;
data[xx*3 + yy*width*3 + 2] = pR;
data[xx*3 + yy*width*3 + 1] = pG;
data[xx*3 + yy*width*3 + 0] = pB;
}
}
delta = omp_get_wtime() - start;
printf("Openmp %s with %d threads computed in %.4g seconds\n", argv[1], hilos, delta);
	//Blurred image
char str[16]="blur-openmp-";
char *str1=argv[1];
Mat m = cvarrToMat(img);
imwrite(strcat(str,str1), m);
cvReleaseImage(&img);
return 0;
} |
mvt.c | /**
* mvt.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#define N SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_common_arrays(DATA_TYPE *A, DATA_TYPE *y1, DATA_TYPE *y2) {
int i, j;
for (i = 0; i < N; i++) {
y1[i] = ((DATA_TYPE)i + 3) / N;
y2[i] = ((DATA_TYPE)i + 4) / N;
for (j = 0; j < N; j++) {
A[i * N + j] = ((DATA_TYPE)i * j) / N;
}
}
}
void init_vector_x(DATA_TYPE *x1, DATA_TYPE *x2) {
  int i;
for (i = 0; i < N; i++) {
x1[i] = ((DATA_TYPE)i) / N;
x2[i] = ((DATA_TYPE)i + 1) / N;
}
}
void runMvt(DATA_TYPE *a, DATA_TYPE *x1, DATA_TYPE *x2, DATA_TYPE *y1,
DATA_TYPE *y2) {
int i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x1[i] = x1[i] + a[i * N + j] * y1[j];
}
}
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x2[i] = x2[i] + a[j * N + i] * y2[j];
}
}
}
void runMvt_OMP(DATA_TYPE *a, DATA_TYPE *x1, DATA_TYPE *x2, DATA_TYPE *y1,
DATA_TYPE *y2) {
int i, j;
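  // Offload both loop nests in a single target region: A, y1 and y2 are
  // mapped to the device read-only, x1 and x2 are copied in and back out.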
#pragma omp target teams map(to: a[:N*N], y1[:N], y2[:N]) map(tofrom: x1[:N], x2[:N]) device(OMP_DEVICE_ID)
{
#pragma omp distribute parallel for private(j)
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x1[i] = x1[i] + a[i * N + j] * y1[j];
}
}
#pragma omp distribute parallel for private(j)
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
x2[i] = x2[i] + a[j * N + i] * y2[j];
}
}
}
}
int compareResults(DATA_TYPE *x1, DATA_TYPE *x1_outputFromGpu, DATA_TYPE *x2,
DATA_TYPE *x2_outputFromGpu) {
int i, fail;
fail = 0;
for (i = 0; i < N; i++) {
if (percentDiff(x1[i], x1_outputFromGpu[i]) >
ERROR_THRESHOLD) {
fail++;
}
if (percentDiff(x2[i], x2_outputFromGpu[i]) >
ERROR_THRESHOLD) {
fail++;
}
}
return fail;
}
int main() {
fprintf(stdout, "<< Matrix Vector Product and Transpose >>\n");
// Declare arrays and allocate memory for common arrays
DATA_TYPE *a = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE));
DATA_TYPE *x1 = NULL;
DATA_TYPE *x2 = NULL;
DATA_TYPE *x1_OMP = NULL;
DATA_TYPE *x2_OMP = NULL;
DATA_TYPE *y_1 = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
DATA_TYPE *y_2 = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
// Initialize common memory
init_common_arrays(a, y_1, y_2);
// run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
x1_OMP = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE));
x2_OMP = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE));
init_vector_x(x1_OMP, x2_OMP);
BENCHMARK_OMP(runMvt_OMP(a, x1_OMP, x2_OMP, y_1, y_2));
// prevent dead-code elimination
DCE_PREVENT(x1_OMP, N);
DCE_PREVENT(x2_OMP, N);
#endif
// run sequential version if enabled
#ifdef RUN_CPU_SEQ
x1 = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE));
x2 = (DATA_TYPE *) malloc(N * sizeof(DATA_TYPE));
init_vector_x(x1, x2);
BENCHMARK_CPU(runMvt(a, x1, x2, y_1, y_2));
// prevent dead-code elimination
DCE_PREVENT(x1, N);
DCE_PREVENT(x2, N);
#endif
// if TEST is enabled, then compare OMP results against sequential mode
int fail = 0;
#ifdef RUN_TEST
fail = compareResults(x1, x1_OMP, x2, x2_OMP);
printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif
// Release memory
free(a);
free(x1);
free(x2);
free(x1_OMP);
free(x2_OMP);
free(y_1);
free(y_2);
return fail;
}
|
constCurvModel2d.c | /* Include constant curvature drivers for all dimensions here */
/* System headers */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Local headers */
#include "QSSLIB_config.h"
#include "qss_options.h"
#include "qss_spatial_derivatives2d.h"
#include "qss_tvd_runge_kutta2d.h"
#include "qss_data_arrays.h"
#include "qss_util2d.h"
#include "qss_macros.h"
#include "qss_reinitialization2d.h"
#include "qss_grid.h"
#include "constCurvModel2d.h"
#include "qss_general_util.h"
#include "connectivity.h"
/* Main driver for constant curvature level set method model */
QSSLIB_REAL constCurvModel2d(Options *options,QSS_DataArrays *p, Grid *g, FILE *fp_out, QSSLIB_REAL a0)
{
QSSLIB_REAL zero = 0.0;
char fname[256];
int flag = 0, OUTER_STEP = 0, INNER_STEP, idx;
QSSLIB_REAL dt = 0, dt_sub, t = 0;
QSSLIB_REAL mask_sign = -1;
QSSLIB_REAL eps, cur_max_H_over_dX = -1, cfl_number = 0.9;
QSSLIB_REAL max_abs_err = 100, vol_phi = 100, vol_phi_old, vol_very_small;
vol_very_small = (g->dx)[0]*(g->dx)[1];
eps = (options->eps_coefficient)*(g->dx[0]);
QSS2D_VOLUME_REGION_PHI_LESS_THAN_ZERO(&vol_phi, p->phi, GB_DIMS_2D, FB_DIMS_2D,
&(g->dx[0]),&(g->dx[1]), &eps);
while( (t < options->tmax) && (max_abs_err > options->eps_stop) && (vol_phi > vol_very_small))
{
/* outer loop */
OUTER_STEP++;
dt_sub = 0;
COPY_DATA(p->phi_prev,p->phi,g)
vol_phi_old = vol_phi;
/* Begin parallel region */
#pragma omp parallel default(none) shared(p, g, flag, cfl_number,\
cur_max_H_over_dX, zero, dt_sub, vol_phi, max_abs_err, eps, options) \
private(INNER_STEP, dt)
{
INNER_STEP = 0;
QSSLIB_REAL max_H_over_dX;
int bdry_location_idx = 9; /* all boundaries */
QSSLIB_REAL disconn_overlap = 0;
/* Set up variables for multi-threading */
int cur_thread, cur_jlo_fb, cur_jhi_fb, num_threads, nslices, i;
int cur_jlo_gb, cur_jhi_gb;
cur_thread = omp_get_thread_num();
num_threads = omp_get_num_threads();
if (num_threads > 1) flag = 1;
nslices = g->jhi_fb - g->jlo_fb + 1;
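        /* Divide the j index range into one contiguous slab per thread */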
cur_jlo_fb = g->jlo_fb + nslices*cur_thread/num_threads;
cur_jhi_fb = g->jlo_fb + nslices*(cur_thread + 1)/num_threads - 1;
double t1 = omp_get_wtime();
/* Keeping track of thread-local ghost boundaries, mainly for imposing mask */
if (cur_jhi_fb > (g->jhi_fb))
cur_jhi_fb = (g->jhi_fb);
if (cur_thread == 0)
cur_jlo_gb = cur_jlo_fb - 3;
else
cur_jlo_gb = cur_jlo_fb;
if (cur_thread == (num_threads - 1))
cur_jhi_gb = cur_jhi_fb + 3;
else
cur_jhi_gb = cur_jhi_fb;
while( dt_sub < options->tplot )
{
/* inner loop */
INNER_STEP++;
QSS2D_GET_RHS_SECOND(p->phi, p->normal_velocity, p->curvature_coeff,
p->external_velocity_x, p->external_velocity_y,
&(max_H_over_dX), p->lse_rhs, GB_DIMS_2D, FB_DIMS_PAR_2D,
&((g->dx)[0]),&((g->dx)[1]));
#pragma omp barrier
#pragma omp critical
{
if(max_H_over_dX > cur_max_H_over_dX)
cur_max_H_over_dX = max_H_over_dX;
}
/*
Barrier to ensure same cur_max_H_over_dX across all threads.
*/
#pragma omp barrier
/* get final correct dt due to parabolic (curvature) term */
dt = cfl_number / (cur_max_H_over_dX + options->b_max_over_dx + options->max_U_over_dx);
QSS2D_RK1_STEP(p->phi_next,GB_DIMS_2D,p->phi,GB_DIMS_2D,p->lse_rhs,
GB_DIMS_2D, FB_DIMS_PAR_2D, &dt);
#pragma omp barrier
IMPOSE_MASK_PAR_2D(p->phi, p->mask, p->phi_next, &(options->overlap),
GB_DIMS_2D, &(cur_jlo_gb), &(cur_jhi_gb));
#pragma omp barrier
IMPOSE_MASK_PAR_2D(p->phi, p->mask_w, p->phi, &(disconn_overlap),
GB_DIMS_2D, &(cur_jlo_gb), &(cur_jhi_gb));
#pragma omp barrier
IMPOSE_MASK_PAR_2D(p->phi, p->mask_nw, p->phi, &(disconn_overlap),
GB_DIMS_2D, &(cur_jlo_gb), &(cur_jhi_gb));
#pragma omp barrier
/* boundary conditions */
#pragma omp single
{
signedLinearExtrapolationBCqss(p->phi,g,bdry_location_idx);
cur_max_H_over_dX = -1;
max_H_over_dX = -1;
}
/*
Implicit barrier after omp single, so all threads should sync here
*/
#pragma omp single
{
SET_DATA_TO_CONSTANT(p->lse_rhs,g,zero);
dt_sub += dt;
}
} /* End inner loop */
double t2 = omp_get_wtime();
if (cur_thread == 0)
printf("%d threads: Level set time = %lf\n", num_threads, t2 - t1);
} /* End parallel region */
/*
Reinitialization of the level set function
- may want to parallelize the following functions later
*/
t += dt_sub;
double t3 = omp_get_wtime();
if(options->check_connectivity) //if (((int)t % 1) == 0)
trapComponents_mask(p, g, options);
printf("Reinitializing....");
reinitialize2d_subcell_fix_qss(p,g,options);
printf("Reinitialized\n");
/* compute stopping criteria */
/* max abs error */
QSS2D_MAX_NORM_DIFF_LOCAL(&max_abs_err,p->phi,GB_DIMS_2D, p->phi_prev,
GB_DIMS_2D, FB_DIMS_2D, &(options->err_check_zone));
    vol_phi_old = vol_phi; /* reuse the outer variable; redeclaring it here would shadow it */
QSS2D_VOLUME_REGION_PHI_LESS_THAN_ZERO(&vol_phi, p->phi, GB_DIMS_2D, FB_DIMS_2D,
&(g->dx[0]),&(g->dx[1]), &eps);
if (fabsf(vol_phi - vol_phi_old) < 0.1*options->eps_stop) break;
if(options->checkpoint)
{
sprintf(fname,"checkpoint_phi");
writeDataArrayQSS(p->phi,g,fname,GZIP);
sprintf(fname,"checkpoint_phi_prev");
writeDataArrayQSS(p->phi_prev,g,fname,GZIP);
sprintf(fname,"mask_w");
writeDataArrayQSS(p->mask_w,g,fname,GZIP);
sprintf(fname,"mask_nw");
writeDataArrayQSS(p->mask_nw,g,fname,GZIP);
}
double t4 = omp_get_wtime();
printf("connectivity time = %lf\n", t4 - t3);
printf("t = %f\t",t);
printf("max_abs_err = %4.3f,\t",max_abs_err);
printf("vol_nwp = %4.3f\n",vol_phi);
/* If nw phase saturation isn't changing much, then continue on */
//if ((vol_phi - vol_phi_old < options->eps_stop) && (max_abs_err < 0.1))
// break;
} /* End outer loop */
/* Merge disconnected components for writing to saved data */
MERGE_SETS(p->phi, p->mask_nw, g);
return t;
}
|
GB_unop__sqrt_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sqrt_fc32_fc32)
// op(A') function: GB (_unop_tran__sqrt_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = csqrtf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csqrtf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = csqrtf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SQRT || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__sqrt_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = csqrtf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = csqrtf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__sqrt_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rect2lune.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "compearth.h"
#ifdef COMPEARTH_USE_MKL
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wreserved-id-macro"
#pragma clang diagnostic ignored "-Wstrict-prototypes"
#endif
#include <mkl_cblas.h>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#else
#include <cblas.h>
#endif
/*!
* @brief Converts v-w coordinates to lune coordinates.
*
* @param[in] nv Number of v coordinates.
 * @param[in] v         v coordinates s.t. \f$ v \in [-1/3, 1/3] \f$. This
 *                      is an array of dimension [nv].
* @param[in] nw Number of w coordinates.
* @param[in] w w coordinates (similar to lune latitudes) s.t.
 *                      \f$ w \in [-3\pi/8, 3\pi/8] \f$. This is an
* array of dimension [nw].
*
* @param[out] gamma Longitude (degrees) \f$ \gamma \in [-30,30] \f$.
* This is an array of dimension [nv].
 * @param[out] delta    Lune latitude (degrees) \f$ \delta \in [-90,90] \f$.
* This is an array of dimension [nw].
*
* @result 0 indicates success.
*
* @author Carl Tape and converted to C by Ben Baker
*
* @date 2016
*
* @copyright MIT
*
*/
int compearth_rect2lune(const int nv, const double *__restrict__ v,
const int nw, const double *__restrict__ w,
double *__restrict__ gamma,
double *__restrict__ delta)
{
double u[CE_CHUNKSIZE] __attribute__((aligned(64)));
int i, j, ierr, nuLoc;
/*
const int maxit = 20;
const int useHalley = 2;
const double tol = 1.e-12;
*/
const double pi38 = 3.0*M_PI/8.0;
const double pi180i = 180.0/M_PI;
ierr = 0;
// convert v -> gamma
compearth_v2gamma(nv, v, gamma);
// convert latitude to colatitude
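    // process w in blocks of CE_CHUNKSIZE so each block of colatitudes
    // fits in the stack buffer u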
for (i=0; i<nw; i=i+CE_CHUNKSIZE)
{
nuLoc = MIN(CE_CHUNKSIZE, nw - i);
for (j=0; j<nuLoc; j++)
{
u[j] = pi38 - w[i+j];
}
// convert u -> beta
ierr = compearth_u2beta(nuLoc, u, &delta[i]);
//ierr = compearth_u2beta(nuLoc, maxit, useHalley, u, tol, &delta[i]);
if (ierr != 0)
{
fprintf(stderr, "%s: Computing u2beta\n", __func__);
return -1;
}
}
    // convert gamma to degrees
cblas_dscal(nv, pi180i, gamma, 1);
// convert delta from colatitude to latitude and radians to degrees
#pragma omp simd
for (i=0; i<nw; i++)
{
delta[i] = 90.0 - pi180i*delta[i];
}
return ierr;
}
|
convolution_4x4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void conv4x4s4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
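    // 4x4 kernel applied with stride 4: every output pixel consumes a
    // disjoint 4x4 input patch, so four consecutive outputs read 16
    // consecutive floats from each of the four input rows.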
#pragma omp parallel for
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p*inch*16 + q*16;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0+4);
float32x4_t _k891011 = vld1q_f32(kernel0+8);
float32x4_t _k12131415 = vld1q_f32(kernel0+12);
#else
const float* k0 = kernel0;
const float* k1 = kernel0 + 4;
const float* k2 = kernel0 + 8;
const float* k3 = kernel0 + 12;
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r11 = vld1q_f32(r1 + 4);
float32x4_t _r21 = vld1q_f32(r2 + 4);
float32x4_t _r31 = vld1q_f32(r3 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r12 = vld1q_f32(r1 + 8);
float32x4_t _r22 = vld1q_f32(r2 + 8);
float32x4_t _r32 = vld1q_f32(r3 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r13 = vld1q_f32(r1 + 12);
float32x4_t _r23 = vld1q_f32(r2 + 12);
float32x4_t _r33 = vld1q_f32(r3 + 12);
float32x4_t _sum0 = vmulq_f32(_r00, _k0123);
float32x4_t _sum1 = vmulq_f32(_r01, _k0123);
float32x4_t _sum2 = vmulq_f32(_r02, _k0123);
float32x4_t _sum3 = vmulq_f32(_r03, _k0123);
_sum0 = vfmaq_f32(_sum0, _r10, _k4567);
_sum1 = vfmaq_f32(_sum1, _r11, _k4567);
_sum2 = vfmaq_f32(_sum2, _r12, _k4567);
_sum3 = vfmaq_f32(_sum3, _r13, _k4567);
_sum0 = vfmaq_f32(_sum0, _r20, _k891011);
_sum1 = vfmaq_f32(_sum1, _r21, _k891011);
_sum2 = vfmaq_f32(_sum2, _r22, _k891011);
_sum3 = vfmaq_f32(_sum3, _r23, _k891011);
_sum0 = vfmaq_f32(_sum0, _r30, _k12131415);
_sum1 = vfmaq_f32(_sum1, _r31, _k12131415);
_sum2 = vfmaq_f32(_sum2, _r32, _k12131415);
_sum3 = vfmaq_f32(_sum3, _r33, _k12131415);
float32x4_t _s01 = vpaddq_f32(_sum0, _sum1);
float32x4_t _s23 = vpaddq_f32(_sum2, _sum3);
float32x4_t _sum = vpaddq_f32(_s01, _s23);
float32x4_t _outp = vld1q_f32(outptr);
                    _outp = vaddq_f32(_outp, _sum);
                    // store the accumulated value so per-channel sums add up
                    vst1q_f32(outptr, _outp);
r0 += 16;
r1 += 16;
r2 += 16;
r3 += 16;
outptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%1, #128] \n"
"0: \n"
"pld [%2, #512] \n"
"pld [%3, #512] \n"
"vld1.f32 {d14-d15}, [%1] \n"// q7 = outptr
"vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
"vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
"pld [%4, #512] \n"
"pld [%5, #512] \n"
"vmul.f32 q12, q8, %q12 \n"
"vmul.f32 q13, q9, %q13 \n"
"vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
"vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
"vmla.f32 q12, q10, %q14 \n"
"vmla.f32 q13, q11, %q15 \n"
"vadd.f32 q5, q12, q13 \n"
"vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
"vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
"vmul.f32 q12, q8, %q12 \n"
"vmul.f32 q13, q9, %q13 \n"
"vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
"vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
"vmla.f32 q12, q10, %q14 \n"
"vmla.f32 q13, q11, %q15 \n"
"vadd.f32 q6, q12, q13 \n"
"vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
"vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
"vmul.f32 q12, q8, %q12 \n"
"vmul.f32 q13, q9, %q13 \n"
"vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
"vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
"vmla.f32 q12, q10, %q14 \n"
"vmla.f32 q13, q11, %q15 \n"
"vadd.f32 q14, q12, q13 \n"
"vld1.f32 {d16-d17}, [%2]! \n"// q8 = r0
"vld1.f32 {d18-d19}, [%3]! \n"// q9 = r1
"vmul.f32 q12, q8, %q12 \n"
"vmul.f32 q13, q9, %q13 \n"
"vld1.f32 {d20-d21}, [%4]! \n"// q10 = r2
"vld1.f32 {d22-d23}, [%5]! \n"// q11 = r3
"vmla.f32 q12, q10, %q14 \n"
"vmla.f32 q13, q11, %q15 \n"
"vadd.f32 q15, q12, q13 \n"
"vadd.f32 d10, d10, d11 \n"
"vadd.f32 d28, d28, d29 \n"
"vadd.f32 d11, d12, d13 \n"
"vadd.f32 d29, d30, d31 \n"
"vpadd.f32 d10, d10, d11 \n"
"vpadd.f32 d11, d28, d29 \n"
"vadd.f32 q7, q7, q5 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"pld [%1, #128] \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k0123), // %12
"w"(_k4567), // %13
"w"(_k891011), // %14
"w"(_k12131415) // %15
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
_sum = vmlaq_f32(_sum, _r1, _k4567);
_sum = vmlaq_f32(_sum, _r2, _k891011);
_sum = vmlaq_f32(_sum, _r3, _k12131415);
*outptr += vaddvq_f32(_sum);
#else
float sum = 0.f;
asm volatile(
"vld1.f32 {d16-d17}, [%0]! \n"// q8 = r0
"vld1.f32 {d18-d19}, [%1]! \n"// q9 = r1
"vmul.f32 q12, q8, %q9 \n"
"vmul.f32 q13, q9, %q10 \n"
"vld1.f32 {d20-d21}, [%2]! \n"// q10 = r2
"vld1.f32 {d22-d23}, [%3]! \n"// q11 = r3
"vmla.f32 q12, q10, %q11 \n"
"vmla.f32 q13, q11, %q12 \n"
"vadd.f32 q5, q12, q13 \n"
"vadd.f32 d10, d10, d11 \n"
"vpadd.f32 d10, d10, d10 \n"
"vmov.f32 %4, d10[0] \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(sum) // %4
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"w"(_k0123), // %9
"w"(_k4567), // %10
"w"(_k891011), // %11
"w"(_k12131415) // %12
: "cc", "memory", "q5", "q6", "q8", "q9", "q10", "q11", "q12", "q13"
);
*outptr += sum;
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
*outptr += sum;
#endif // __ARM_NEON
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
outptr++;
}
r0 += w * 3;
r1 += w * 3;
r2 += w * 3;
r3 += w * 3;
}
}
}
}
|
resize_bilinear.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_RESIZE_BILINEAR_H_
#define MACE_KERNELS_RESIZE_BILINEAR_H_
#include <algorithm>
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
struct CachedInterpolation {
index_t lower; // Lower source index used in the interpolation
index_t upper; // Upper source index used in the interpolation
  // 1-D linear interpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
float lerp;
};
inline float CalculateResizeScale(index_t in_size,
index_t out_size,
bool align_corners) {
return (align_corners && out_size > 1)
? (in_size - 1) / static_cast<float>(out_size - 1)
: in_size / static_cast<float>(out_size);
}
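// e.g. in_size = 4, out_size = 2: the scale is (4 - 1) / (2 - 1) = 3.0 with
// align_corners, and 4 / 2 = 2.0 otherwise.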
inline void ComputeInterpolationWeights(
const index_t out_size,
const index_t in_size,
const float scale,
CachedInterpolation *interpolation) {
interpolation[out_size].lower = 0;
interpolation[out_size].upper = 0;
for (index_t i = out_size - 1; i >= 0; --i) {
const float in = i * scale;
interpolation[i].lower = static_cast<index_t>(in);
interpolation[i].upper = std::min(interpolation[i].lower + 1, in_size - 1);
interpolation[i].lerp = in - interpolation[i].lower;
}
}
inline float ComputeLerp(const float top_left,
const float top_right,
const float bottom_left,
const float bottom_right,
const float x_lerp,
const float y_lerp) {
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
return top + (bottom - top) * y_lerp;
}
inline void ResizeImage(const float *images,
const index_t batch_size,
const index_t in_height,
const index_t in_width,
const index_t out_height,
const index_t out_width,
const index_t channels,
const std::vector<CachedInterpolation> &xs_vec,
const std::vector<CachedInterpolation> &ys,
float *output) {
const CachedInterpolation *xs = xs_vec.data();
#pragma omp parallel for collapse(2)
for (index_t b = 0; b < batch_size; ++b) {
for (index_t c = 0; c < channels; ++c) {
const float
*channel_input_ptr =
images + (b * channels + c) * in_height * in_width;
float *channel_output_ptr =
output + (b * channels + c) * out_height * out_width;
for (index_t y = 0; y < out_height; ++y) {
const float *y_lower_input_ptr =
channel_input_ptr + ys[y].lower * in_width;
const float *y_upper_input_ptr =
channel_input_ptr + ys[y].upper * in_width;
const float ys_lerp = ys[y].lerp;
for (index_t x = 0; x < out_width; ++x) {
const float xs_lerp = xs[x].lerp;
const float top_left = y_lower_input_ptr[xs[x].lower];
const float top_right = y_lower_input_ptr[xs[x].upper];
const float bottom_left = y_upper_input_ptr[xs[x].lower];
const float bottom_right = y_upper_input_ptr[xs[x].upper];
channel_output_ptr[y * out_width + x] =
ComputeLerp(top_left, top_right, bottom_left,
bottom_right, xs_lerp, ys_lerp);
}
}
}
}
}
struct ResizeBilinearFunctorBase {
ResizeBilinearFunctorBase(const std::vector<index_t> &size,
bool align_corners)
: align_corners_(align_corners) {
MACE_CHECK(size.size() == 2);
out_height_ = size[0];
out_width_ = size[1];
}
protected:
bool align_corners_;
index_t out_height_;
index_t out_width_;
};
template<DeviceType D, typename T>
struct ResizeBilinearFunctor;
template<>
struct ResizeBilinearFunctor<DeviceType::CPU, float>
: ResizeBilinearFunctorBase {
ResizeBilinearFunctor(const std::vector<index_t> &size, bool align_corners)
: ResizeBilinearFunctorBase(size, align_corners) {}
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future) {
MACE_UNUSED(future);
const index_t batch = input->dim(0);
const index_t channels = input->dim(1);
const index_t in_height = input->dim(2);
const index_t in_width = input->dim(3);
index_t out_height = out_height_;
index_t out_width = out_width_;
MACE_CHECK(out_height > 0 && out_width > 0);
std::vector<index_t> out_shape{batch, channels, out_height, out_width};
MACE_RETURN_IF_ERROR(output->Resize(out_shape));
Tensor::MappingGuard input_mapper(input);
Tensor::MappingGuard output_mapper(output);
const float *input_data = input->data<float>();
float *output_data = output->mutable_data<float>();
if (out_height == in_height && out_width == in_width) {
std::copy(input_data,
input_data + batch * channels * in_height * in_width,
output_data);
return MACE_SUCCESS;
}
float height_scale =
CalculateResizeScale(in_height, out_height, align_corners_);
float width_scale =
CalculateResizeScale(in_width, out_width, align_corners_);
std::vector<CachedInterpolation> ys(out_height + 1);
std::vector<CachedInterpolation> xs(out_width + 1);
// Compute the cached interpolation weights on the x and y dimensions.
ComputeInterpolationWeights(out_height, in_height, height_scale, ys.data());
ComputeInterpolationWeights(out_width, in_width, width_scale, xs.data());
ResizeImage(input_data, batch, in_height, in_width, out_height, out_width,
channels, xs, ys, output_data);
return MACE_SUCCESS;
}
};
#ifdef MACE_ENABLE_OPENCL
template<typename T>
struct ResizeBilinearFunctor<DeviceType::GPU, T>
: ResizeBilinearFunctorBase {
ResizeBilinearFunctor(const std::vector<index_t> &size, bool align_corners)
: ResizeBilinearFunctorBase(size, align_corners) {}
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future);
cl::Kernel kernel_;
uint32_t kwg_size_;
std::unique_ptr<BufferBase> kernel_error_;
std::vector<index_t> input_shape_;
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_RESIZE_BILINEAR_H_
|
WrapOpenMP.h | /**
* @file WrapOpenMP.h
* @author F. Gratl
* @date 4/20/18
*/
#pragma once
/**
* Provide non-OpenMP versions of the most common OpenMP function calls,
* so that they don't have to be wrapped in ifdef-s every time.
*
* Proper wrapper and renaming necessary, because of -fopenmp-simd handling of
* gcc.
*
* Extend when necessary.
*/
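// Example (sketch): call sites use these wrappers unconditionally, e.g.
//   int tid = autopas::autopas_get_thread_num();
// which resolves to omp_get_thread_num() with OpenMP and to 0 without it.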
#include <vector> // for the std::vector custom reductions below

#if defined(AUTOPAS_OPENMP)
#include <omp.h>
#else
#include "ExceptionHandler.h"
#endif
namespace autopas {
#if defined(AUTOPAS_OPENMP)
/**
* Wrapper for omp_get_thread_num().
* @return Id of the current thread.
*/
inline int autopas_get_thread_num() { return omp_get_thread_num(); }
/**
 * Wrapper for omp_get_num_threads().
* @return Number of currently active threads.
*/
inline int autopas_get_num_threads() { return omp_get_num_threads(); }
/**
* Wrapper for omp_get_max_threads().
* @return Number of threads that can be activated.
*/
inline int autopas_get_max_threads() { return omp_get_max_threads(); }
/**
 * AutoPasLock for the OpenMP case; wraps an omp_lock_t object to make it
 * copyable, movable, etc.
*/
class AutoPasLock {
public:
/**
* Default constructor
*/
AutoPasLock() { omp_init_lock(&_lock); }
/**
* Move Constructor
*/
AutoPasLock(AutoPasLock &&) noexcept { omp_init_lock(&_lock); }
/**
* Copy constructor
*/
AutoPasLock(const AutoPasLock &) { omp_init_lock(&_lock); }
/**
* Assignment operator
* @return reference to this object after copy
*/
AutoPasLock &operator=(AutoPasLock) = delete;
/**
* Destructor
*/
~AutoPasLock() { omp_destroy_lock(&_lock); }
/**
* Acquire the lock.
*/
void lock() { omp_set_lock(&_lock); }
/**
* Release the lock.
*/
void unlock() { omp_unset_lock(&_lock); }
private:
omp_lock_t _lock;
};
/**
* Custom reductions:
*/
// reduction for merging vectors: {1,2} + {2,3} -> {1,2,2,3}
#pragma omp declare reduction(vecMerge : std::vector<size_t> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
#pragma omp declare reduction(vecMerge : std::vector<double> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
#else
/**
 * Dummy for omp_get_thread_num() when no OpenMP is available.
* @return Always 0.
*/
inline int autopas_get_thread_num() { return 0; }
/**
* Dummy for omp_get_num_threads() when no OpenMP is available.
* @return Always 1.
*/
inline int autopas_get_num_threads() { return 1; }
/**
* Dummy for omp_get_max_threads() when no OpenMP is available.
* @return Always 1.
*/
inline int autopas_get_max_threads() { return 1; }
/**
* AutoPasLock for the sequential case.
*/
class AutoPasLock {
public:
/**
* Default constructor
*/
AutoPasLock() { _locked = false; }
/**
* Move Constructor
*/
AutoPasLock(AutoPasLock &&) noexcept { _locked = false; }
/**
* Copy constructor
*/
AutoPasLock(AutoPasLock &) { _locked = false; }
/**
* Assignment operator
* @return reference to this object after copy
*/
AutoPasLock &operator=(AutoPasLock) = delete;
/**
* Destructor
*/
~AutoPasLock() {
if (_locked) {
utils::ExceptionHandler::exception("AutoPasLocked destroyed in locked state.");
}
}
/**
* Acquire the lock.
*/
void lock() {
if (_locked) {
utils::ExceptionHandler::exception("Tried to acquire a locked lock.");
}
_locked = true;
}
/**
* Release the lock.
*/
void unlock() {
if (not _locked) {
utils::ExceptionHandler::exception("Tried to release an unlocked lock.");
}
_locked = false;
}
private:
// true if locked, false if unlocked
bool _locked;
};
#endif
} // namespace autopas |
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
friend class ASTStmtWriter;
/// Kind of the directive.
OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
/// Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// Ending location of the directive.
SourceLocation EndLoc;
/// Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
if (!Data)
return llvm::None;
return Data->getClauses();
}
protected:
/// Data, associated with the directive.
OMPChildren *Data = nullptr;
/// Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)) {}
template <typename T, typename... Params>
static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, unsigned NumChildren,
Params &&... P) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt,
NumChildren),
alignof(T));
auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses,
AssociatedStmt, NumChildren);
auto *Inst = new (Mem) T(std::forward<Params>(P)...);
Inst->Data = Data;
return Inst;
}
template <typename T, typename... Params>
static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
bool HasAssociatedStmt, unsigned NumChildren,
Params &&... P) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
NumChildren),
alignof(T));
auto *Data =
OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
HasAssociatedStmt, NumChildren);
auto *Inst = new (Mem) T(std::forward<Params>(P)...);
Inst->Data = Data;
return Inst;
}
template <typename T>
static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
bool HasAssociatedStmt = false,
unsigned NumChildren = 0) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
NumChildren),
alignof(T));
auto *Data =
OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
HasAssociatedStmt, NumChildren);
auto *Inst = new (Mem) T;
Inst->Data = Data;
return Inst;
}
public:
/// Iterates over expressions/statements used in the construct.
class used_clauses_child_iterator
: public llvm::iterator_adaptor_base<
used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
ArrayRef<OMPClause *>::iterator End;
OMPClause::child_iterator ChildI, ChildEnd;
void MoveToNext() {
if (ChildI != ChildEnd)
return;
while (this->I != End) {
++this->I;
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
if (ChildI != ChildEnd)
return;
}
}
}
public:
explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
: used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
End(Clauses.end()) {
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
MoveToNext();
}
}
Stmt *operator*() const { return *ChildI; }
Stmt *operator->() const { return **this; }
used_clauses_child_iterator &operator++() {
++ChildI;
if (ChildI != ChildEnd)
return *this;
if (this->I != End) {
++this->I;
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
}
}
MoveToNext();
return *this;
}
};
static llvm::iterator_range<used_clauses_child_iterator>
used_clauses_children(ArrayRef<OMPClause *> Clauses) {
return {used_clauses_child_iterator(Clauses),
used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
}
/// Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only clauses of type SpecificClause.
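  ///
  /// For example (illustrative sketch), visiting every 'num_threads' clause
  /// of a directive \c D:
  /// \code
  /// for (const auto *C : D->getClausesOfKind<OMPNumThreadsClause>())
  ///   handleNumThreads(C->getNumThreads()); // handleNumThreads: hypothetical
  /// \endcode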
template <typename SpecificClause>
class specific_clause_iterator
: public llvm::iterator_adaptor_base<
specific_clause_iterator<SpecificClause>,
ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
const SpecificClause *, ptrdiff_t, const SpecificClause *,
const SpecificClause *> {
ArrayRef<OMPClause *>::const_iterator End;
void SkipToNextClause() {
while (this->I != End && !isa<SpecificClause>(*this->I))
++this->I;
}
public:
explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
: specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
End(Clauses.end()) {
SkipToNextClause();
}
const SpecificClause *operator*() const {
return cast<SpecificClause>(*this->I);
}
const SpecificClause *operator->() const { return **this; }
specific_clause_iterator &operator++() {
++this->I;
SkipToNextClause();
return *this;
}
};
template <typename SpecificClause>
static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
return {specific_clause_iterator<SpecificClause>(Clauses),
specific_clause_iterator<SpecificClause>(
llvm::makeArrayRef(Clauses.end(), 0))};
}
template <typename SpecificClause>
llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind() const {
return getClausesOfKind<SpecificClause>(clauses());
}
  /// Gets the single clause of the specified kind associated with the
  /// current directive, if there is exactly one such clause; an assertion
  /// fires if more than one clause of this kind is associated with the
  /// directive. Returns nullptr if no clause of this kind is associated
  /// with the directive.
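  ///
  /// For example (illustrative sketch):
  /// \code
  /// if (const auto *IC = D->getSingleClause<OMPIfClause>())
  ///   Cond = IC->getCondition(); // Cond: hypothetical Expr* variable
  /// \endcode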
template <typename SpecificClause>
const SpecificClause *getSingleClause() const {
auto Clauses = getClausesOfKind<SpecificClause>();
if (Clauses.begin() != Clauses.end()) {
assert(std::next(Clauses.begin()) == Clauses.end() &&
"There are at least 2 clauses of the specified kind");
return *Clauses.begin();
}
return nullptr;
}
/// Returns true if the current directive has one or more clauses of a
/// specific kind.
template <typename SpecificClause>
bool hasClausesOfKind() const {
auto Clauses = getClausesOfKind<SpecificClause>();
return Clauses.begin() != Clauses.end();
}
/// Returns starting location of directive kind.
SourceLocation getBeginLoc() const { return StartLoc; }
/// Returns ending location of directive.
SourceLocation getEndLoc() const { return EndLoc; }
/// Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// Get number of clauses.
unsigned getNumClauses() const {
if (!Data)
return 0;
return Data->getNumClauses();
}
/// Returns specified clause.
///
/// \param I Number of clause.
///
OMPClause *getClause(unsigned I) const { return clauses()[I]; }
/// Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }
/// Returns statement associated with the directive.
const Stmt *getAssociatedStmt() const {
return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
}
Stmt *getAssociatedStmt() {
assert(hasAssociatedStmt() &&
"Expected directive with the associated statement.");
return Data->getAssociatedStmt();
}
/// Returns the captured statement associated with the
/// component region within the (combined) directive.
///
/// \param RegionKind Component region kind.
const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
assert(hasAssociatedStmt() &&
"Expected directive with the associated statement.");
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
return Data->getCapturedStmt(RegionKind, CaptureRegions);
}
/// Get innermost captured statement for the construct.
CapturedStmt *getInnermostCapturedStmt() {
assert(hasAssociatedStmt() &&
"Expected directive with the associated statement.");
SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
return Data->getInnermostCapturedStmt(CaptureRegions);
}
const CapturedStmt *getInnermostCapturedStmt() const {
return const_cast<OMPExecutableDirective *>(this)
->getInnermostCapturedStmt();
}
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
child_range children() {
if (!Data)
return child_range(child_iterator(), child_iterator());
return Data->getAssociatedStmtAsRange();
}
const_child_range children() const {
return const_cast<OMPExecutableDirective *>(this)->children();
}
ArrayRef<OMPClause *> clauses() const {
if (!Data)
return llvm::None;
return Data->getClauses();
}
/// Returns whether or not this is a Standalone directive.
///
/// Stand-alone directives are executable directives
/// that have no associated user code.
bool isStandaloneDirective() const;
/// Returns the AST node representing OpenMP structured-block of this
/// OpenMP executable directive,
/// Prerequisite: Executable Directive must not be Standalone directive.
const Stmt *getStructuredBlock() const {
return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
}
Stmt *getStructuredBlock();
const Stmt *getRawStmt() const {
return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
}
Stmt *getRawStmt() {
assert(hasAssociatedStmt() &&
"Expected directive with the associated statement.");
return Data->getRawStmt();
}
};
/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending Location of the directive.
///
OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPParallelDirectiveClass,
llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPParallelDirective()
: OMPExecutableDirective(OMPParallelDirectiveClass,
llvm::omp::OMPD_parallel, SourceLocation(),
SourceLocation()) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelDirectiveClass;
}
};
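// Illustrative sketch: for a parallel region the optional task-reduction
// descriptor is stored as the directive's first child, so a consumer might
// query it together with the cancellation flag like this:
//
//   if (const auto *PD = llvm::dyn_cast<OMPParallelDirective>(S)) {
//     if (PD->hasCancel()) {
//       // The region contains an inner 'cancel parallel' directive.
//     }
//     if (const Expr *TRR = PD->getTaskReductionRefExpr()) {
//       // A task reduction is active; TRR is the taskgroup descriptor.
//     }
//   }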
/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd', etc.). It is responsible for the loop code
/// generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum = 0;
/// Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 8 children are needed by all the loop directives, the next 8
/// are specific to the worksharing ones, and the next 13 are used for
/// combined constructs containing two pragmas associated with loops.
/// After the fixed children, eight arrays of length CollapsedNum are
/// allocated: loop counters, private counters, inits, updates, finals,
/// dependent counters, dependent inits, and finals conditions.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking
/// information in composite constructs which require loop blocking.
/// DistInc is used to generate the increment expression for the distribute
/// loop when combined with a further nested loop.
/// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
/// for loop when combined with a previous distribute loop in the same pragma
/// (e.g. 'distribute parallel for')
///
enum {
IterationVariableOffset = 0,
LastIterationOffset = 1,
CalcLastIterationOffset = 2,
PreConditionOffset = 3,
CondOffset = 4,
InitOffset = 5,
IncOffset = 6,
PreInitsOffset = 7,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals/dependent_counters/dependent_inits/finals_conditions
// arrays).
DefaultEnd = 8,
// The following 8 exprs are used by worksharing and distribute loops only.
IsLastIterVariableOffset = 8,
LowerBoundVariableOffset = 9,
UpperBoundVariableOffset = 10,
StrideVariableOffset = 11,
EnsureUpperBoundOffset = 12,
NextLowerBoundOffset = 13,
NextUpperBoundOffset = 14,
NumIterationsOffset = 15,
// Offset to the end for worksharing loop directives.
WorksharingEnd = 16,
PrevLowerBoundVariableOffset = 16,
PrevUpperBoundVariableOffset = 17,
DistIncOffset = 18,
PrevEnsureUpperBoundOffset = 19,
CombinedLowerBoundVariableOffset = 20,
CombinedUpperBoundVariableOffset = 21,
CombinedEnsureUpperBoundOffset = 22,
CombinedInitOffset = 23,
CombinedConditionOffset = 24,
CombinedNextLowerBoundOffset = 25,
CombinedNextUpperBoundOffset = 26,
CombinedDistConditionOffset = 27,
CombinedParForInDistConditionOffset = 28,
// Offset to the end (and start of the following
// counters/updates/finals/dependent_counters/dependent_inits/finals_conditions
// arrays) for combined distribute loop directives.
CombinedDistributeEnd = 29,
};
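// Worked example (derived from the offsets above): for a loop-bound-sharing
// directive such as '#pragma omp distribute parallel for collapse(2)', the
// fixed children occupy indices [0, CombinedDistributeEnd) == [0, 29), and
// the trailing arrays then occupy, in order:
//   counters            [29, 31)
//   private counters    [31, 33)
//   inits               [33, 35)
//   updates             [35, 37)
//   finals              [37, 39)
//   dependent counters  [39, 41)
//   dependent inits     [41, 43)
//   finals conditions   [43, 45)
// For a plain 'omp simd' the arrays start at DefaultEnd == 8 instead, and
// for a worksharing 'omp for' at WorksharingEnd == 16.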
/// Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind())]);
return llvm::makeMutableArrayRef(Storage, CollapsedNum);
}
/// Get the private counters storage.
MutableArrayRef<Expr *> getPrivateCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
CollapsedNum]);
return llvm::makeMutableArrayRef(Storage, CollapsedNum);
}
/// Get the inits storage.
MutableArrayRef<Expr *> getInits() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
2 * CollapsedNum]);
return llvm::makeMutableArrayRef(Storage, CollapsedNum);
}
/// Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
3 * CollapsedNum]);
return llvm::makeMutableArrayRef(Storage, CollapsedNum);
}
/// Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
4 * CollapsedNum]);
return llvm::makeMutableArrayRef(Storage, CollapsedNum);
}
/// Get the dependent counters storage.
MutableArrayRef<Expr *> getDependentCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
5 * CollapsedNum]);
return llvm::makeMutableArrayRef(Storage, CollapsedNum);
}
/// Get the dependent inits storage.
MutableArrayRef<Expr *> getDependentInits() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
6 * CollapsedNum]);
return llvm::makeMutableArrayRef(Storage, CollapsedNum);
}
/// Get the finals conditions storage.
MutableArrayRef<Expr *> getFinalsConditions() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
7 * CollapsedNum]);
return llvm::makeMutableArrayRef(Storage, CollapsedNum);
}
protected:
/// Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
///
OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
CollapsedNum(CollapsedNum) {}
/// Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
if (isOpenMPLoopBoundSharingDirective(Kind))
return CombinedDistributeEnd;
if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
isOpenMPDistributeDirective(Kind))
return WorksharingEnd;
return DefaultEnd;
}
/// Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) +
8 * CollapsedNum; // Counters, PrivateCounters, Inits,
// Updates, Finals, DependentCounters,
// DependentInits, FinalsConditions.
}
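// For example, '#pragma omp for collapse(2)' stores
//   numLoopChildren(2, llvm::omp::OMPD_for) == 16 + 8 * 2 == 32
// children in total, while '#pragma omp simd collapse(2)' stores
//   numLoopChildren(2, llvm::omp::OMPD_simd) == 8 + 8 * 2 == 24.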
void setIterationVariable(Expr *IV) {
Data->getChildren()[IterationVariableOffset] = IV;
}
void setLastIteration(Expr *LI) {
Data->getChildren()[LastIterationOffset] = LI;
}
void setCalcLastIteration(Expr *CLI) {
Data->getChildren()[CalcLastIterationOffset] = CLI;
}
void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; }
void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; }
void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; }
void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; }
void setPreInits(Stmt *PreInits) {
Data->getChildren()[PreInitsOffset] = PreInits;
}
void setIsLastIterVariable(Expr *IL) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[IsLastIterVariableOffset] = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[LowerBoundVariableOffset] = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[UpperBoundVariableOffset] = UB;
}
void setStrideVariable(Expr *ST) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[StrideVariableOffset] = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[EnsureUpperBoundOffset] = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NextLowerBoundOffset] = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NextUpperBoundOffset] = NUB;
}
void setNumIterations(Expr *NI) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NumIterationsOffset] = NI;
}
void setPrevLowerBoundVariable(Expr *PrevLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB;
}
void setDistInc(Expr *DistInc) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[DistIncOffset] = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB;
}
void setCombinedLowerBoundVariable(Expr *CombLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB;
}
void setCombinedUpperBoundVariable(Expr *CombUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB;
}
void setCombinedEnsureUpperBound(Expr *CombEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB;
}
void setCombinedInit(Expr *CombInit) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedInitOffset] = CombInit;
}
void setCombinedCond(Expr *CombCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedConditionOffset] = CombCond;
}
void setCombinedNextLowerBound(Expr *CombNLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB;
}
void setCombinedNextUpperBound(Expr *CombNUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB;
}
void setCombinedDistCond(Expr *CombDistCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
Data->getChildren()[CombinedDistConditionOffset] = CombDistCond;
}
void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
Data->getChildren()[CombinedParForInDistConditionOffset] =
CombParForInDistCond;
}
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
void setDependentCounters(ArrayRef<Expr *> A);
void setDependentInits(ArrayRef<Expr *> A);
void setFinalsConditions(ArrayRef<Expr *> A);
public:
/// The expressions built to support OpenMP loops in combined/composite
/// pragmas (e.g. '#pragma omp distribute parallel for').
struct DistCombinedHelperExprs {
/// DistributeLowerBound - used when composing 'omp distribute' with
/// 'omp for' in the same construct.
Expr *LB;
/// DistributeUpperBound - used when composing 'omp distribute' with
/// 'omp for' in the same construct.
Expr *UB;
/// DistributeEnsureUpperBound - used when composing 'omp distribute'
/// with 'omp for' in the same construct; EUB depends on DistUB.
Expr *EUB;
/// Distribute loop iteration variable init used when composing
/// 'omp distribute' with 'omp for' in the same construct.
Expr *Init;
/// Distribute loop condition used when composing 'omp distribute'
/// with 'omp for' in the same construct.
Expr *Cond;
/// Update of LowerBound for statically scheduled omp loops for
/// outer loop in combined constructs (e.g. 'distribute parallel for').
Expr *NLB;
/// Update of UpperBound for statically scheduled omp loops for
/// outer loop in combined constructs (e.g. 'distribute parallel for').
Expr *NUB;
/// Distribute loop condition used when composing 'omp distribute'
/// with 'omp for' in the same construct when the schedule is chunked.
Expr *DistCond;
/// 'omp parallel for' loop condition used when composed with
/// 'omp distribute' in the same construct and when the schedule is
/// chunked and the chunk size is 1.
Expr *ParForInDistCond;
};
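// These fields correspond one-to-one to the Combined* child offsets above
// (CombinedLowerBoundVariableOffset through
// CombinedParForInDistConditionOffset) and are only materialized for
// loop-bound-sharing directives such as 'distribute parallel for'.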
/// The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
/// Loop iteration variable.
Expr *IterationVarRef;
/// Loop last iteration number.
Expr *LastIteration;
/// Loop number of iterations.
Expr *NumIterations;
/// Calculation of last iteration.
Expr *CalcLastIteration;
/// Loop pre-condition.
Expr *PreCond;
/// Loop condition.
Expr *Cond;
/// Loop iteration variable init.
Expr *Init;
/// Loop increment.
Expr *Inc;
/// IsLastIteration - local flag variable passed to runtime.
Expr *IL;
/// LowerBound - local variable passed to runtime.
Expr *LB;
/// UpperBound - local variable passed to runtime.
Expr *UB;
/// Stride - local variable passed to runtime.
Expr *ST;
/// EnsureUpperBound -- expression UB = min(UB, NumIterations).
Expr *EUB;
/// Update of LowerBound for statically scheduled 'omp for' loops.
Expr *NLB;
/// Update of UpperBound for statically scheduled 'omp for' loops.
Expr *NUB;
/// PreviousLowerBound - local variable passed to runtime in the
/// enclosing schedule or null if that does not apply.
Expr *PrevLB;
/// PreviousUpperBound - local variable passed to runtime in the
/// enclosing schedule or null if that does not apply.
Expr *PrevUB;
/// DistInc - increment expression for the distribute loop when combined
/// with a further loop level (e.g. in 'distribute parallel for'):
/// expression IV = IV + ST.
Expr *DistInc;
/// PrevEUB - expression similar to EUB but to be used when loop
/// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for',
/// when ensuring that the UB is either the UB calculated by the runtime or
/// the end of the assigned distribute chunk):
/// expression UB = min(UB, PrevUB).
Expr *PrevEUB;
/// Loop counters.
SmallVector<Expr *, 4> Counters;
/// Private copies of the loop counters.
SmallVector<Expr *, 4> PrivateCounters;
/// Expressions for loop counters inits for CodeGen.
SmallVector<Expr *, 4> Inits;
/// Expressions for loop counters update for CodeGen.
SmallVector<Expr *, 4> Updates;
/// Final loop counter values for CodeGen.
SmallVector<Expr *, 4> Finals;
/// List of counters required for the generation of the non-rectangular
/// loops.
SmallVector<Expr *, 4> DependentCounters;
/// List of initializers required for the generation of the non-rectangular
/// loops.
SmallVector<Expr *, 4> DependentInits;
/// List of final conditions required for the generation of the
/// non-rectangular loops.
SmallVector<Expr *, 4> FinalsConditions;
/// Init statement for all captured expressions.
Stmt *PreInits;
/// Expressions used when combining OpenMP loop pragmas.
DistCombinedHelperExprs DistCombinedFields;
/// Check if all the expressions are built (does not check the
/// worksharing ones).
bool builtAll() {
return IterationVarRef != nullptr && LastIteration != nullptr &&
NumIterations != nullptr && PreCond != nullptr &&
Cond != nullptr && Init != nullptr && Inc != nullptr;
}
/// Initialize all the fields to null.
/// \param Size Number of elements in the
/// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
/// arrays.
void clear(unsigned Size) {
IterationVarRef = nullptr;
LastIteration = nullptr;
CalcLastIteration = nullptr;
PreCond = nullptr;
Cond = nullptr;
Init = nullptr;
Inc = nullptr;
IL = nullptr;
LB = nullptr;
UB = nullptr;
ST = nullptr;
EUB = nullptr;
NLB = nullptr;
NUB = nullptr;
NumIterations = nullptr;
PrevLB = nullptr;
PrevUB = nullptr;
DistInc = nullptr;
PrevEUB = nullptr;
Counters.resize(Size);
PrivateCounters.resize(Size);
Inits.resize(Size);
Updates.resize(Size);
Finals.resize(Size);
DependentCounters.resize(Size);
DependentInits.resize(Size);
FinalsConditions.resize(Size);
for (unsigned i = 0; i < Size; ++i) {
Counters[i] = nullptr;
PrivateCounters[i] = nullptr;
Inits[i] = nullptr;
Updates[i] = nullptr;
Finals[i] = nullptr;
DependentCounters[i] = nullptr;
DependentInits[i] = nullptr;
FinalsConditions[i] = nullptr;
}
PreInits = nullptr;
DistCombinedFields.LB = nullptr;
DistCombinedFields.UB = nullptr;
DistCombinedFields.EUB = nullptr;
DistCombinedFields.Init = nullptr;
DistCombinedFields.Cond = nullptr;
DistCombinedFields.NLB = nullptr;
DistCombinedFields.NUB = nullptr;
DistCombinedFields.DistCond = nullptr;
DistCombinedFields.ParForInDistCond = nullptr;
}
};
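// A minimal sketch of how a frontend might assemble the helper bundle before
// creating a loop directive (IV, LastIter, NumIters, PreCond, Cond, Init and
// Inc stand for previously built expressions and are hypothetical names):
//
//   OMPLoopDirective::HelperExprs B;
//   B.clear(/*Size=*/CollapsedNum); // null all fields, size the arrays
//   B.IterationVarRef = IV;
//   B.LastIteration = LastIter;
//   B.NumIterations = NumIters;
//   B.PreCond = PreCond;
//   B.Cond = Cond;
//   B.Init = Init;
//   B.Inc = Inc;
//   assert(B.builtAll() && "core loop expressions must all be set");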
/// Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; }
Expr *getIterationVariable() const {
return cast<Expr>(Data->getChildren()[IterationVariableOffset]);
}
Expr *getLastIteration() const {
return cast<Expr>(Data->getChildren()[LastIterationOffset]);
}
Expr *getCalcLastIteration() const {
return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]);
}
Expr *getPreCond() const {
return cast<Expr>(Data->getChildren()[PreConditionOffset]);
}
Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); }
Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); }
Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); }
const Stmt *getPreInits() const {
return Data->getChildren()[PreInitsOffset];
}
Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; }
Expr *getIsLastIterVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]);
}
Expr *getLowerBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]);
}
Expr *getUpperBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]);
}
Expr *getStrideVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[StrideVariableOffset]);
}
Expr *getEnsureUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]);
}
Expr *getNextLowerBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]);
}
Expr *getNextUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]);
}
Expr *getNumIterations() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NumIterationsOffset]);
}
Expr *getPrevLowerBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]);
}
Expr *getPrevUpperBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]);
}
Expr *getDistInc() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[DistIncOffset]);
}
Expr *getPrevEnsureUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]);
}
Expr *getCombinedLowerBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]);
}
Expr *getCombinedUpperBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]);
}
Expr *getCombinedEnsureUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]);
}
Expr *getCombinedInit() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedInitOffset]);
}
Expr *getCombinedCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedConditionOffset]);
}
Expr *getCombinedNextLowerBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]);
}
Expr *getCombinedNextUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]);
}
Expr *getCombinedDistCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]);
}
Expr *getCombinedParForInDistCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
}
/// Try to find the next loop sub-statement in the specified statement \p
/// CurStmt.
/// \param TryImperfectlyNestedLoops true if imperfectly nested loops should
/// also be searched for.
static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
bool TryImperfectlyNestedLoops);
static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
bool TryImperfectlyNestedLoops) {
return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
TryImperfectlyNestedLoops);
}
Stmt *getBody();
const Stmt *getBody() const {
return const_cast<OMPLoopDirective *>(this)->getBody();
}
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
return const_cast<OMPLoopDirective *>(this)->getCounters();
}
ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
ArrayRef<Expr *> private_counters() const {
return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
}
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
return const_cast<OMPLoopDirective *>(this)->getInits();
}
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
return const_cast<OMPLoopDirective *>(this)->getFinals();
}
ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
ArrayRef<Expr *> dependent_counters() const {
return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
}
ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
ArrayRef<Expr *> dependent_inits() const {
return const_cast<OMPLoopDirective *>(this)->getDependentInits();
}
ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
ArrayRef<Expr *> finals_conditions() const {
return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass ||
T->getStmtClass() == OMPForDirectiveClass ||
T->getStmtClass() == OMPForSimdDirectiveClass ||
T->getStmtClass() == OMPParallelForDirectiveClass ||
T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTaskLoopDirectiveClass ||
T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeDirectiveClass ||
T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTargetSimdDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
T->getStmtClass() ==
OMPTeamsDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForDirectiveClass ||
T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
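// Illustrative sketch: the per-loop arrays run in lockstep, one entry per
// collapsed loop, so a consumer can walk them together ('visitLoop' is a
// hypothetical visitor):
//
//   if (const auto *LD = llvm::dyn_cast<OMPLoopDirective>(S)) {
//     ArrayRef<Expr *> Cnts = LD->counters();
//     ArrayRef<Expr *> Inis = LD->inits();
//     ArrayRef<Expr *> Upds = LD->updates();
//     ArrayRef<Expr *> Fins = LD->finals();
//     for (unsigned I = 0, N = LD->getCollapsedNumber(); I < N; ++I)
//       visitLoop(Cnts[I], Inis[I], Upds[I], Fins[I]);
//   }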
/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt,
const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass;
}
};
/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(getCollapsedNumber(),
llvm::omp::OMPD_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs,
Expr *TaskRedRef, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getCollapsedNumber(), llvm::omp::OMPD_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForDirectiveClass;
}
};
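// Note (illustrative): unlike the non-loop directives, which store the
// task-reduction descriptor at child 0, a loop directive places it right
// after the loop helper children. For '#pragma omp for collapse(1)' that is
// child index
//   numLoopChildren(1, llvm::omp::OMPD_for) == 16 + 8 * 1 == 24.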
/// This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPForSimdDirectiveClass;
}
};
/// This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPSectionsDirectiveClass,
llvm::omp::OMPD_sections, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPSectionsDirective()
: OMPExecutableDirective(OMPSectionsDirectiveClass,
llvm::omp::OMPD_sections, SourceLocation(),
SourceLocation()) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionsDirectiveClass;
}
};
/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPSectionDirectiveClass,
llvm::omp::OMPD_section, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPSectionDirective()
: OMPExecutableDirective(OMPSectionDirectiveClass,
llvm::omp::OMPD_section, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement associated with the directive.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPSectionDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSectionDirectiveClass;
}
};
/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPSingleDirective()
: OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPSingleDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPSingleDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSingleDirectiveClass;
}
};
/// This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPMasterDirective()
: OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPMasterDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterDirectiveClass;
}
};
/// This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Name of the directive.
DeclarationNameInfo DirName;
/// Build directive with the given start and end location.
///
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
SourceLocation EndLoc)
: OMPExecutableDirective(OMPCriticalDirectiveClass,
llvm::omp::OMPD_critical, StartLoc, EndLoc),
DirName(Name) {}
/// Build an empty directive.
///
explicit OMPCriticalDirective()
: OMPExecutableDirective(OMPCriticalDirectiveClass,
llvm::omp::OMPD_critical, SourceLocation(),
SourceLocation()) {}
/// Set name of the directive.
///
/// \param Name Name of the directive.
///
void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
/// Creates directive.
///
/// \param C AST context.
/// \param Name Name of the directive.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
///
static OMPCriticalDirective *
Create(const ASTContext &C, const DeclarationNameInfo &Name,
SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Return name of the directive.
///
DeclarationNameInfo getDirectiveName() const { return DirName; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCriticalDirectiveClass;
}
};
/// This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current region has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelForDirectiveClass,
llvm::omp::OMPD_parallel_for, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPParallelForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelForDirectiveClass,
llvm::omp::OMPD_parallel_for, SourceLocation(),
SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(getCollapsedNumber(),
llvm::omp::OMPD_parallel_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getCollapsedNumber(), llvm::omp::OMPD_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPParallelForDirective *>(this)
->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForDirectiveClass;
}
};
/// This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelForSimdDirectiveClass,
llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPParallelForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelForSimdDirectiveClass,
llvm::omp::OMPD_parallel_for_simd, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp parallel master' directive.
///
/// \code
/// #pragma omp parallel master private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel master' has clauses
/// 'private' with the variables 'a' and 'b'.
///
class OMPParallelMasterDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPParallelMasterDirectiveClass,
llvm::omp::OMPD_parallel_master, StartLoc,
EndLoc) {}
/// Build an empty directive.
///
explicit OMPParallelMasterDirective()
: OMPExecutableDirective(OMPParallelMasterDirectiveClass,
llvm::omp::OMPD_parallel_master,
SourceLocation(), SourceLocation()) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
///
static OMPParallelMasterDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelMasterDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPParallelMasterDirective *>(this)
->getTaskReductionRefExpr();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelMasterDirectiveClass;
}
};
/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
llvm::omp::OMPD_parallel_sections, StartLoc,
EndLoc) {}
/// Build an empty directive.
///
explicit OMPParallelSectionsDirective()
: OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
llvm::omp::OMPD_parallel_sections,
SourceLocation(), SourceLocation()) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPParallelSectionsDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelSectionsDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPParallelSectionsDirective *>(this)
->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
}
};
/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if this directive has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPTaskDirective()
: OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
SourceLocation(), SourceLocation()) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
///
static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskDirectiveClass;
}
};
/// This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
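/// As a sketch of the factory pattern used throughout this file: front-end
/// code builds the node via Create(), while AST deserialization builds a
/// placeholder via CreateEmpty() and fills it in afterwards. The wrapper
/// below is illustrative.
/// \code
///   Stmt *buildTaskyield(ASTContext &Ctx, SourceLocation Start,
///                        SourceLocation End) {
///     return OMPTaskyieldDirective::Create(Ctx, Start, End);
///   }
/// \endcode
///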
class OMPTaskyieldDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTaskyieldDirectiveClass,
llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPTaskyieldDirective()
: OMPExecutableDirective(OMPTaskyieldDirectiveClass,
llvm::omp::OMPD_taskyield, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPTaskyieldDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskyieldDirectiveClass;
}
};
/// This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPBarrierDirectiveClass,
llvm::omp::OMPD_barrier, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPBarrierDirective()
: OMPExecutableDirective(OMPBarrierDirectiveClass,
llvm::omp::OMPD_barrier, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPBarrierDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPBarrierDirectiveClass;
}
};
/// This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTaskwaitDirectiveClass,
llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPTaskwaitDirective()
: OMPExecutableDirective(OMPTaskwaitDirectiveClass,
llvm::omp::OMPD_taskwait, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPTaskwaitDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
}
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
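/// An illustrative sketch (the helper name is hypothetical) of reading the
/// task_reduction descriptor reference, e.g. for
/// '#pragma omp taskgroup task_reduction(+:r)':
/// \code
///   const Expr *getTaskgroupDescriptor(const OMPTaskgroupDirective *D) {
///     return D->getReductionRef(); // may be null without 'task_reduction'
///   }
/// \endcode
///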
class OMPTaskgroupDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTaskgroupDirectiveClass,
llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPTaskgroupDirective()
: OMPExecutableDirective(OMPTaskgroupDirectiveClass,
llvm::omp::OMPD_taskgroup, SourceLocation(),
SourceLocation()) {}
/// Sets the task_reduction return variable.
void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; }
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param ReductionRef Reference to the task_reduction return variable.
///
static OMPTaskgroupDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
Expr *ReductionRef);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Returns reference to the task_reduction return variable.
const Expr *getReductionRef() const {
return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef();
}
Expr *getReductionRef() { return cast_or_null<Expr>(Data->getChildren()[0]); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskgroupDirectiveClass;
}
};
/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has two arguments: variables
/// 'a' and 'b'.
/// The 'omp flush' directive does not have clauses, but it has an optional
/// list of variables to flush. This list of variables is stored within a
/// fake clause, OMPFlushClause.
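///
/// A minimal sketch of recovering that variable list through the generic
/// OMPExecutableDirective::getSingleClause<> helper; the function name
/// 'collectFlushedVars' is illustrative.
/// \code
///   void collectFlushedVars(const OMPFlushDirective *D,
///                           llvm::SmallVectorImpl<const Expr *> &Vars) {
///     if (const auto *FC = D->getSingleClause<OMPFlushClause>())
///       for (const Expr *E : FC->varlists())
///         Vars.push_back(E);
///   }
/// \endcode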
class OMPFlushDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPFlushDirective()
: OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only a single OMPFlushClause is
  /// allowed).
///
static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPFlushDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPFlushDirectiveClass;
}
};
/// This represents '#pragma omp depobj' directive.
///
/// \code
/// #pragma omp depobj(a) depend(in:x,y)
/// \endcode
/// In this example directive '#pragma omp depobj' initializes a depobj object
/// 'a' with dependence type 'in' and a list with 'x' and 'y' locators.
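///
/// An illustrative sketch of inspecting the dependence information attached
/// to a depobj construct (the function name is hypothetical):
/// \code
///   void dumpDepobjKind(const OMPDepobjDirective *D) {
///     if (const auto *DC = D->getSingleClause<OMPDependClause>())
///       llvm::errs() << "dependency kind: "
///                    << (unsigned)DC->getDependencyKind() << '\n';
///   }
/// \endcode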
class OMPDepobjDirective final : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPDepobjDirective()
: OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
///
static OMPDepobjDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPDepobjDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDepobjDirectiveClass;
}
};
/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
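/// The construct also has a stand-alone form, '#pragma omp ordered
/// depend(source)'. A hedged sketch of telling the two forms apart by the
/// presence of 'depend' clauses (the helper name is illustrative):
/// \code
///   bool isStandaloneOrdered(const OMPOrderedDirective *D) {
///     return D->hasClausesOfKind<OMPDependClause>();
///   }
/// \endcode
///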
class OMPOrderedDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPOrderedDirectiveClass,
llvm::omp::OMPD_ordered, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPOrderedDirective()
: OMPExecutableDirective(OMPOrderedDirectiveClass,
llvm::omp::OMPD_ordered, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPOrderedDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
  /// \param IsStandalone true if the standalone directive is created.
///
static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
bool IsStandalone, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPOrderedDirectiveClass;
}
};
/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
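/// As an illustration of how the captured sub-expressions relate to the
/// source for a capture form (a sketch; the mapping below paraphrases the
/// accessor documentation in this class):
/// \code
///   #pragma omp atomic capture
///   { v = x; x = x + expr; }
///   // getX()    -> 'x'
///   // getV()    -> 'v'
///   // getExpr() -> 'expr'
///   // getUpdateExpr() -> 'OpaqueValueExpr(x) + OpaqueValueExpr(expr)'
///   // isXLHSInRHSPart() -> true ('x' is the LHS inside 'x + expr')
///   // isPostfixUpdate() -> true ('v' captures the value before the update)
/// \endcode
///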
class OMPAtomicDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of the forms
/// \code
/// x = x binop expr;
/// x = expr binop x;
/// \endcode
/// This field is true for the first form of the expression and false for the
/// second. Required for correct codegen of non-associative operations (like
/// << or >>).
bool IsXLHSInRHSPart = false;
  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of the forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first (postfix) form of the expression and
  /// false otherwise.
bool IsPostfixUpdate = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPAtomicDirective()
: OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
SourceLocation(), SourceLocation()) {}
/// Set 'x' part of the associated expression/statement.
void setX(Expr *X) { Data->getChildren()[0] = X; }
/// Set helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
void setUpdateExpr(Expr *UE) { Data->getChildren()[1] = UE; }
/// Set 'v' part of the associated expression/statement.
void setV(Expr *V) { Data->getChildren()[2] = V; }
/// Set 'expr' part of the associated expression/statement.
void setExpr(Expr *E) { Data->getChildren()[3] = E; }
public:
/// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
/// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// a detailed description of 'x', 'v' and 'expr').
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param X 'x' part of the associated expression/statement.
/// \param V 'v' part of the associated expression/statement.
/// \param E 'expr' part of the associated expression/statement.
/// \param UE Helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
/// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
/// second.
  /// \param IsPostfixUpdate true if the original value of 'x' must be stored
  /// in 'v', not the updated one.
static OMPAtomicDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Get 'x' part of the associated expression/statement.
Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[0]); }
const Expr *getX() const {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
/// Get helper expression of the form
/// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
/// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
Expr *getUpdateExpr() { return cast_or_null<Expr>(Data->getChildren()[1]); }
const Expr *getUpdateExpr() const {
return cast_or_null<Expr>(Data->getChildren()[1]);
}
  /// Return true if the helper update expression has the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has the
  /// form 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// Return true if the 'v' expression must be updated to the original value
  /// of 'x', false if 'v' must be updated to the new value of 'x'.
bool isPostfixUpdate() const { return IsPostfixUpdate; }
/// Get 'v' part of the associated expression/statement.
Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[2]); }
const Expr *getV() const {
return cast_or_null<Expr>(Data->getChildren()[2]);
}
/// Get 'expr' part of the associated expression/statement.
Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[3]); }
const Expr *getExpr() const {
return cast_or_null<Expr>(Data->getChildren()[3]);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPAtomicDirectiveClass;
}
};
/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
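/// The classof() hook below enables the usual LLVM RTTI idioms; a minimal
/// sketch (the function name is illustrative):
/// \code
///   void noteOffloadRegion(const Stmt *S) {
///     if (const auto *TD = dyn_cast<OMPTargetDirective>(S))
///       llvm::errs() << "target region with " << TD->getNumClauses()
///                    << " clauses\n";
///   }
/// \endcode
///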
class OMPTargetDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPTargetDirective()
: OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDirectiveClass;
}
};
/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetDataDirectiveClass,
llvm::omp::OMPD_target_data, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPTargetDataDirective()
: OMPExecutableDirective(OMPTargetDataDirectiveClass,
llvm::omp::OMPD_target_data, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetDataDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDataDirectiveClass;
}
};
/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
llvm::omp::OMPD_target_enter_data, StartLoc,
EndLoc) {}
/// Build an empty directive.
///
explicit OMPTargetEnterDataDirective()
: OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
llvm::omp::OMPD_target_enter_data,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetEnterDataDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
unsigned N, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
}
};
/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
llvm::omp::OMPD_target_exit_data, StartLoc,
EndLoc) {}
/// Build an empty directive.
///
explicit OMPTargetExitDataDirective()
: OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
llvm::omp::OMPD_target_exit_data,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetExitDataDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
unsigned N, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
}
};
/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetParallelDirectiveClass,
llvm::omp::OMPD_target_parallel, StartLoc,
EndLoc) {}
/// Build an empty directive.
///
explicit OMPTargetParallelDirective()
: OMPExecutableDirective(OMPTargetParallelDirectiveClass,
llvm::omp::OMPD_target_parallel,
SourceLocation(), SourceLocation()) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTargetParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetParallelDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPTargetParallelDirective *>(this)
->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetParallelDirectiveClass;
}
};
/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
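/// A hedged sketch of walking the reduction clauses on such a directive via
/// the generic OMPExecutableDirective::getClausesOfKind<> helper (the
/// function name and loop body are illustrative):
/// \code
///   void listReductionVars(const OMPTargetParallelForDirective *D) {
///     for (const OMPReductionClause *RC :
///          D->getClausesOfKind<OMPReductionClause>())
///       for (const Expr *Var : RC->varlists())
///         Var->dumpColor();
///   }
/// \endcode
///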
class OMPTargetParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if current region has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetParallelForDirectiveClass,
llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetParallelForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetParallelForDirectiveClass,
llvm::omp::OMPD_target_parallel_for, SourceLocation(),
SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
getCollapsedNumber(), llvm::omp::OMPD_target_parallel_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if current directive has inner cancel directive.
///
static OMPTargetParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getCollapsedNumber(), llvm::omp::OMPD_target_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPTargetParallelForDirective *>(this)
->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
}
};
/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPTeamsDirective()
: OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDirectiveClass;
}
};
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for the innermost 'for'
/// region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPCancellationPointDirectiveClass,
llvm::omp::OMPD_cancellation_point, StartLoc,
EndLoc) {}
/// Build an empty directive.
explicit OMPCancellationPointDirective()
: OMPExecutableDirective(OMPCancellationPointDirectiveClass,
llvm::omp::OMPD_cancellation_point,
SourceLocation(), SourceLocation()) {}
/// Set cancel region for current cancellation point.
/// \param CR Cancellation region.
void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPCancellationPointDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Creates an empty directive.
///
/// \param C AST context.
///
static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
EmptyShell);
/// Get cancellation region for the current cancellation point.
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCancellationPointDirectiveClass;
}
};
/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for the innermost 'for' region.
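///
/// An illustrative sketch of dispatching on the cancelled region kind (the
/// predicate name is hypothetical):
/// \code
///   bool cancelsWorksharingLoop(const OMPCancelDirective *D) {
///     return D->getCancelRegion() == llvm::omp::OMPD_for;
///   }
/// \endcode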
class OMPCancelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPCancelDirective()
: OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
SourceLocation(), SourceLocation()) {}
  /// Set cancel region for the current cancel directive.
/// \param CR Cancellation region.
void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
///
static OMPCancelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPCancelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
  /// Get cancellation region for the current cancel directive.
OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPCancelDirectiveClass;
}
};
/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
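/// A minimal sketch of reading the 'grainsize' expression when present,
/// using the generic getSingleClause<> helper (the wrapper name is
/// illustrative):
/// \code
///   const Expr *getGrainsizeExpr(const OMPTaskLoopDirective *D) {
///     if (const auto *GC = D->getSingleClause<OMPGrainsizeClause>())
///       return GC->getGrainsize();
///     return nullptr;
///   }
/// \endcode
///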
class OMPTaskLoopDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTaskLoopDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTaskLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskLoopDirectiveClass;
}
};
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
llvm::omp::OMPD_taskloop_simd, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTaskLoopSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
}
};
/// This represents '#pragma omp master taskloop' directive.
///
/// \code
/// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
llvm::omp::OMPD_master_taskloop, SourceLocation(),
SourceLocation(), CollapsedNum) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPMasterTaskLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
}
};
/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
/// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop simd' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
llvm::omp::OMPD_master_taskloop_simd, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \p Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPMasterTaskLoopSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \p NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass;
}
};
/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
llvm::omp::OMPD_parallel_master_taskloop,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPParallelMasterTaskLoopDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
}
};
/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
llvm::omp::OMPD_parallel_master_taskloop_simd,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
llvm::omp::OMPD_parallel_master_taskloop_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \p Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPParallelMasterTaskLoopSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPParallelMasterTaskLoopSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
}
};
/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clause 'private'
/// with the variables 'a' and 'b'.
///
class OMPDistributeDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeDirectiveClass,
llvm::omp::OMPD_distribute, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPDistributeDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeDirectiveClass,
llvm::omp::OMPD_distribute, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPDistributeDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDistributeDirectiveClass;
}
};
/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
llvm::omp::OMPD_target_update, StartLoc,
EndLoc) {}
/// Build an empty directive.
///
explicit OMPTargetUpdateDirective()
: OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
llvm::omp::OMPD_target_update, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetUpdateDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses The number of clauses.
///
static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
}
};
/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'.
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPDistributeParallelForDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
llvm::omp::OMPD_distribute_parallel_for, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPDistributeParallelForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
llvm::omp::OMPD_distribute_parallel_for,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
getCollapsedNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getCollapsedNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPDistributeParallelForDirective *>(this)
->getTaskReductionRefExpr();
}
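/// Note (inferred from the accessors above): the task-reduction expression
/// occupies the child slot immediately after the numLoopChildren(...) loop
/// children, i.e. the same slot written by setTaskReductionRefExpr().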
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
}
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variable 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_distribute_parallel_for_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_distribute_parallel_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPDistributeParallelForSimdDirective *Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPDistributeParallelForSimdDirective *CreateEmpty(
const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variable 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeSimdDirectiveClass,
llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPDistributeSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPDistributeSimdDirectiveClass,
llvm::omp::OMPD_distribute_simd, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
llvm::omp::OMPD_target_parallel_for_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
llvm::omp::OMPD_target_parallel_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetSimdDirectiveClass,
llvm::omp::OMPD_target_simd, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetSimdDirectiveClass,
llvm::omp::OMPD_target_simd, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetSimdDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTeamsDistributeDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
llvm::omp::OMPD_teams_distribute, SourceLocation(),
SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
llvm::omp::OMPD_teams_distribute_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
llvm::omp::OMPD_teams_distribute_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place
/// for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variable 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
: public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_teams_distribute_parallel_for_simd,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_teams_distribute_parallel_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTeamsDistributeParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDistributeParallelForSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variable 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
llvm::omp::OMPD_teams_distribute_parallel_for,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
llvm::omp::OMPD_teams_distribute_parallel_for,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
getCollapsedNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] =
E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTeamsDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDistributeParallelForDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getCollapsedNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
}
};
/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
llvm::omp::OMPD_target_teams, StartLoc, EndLoc) {
}
/// Build an empty directive.
///
explicit OMPTargetTeamsDirective()
: OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
llvm::omp::OMPD_target_teams, SourceLocation(),
SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetTeamsDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variable 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
llvm::omp::OMPD_target_teams_distribute, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
llvm::omp::OMPD_target_teams_distribute,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDistributeDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variable 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
: public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
llvm::omp::OMPD_target_teams_distribute_parallel_for,
StartLoc, EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
llvm::omp::OMPD_target_teams_distribute_parallel_for,
SourceLocation(), SourceLocation(), CollapsedNum) {}
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
getCollapsedNumber(),
llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E;
}
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPTargetTeamsDistributeParallelForDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDistributeParallelForDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
getCollapsedNumber(),
llvm::omp::OMPD_target_teams_distribute_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this)
->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
static bool classof(const Stmt *T) {
return T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variable 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
: public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(
OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetTeamsDistributeParallelForSimdDirective(
unsigned CollapsedNum)
: OMPLoopDirective(
OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeParallelForSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDistributeParallelForSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
}
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variable 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed nested loops.
///
OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
llvm::omp::OMPD_target_teams_distribute_simd, StartLoc,
EndLoc, CollapsedNum) {}
/// Build an empty directive.
///
/// \param CollapsedNum Number of collapsed nested loops.
///
explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum)
: OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
llvm::omp::OMPD_target_teams_distribute_simd,
SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
/// \param Exprs Helper expressions for CodeGen.
///
static OMPTargetTeamsDistributeSimdDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, const HelperExprs &Exprs);
/// Creates an empty directive with the place for \a NumClauses clauses.
///
/// \param C AST context.
/// \param CollapsedNum Number of collapsed nested loops.
/// \param NumClauses Number of clauses.
///
static OMPTargetTeamsDistributeSimdDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive' with
/// list item 'a'.
class OMPScanDirective final : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPScanDirective()
: OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param Clauses List of clauses (at most one 'inclusive' or 'exclusive'
/// clause is allowed).
///
static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPScanDirectiveClass;
}
};
} // end namespace clang
#endif
|
matmat.c | #include<stdlib.h>
#include<stdio.h>
#include "omp.h"
#define rowsA 100
#define colsA 100
#define colsB 100
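/* build sketch (assumed toolchain): cc -fopenmp matmat.c -o matmat */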
int main()
{
int tid,nthreads,i,j,k,chunk;
double a[rowsA][colsA],
b[colsA][colsB],
c[rowsA][colsB],
d[rowsA][colsB];
chunk=10;
//spawning a parallel region, explicitly scoping all variables
//parallel
#pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,i,j,k)
{
tid=omp_get_thread_num();
if(tid==0)
{
nthreads=omp_get_num_threads();
printf("%d = number of threads in the system\n",nthreads );
}
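/* note: rand() is not required to be thread-safe by the C standard, so the
   matrix contents below may vary between runs; acceptable for this demo */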
#pragma omp for schedule(static,chunk)
for(i=0;i<rowsA;i++)
for(j=0;j<colsA;j++)
a[i][j]=rand()%10;
#pragma omp for schedule(static,chunk)
for(i=0;i<colsA;i++)
for(j=0;j<colsB;j++)
b[i][j]=rand()%10;
#pragma omp for schedule(static,chunk)
for(i=0;i<rowsA;i++)
for(j=0;j<colsB;j++)
c[i][j]=0;
#pragma omp for schedule(static,chunk)
for(i=0;i<rowsA;i++)
for(j=0;j<colsB;j++)
d[i][j]=0;
printf("Threads %d starting matrix multiply...\n",tid);
#pragma omp for schedule(static, chunk)
for( i=0;i<rowsA;i++)
{
printf("Thread=%d did row=%d\n",tid,i);
for(j=0;j<colsB;j++)
for(k=0;k<colsA;k++)
c[i][j]+=a[i][k]*b[k][j];
}
}
//serial
for(i=0;i<rowsA;i++)
{
for(j=0;j<colsB;j++)
for(k=0;k<colsA;k++)
d[i][j]+=a[i][k]*b[k][j];
}
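/* optional consistency check (not in the original): compare the parallel
   result c against the serial reference d element-wise; all sums are exact
   in double since the inputs are small integers */
{
int mismatches = 0;
for(i=0;i<rowsA;i++)
for(j=0;j<colsB;j++)
if(c[i][j]!=d[i][j])
mismatches++;
printf("%d mismatching entries between parallel and serial result\n",mismatches);
}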
printf("Done\n");
} |
scatter.h | #pragma once
#include <fstream>
#include <algorithm>
#include <cmath>
namespace Faunus {
/** @brief Routines related to scattering.
*/
namespace Scatter {
enum Algorithm { SIMD, EIGEN, GENERIC }; //!< Selections for math algorithms
/** @brief Form factor, `F(q)`, for a hard sphere of radius `R`.
*/
template <class T = float> class FormFactorSphere {
private:
T j1(T x) const { // spherical Bessel function
T xinv = 1 / x;
return xinv * (sin(x) * xinv - cos(x));
}
public:
/**
* @param q q value in inverse angstroms
* @param a particle to take radius, \c R from
* @returns
* @f$I(q)=\left [\frac{3}{(qR)^3}\left (\sin{qR}-qR\cos{qR}\right )\right ]^2@f$
*/
template <class Tparticle> T operator()(T q, const Tparticle &a) const {
assert(q > 0 && a.radius > 0 && "Particle radius and q must be positive");
T qR = q * a.radius;
qR = 3. / (qR * qR * qR) * (sin(qR) - qR * cos(qR));
return qR * qR;
}
};
/**
* @brief Unity form factor (q independent).
*/
template <class T = float> struct FormFactorUnity {
template <class Tparticle> T operator()(T, const Tparticle &) const { return 1; }
};
/**
* @brief Calculate scattering intensity, I(q), on a mesh using the Debye formula.
*
* It is important to note that distances should be calculated without periodicity and if molecules cross
* periodic boundaries, these must be made whole before performing the analysis.
*
* The JSON object is scanned for the following keywords:
*
* - `qmin` minimum q value (1/angstrom)
* - `qmax` maximum q value (1/angstrom)
* - `dq` q mesh spacing (1/angstrom)
* - `cutoff` cutoff distance (angstrom); *Experimental!*
*
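* In the notation of the code below (form factor F, pair distance r_ij), the
* accumulated intensity follows the Debye formula; a sketch, with the cut-off
* correction documented separately in sample():
*
* @f[ I(q) = \frac{1}{N}\left(\sum_i F_i^2(q) + 2\sum_{i<j} F_i(q)F_j(q)\,\frac{\sin(q r_{ij})}{q r_{ij}}\right) @f]
*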
* @see http://dx.doi.org/10.1016/S0022-2860(96)09302-7
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdouble-promotion"
template <class Tformfactor, class T = float> class DebyeFormula {
static constexpr T r_cutoff_infty = 1e9; //!< a cutoff distance in angstrom considered to be infinity
T q_mesh_min, q_mesh_max, q_mesh_step; //!< q-mesh parameters in inverse angstrom; used by the inlined q_mesh() helper
/**
* @param m mesh point index
* @return the scattering vector magnitude q at the mesh point m
*/
#pragma omp declare simd uniform(this) linear(m:1)
inline T q_mesh(int m) {
return q_mesh_min + m * q_mesh_step;
}
/**
* @brief Initialize mesh for intensity and sampling.
* @param q_min Minimum q-value to sample (1/A)
* @param q_max Maximum q-value to sample (1/A)
* @param q_step Spacing between mesh points (1/A)
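*
* Example (assumed values): q_min = 0.1, q_max = 1.0 and q_step = 0.01 give
* 1 + floor((1.0 - 0.1) / 0.01) = 91 mesh points.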
*/
void init_mesh(T q_min, T q_max, T q_step) {
if (q_step <= 0 || q_min <= 0 || q_max <= 0 || q_min > q_max ||
q_step / q_max < 4 * std::numeric_limits<T>::epsilon()) {
throw std::range_error("DebyeFormula: Invalid mesh parameters for q");
}
q_mesh_min = q_min < T(1e-6) ? q_step : q_min; // ensure that q > 0
q_mesh_max = q_max;
q_mesh_step = q_step;
try {
// resolution of the 1D mesh approximation of the scattering vector magnitude q
const int q_resolution = numeric_cast<int>(1.0 + std::floor((q_max - q_min) / q_step));
intensity.resize(q_resolution, 0.0);
sampling.resize(q_resolution, 0.0);
} catch (std::overflow_error &e) {
throw std::range_error("DebyeFormula: Too many samples");
}
}
Geometry::Sphere geo = Geometry::Sphere(r_cutoff_infty / 2); //!< geometry to use for distance calculations
T r_cutoff; //!< cut-off distance for scattering contributions (angstrom)
Tformfactor form_factor; //!< scattering from a single particle
std::vector<T> intensity; //!< sampled average I(q)
std::vector<T> sampling; //!< weighted number of samplings
public:
DebyeFormula(T q_min, T q_max, T q_step, T r_cutoff) : r_cutoff(r_cutoff) { init_mesh(q_min, q_max, q_step); }
DebyeFormula(T q_min, T q_max, T q_step) : DebyeFormula(q_min, q_max, q_step, r_cutoff_infty) {}
explicit DebyeFormula(const json &j)
: DebyeFormula(j.at("qmin").get<double>(), j.at("qmax").get<double>(), j.at("dq").get<double>(),
j.value("cutoff", r_cutoff_infty)){};
/**
* @brief Sample I(q) and add to average.
* @param p particle vector
* @param weight weight of sampled configuration in biased simulations
* @param volume simulation volume (angstrom cubed) used only for cut-off correction
*
* An isotropic correction is added beyond a given cut-off distance. For the physics details,
* @see https://debyer.readthedocs.org/en/latest/.
*
* O(N^2) * O(M) complexity where N is the number of particles and M the number of mesh points. The quadratic
* complexity in N comes from the fact that the radial distribution function has to be computed.
* The current implementation supports OpenMP parallelization. Roughly half of the execution time is spent
* on computing sine values, e.g., in sinf_avx2.
*/
template <class Tpvec> void sample(const Tpvec &p, const T weight = 1, const T volume = -1) {
const int N = (int) p.size(); // number of particles
const int M = (int) intensity.size(); // number of mesh points
std::vector<T> intensity_sum(M, 0.0);
// Allow parallelization with a hand written reduction of intensity_sum at the end.
// https://gcc.gnu.org/gcc-9/porting_to.html#ompdatasharing
// #pragma omp parallel default(none) shared(N, M) shared(geo, r_cutoff, p) shared(intensity_sum)
#pragma omp parallel default(shared) shared(intensity_sum)
{
std::vector<T> intensity_sum_private(M, 0.0); // a temporal private intensity_sum
#pragma omp for schedule(dynamic)
for (int i = 0; i < N - 1; ++i) {
for (int j = i + 1; j < N; ++j) {
T r = T(geo.sqdist(p[i], p[j])); // the square root follows
if (r < r_cutoff * r_cutoff) {
r = std::sqrt(r);
// Black magic: The q_mesh function must be inlineable otherwise the loop cannot be unrolled
// using advanced SIMD instructions leading to a huge performance penalty (a factor of 4).
// The unrolled loop uses a different sin implementation, which may be spotted when profiling.
// TODO: Optimize also for other compilers than GCC by using a vector math library, e.g.,
// TODO: https://github.com/vectorclass/version2
// #pragma GCC unroll 16 // for diagnostics, GCC issues warning when cannot unroll
for (int m = 0; m < M; ++m) {
const T q = q_mesh(m);
intensity_sum_private[m] +=
form_factor(q, p[i]) * form_factor(q, p[j]) * std::sin(q * r) / (q * r);
}
}
}
}
// reduce intensity_sum_private into intensity_sum
#pragma omp critical
std::transform(intensity_sum.begin(), intensity_sum.end(), intensity_sum_private.begin(),
intensity_sum.begin(), std::plus<T>());
}
// https://gcc.gnu.org/gcc-9/porting_to.html#ompdatasharing
// #pragma omp parallel for default(none) shared(N, M, weight, volume) shared(p, r_cutoff, intensity_sum) shared(sampling, intensity)
#pragma omp parallel for shared(sampling, intensity)
for (int m = 0; m < M; ++m) {
const T q = q_mesh(m);
T intensity_self_sum = 0;
for (int i = 0; i < N; ++i) {
intensity_self_sum += std::pow(form_factor(q, p[i]), 2);
}
T intensity_corr = 0;
if (r_cutoff < r_cutoff_infty && volume > 0) {
intensity_corr = 4 * pc::pi * N / (volume * std::pow(q, 3)) *
(q * r_cutoff * std::cos(q * r_cutoff) - std::sin(q * r_cutoff));
}
sampling[m] += weight;
intensity[m] += ((2 * intensity_sum[m] + intensity_self_sum) / N + intensity_corr) * weight;
}
}
/**
* @return a tuple of the min, max, and step parameters of the q-mesh
*/
auto getQMeshParameters() {
return std::make_tuple(q_mesh_min, q_mesh_max, q_mesh_step);
}
/**
* @return a map containing q (key) and average intensity (value)
*/
auto getIntensity() {
std::map<T, T> averaged_intensity;
for (size_t m = 0; m < intensity.size(); ++m) {
const T average = intensity[m] / (sampling[m] != T(0.0) ? sampling[m] : T(1.0));
averaged_intensity.emplace(q_mesh(m), average);
}
return averaged_intensity;
}
};
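// Usage sketch (hypothetical values; `particles` is any particle vector with
// a radius member, as required by FormFactorSphere):
//   DebyeFormula<FormFactorSphere<float>> debye(0.05, 1.0, 0.005); // qmin, qmax, dq (1/A)
//   debye.sample(particles);                                       // accumulate one configuration
//   auto intensity = debye.getIntensity();                         // map: q -> averaged I(q)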
#pragma GCC diagnostic pop
/**
* A policy for collecting samples. To be used together with StructureFactor class templates.
*
* @tparam T float or double
*/
template <typename T> class SamplingPolicy {
public:
struct sampled_value {
T value;
T weight;
};
typedef std::map<T, sampled_value> TSampledValueMap;
private:
TSampledValueMap samples;
const T precision = 10000.0; //!< precision of the key for better binning
public:
std::map<T, T> getSampling() const {
std::map<T, T> average;
for (auto [key, sample] : samples) {
average.emplace(key, sample.value / sample.weight);
}
return average;
}
/**
* @param key_approx is subject of rounding for better binning
* @param value
* @param weight
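*
* Example: with the default precision of 10000, key_approx = 0.123456 is
* binned under key std::round(0.123456 * 10000) / 10000 = 0.1235.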
*/
void addSampling(T key_approx, T value, T weight = 1.0) {
const T key = std::round(key_approx * precision) / precision; // round |q| for better binning
samples[key].value += value * weight;
samples[key].weight += weight;
}
};
/**
* @brief Calculate structure factor using explicit q averaging.
*
* This averages over the thirteen permutations of the Miller index [100], [110], [101] using:
*
* @f[ S(\mathbf{q}) = \frac{1}{N} \left <
* \left ( \sum_i^N \sin(\mathbf{qr}_i) \right )^2 +
* \left ( \sum_j^N \cos(\mathbf{qr}_j) \right )^2
* \right >
* @f]
*
* For more information, see @see http://doi.org/d8zgw5 and @see http://doi.org/10.1063/1.449987.
*/
template <typename T = double, Algorithm method = SIMD, typename TSamplingPolicy = SamplingPolicy<T>>
class StructureFactorPBC : private TSamplingPolicy {
//! sample directions (h,k,l)
const std::vector<Point> directions = {
{1, 0, 0}, {0, 1, 0}, {0, 0, 1}, // 3 permutations
{1, 1, 0}, {0, 1, 1}, {1, 0, 1}, {-1, 1, 0}, {-1, 0, 1}, {0, -1, 1}, // 6 permutations
{1, 1, 1}, {-1, 1, 1}, {1, -1, 1}, {1, 1, -1} // 4 permutations
};
const int p_max; //!< multiples of q to be sampled
using TSamplingPolicy::addSampling;
public:
StructureFactorPBC(int q_multiplier) : p_max(q_multiplier){}
/**
* https://gcc.gnu.org/gcc-9/porting_to.html#ompdatasharing
* #pragma omp parallel for collapse(2) default(none) shared(directions, p_max, boxlength) shared(positions)
*/
template <typename Tpositions> void sample(const Tpositions& positions, const Point& boxlength) {
#pragma omp parallel for collapse(2) default(shared)
for (size_t i = 0; i < directions.size(); ++i) { // openmp req. tradional loop
for (int p = 1; p <= p_max; ++p) { // loop over multiples of q
const Point q = 2.0 * pc::pi * p * directions[i].cwiseQuotient(boxlength); // scattering vector
const auto s_of_q = calculateStructureFactor(positions, q);
#pragma omp critical // avoid race conditions when updating the map
addSampling(q.norm(), s_of_q, 1.0);
}
}
}
template <typename Tpositions> T calculateStructureFactor(const Tpositions& positions, const Point& q) const {
T sum_cos = 0.0;
T sum_sin = 0.0;
if constexpr (method == SIMD) {
// When sine and cosine are computed in separate loops, sine and cosine SIMD
// instructions may be used, giving at least a 4x performance boost.
// Note January 2020: only GCC exploits this, via the libmvec library, and only if --ffast-math is enabled.
auto dot_product = [q](const auto& pos) { return static_cast<T>(q.dot(pos)); };
auto qdotr = positions | ranges::cpp20::views::transform(dot_product) | ranges::to<std::vector>;
std::for_each(qdotr.begin(), qdotr.end(), [&](auto qr) { sum_cos += cos(qr); });
std::for_each(qdotr.begin(), qdotr.end(), [&](auto qr) { sum_sin += sin(qr); });
} else if constexpr (method == EIGEN) {
// Map is a Nx3 matrix facade into original positions (std::vector)
using namespace Eigen;
static_assert(std::is_same_v<Tpositions, std::vector<Point>>);
auto qdotr =
(Map<MatrixXd, 0, Stride<1, 3>>((double*)positions.data(), positions.size(), 3) * q).array().eval();
sum_cos = qdotr.cast<T>().cos().sum();
sum_sin = qdotr.cast<T>().sin().sum();
} else if constexpr (method == GENERIC) {
for (const auto& r : positions) {
const auto qr = static_cast<T>(q.dot(r));
sum_cos += cos(qr); // sine and cosine in same loop obstructs
sum_sin += sin(qr); // vectorization on most compilers...
}
};
return std::norm(std::complex<T>(sum_cos, sum_sin)) / static_cast<T>(positions.size());
}
int getQMultiplier() {
return p_max;
}
using TSamplingPolicy::getSampling;
};
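// Usage sketch (hypothetical values; Point is the 3D vector type used above):
//   StructureFactorPBC<double, SIMD> sf(4);        // sample q multiples 1..4
//   sf.sample(positions, Point(80.0, 80.0, 80.0)); // one frame, 80 A cubic box
//   auto s_of_q = sf.getSampling();                // map: |q| -> averaged S(q)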
/**
* @brief Calculate structure factor using explicit q averaging in isotropic periodic boundary conditions (IPBC).
*
* The sample directions reduce to 3, compared to 13 in regular periodic boundary conditions. This overall
* simplification yields roughly 10 times faster computation.
*/
template <typename T = float, typename TSamplingPolicy = SamplingPolicy<T>> class StructureFactorIPBC : private TSamplingPolicy {
//! Sample directions (h,k,l).
//! Due to the symmetry in IPBC we need not consider permutations of directions.
std::vector<Point> directions = {{1, 0, 0}, {1, 1, 0}, {1, 1, 1}};
int p_max; //!< multiples of q to be sampled
using TSamplingPolicy::addSampling;
public:
explicit StructureFactorIPBC(int q_multiplier) : p_max(q_multiplier) {}
template <class Tpositions> void sample(const Tpositions &positions, const Point &boxlength) {
// https://gcc.gnu.org/gcc-9/porting_to.html#ompdatasharing
// #pragma omp parallel for collapse(2) default(none) shared(directions, p_max, positions, boxlength)
#pragma omp parallel for collapse(2) default(shared)
for (size_t i = 0; i < directions.size(); ++i) {
for (int p = 1; p <= p_max; ++p) { // loop over multiples of q
const Point q = 2.0 * pc::pi * p * directions[i].cwiseQuotient(boxlength); // scattering vector
T sum_cos = 0;
for (auto &r : positions) { // loop over positions
// if q[i] == 0, its cosine equals 1, so that factor can be skipped for performance
T product = std::cos(T(q[0] * r[0]));
if (q[1] != 0)
product *= std::cos(T(q[1] * r[1]));
if (q[2] != 0)
product *= std::cos(T(q[2] * r[2]));
sum_cos += product;
}
// collect average; `norm()` gives the scattering vector length
const T ipbc_factor = std::pow(2, directions[i].count()); // 2 ^ number of non-zero elements
const T sf = (sum_cos * sum_cos) / static_cast<T>(positions.size()) * ipbc_factor;
#pragma omp critical
// avoid race conditions when updating the map
addSampling(q.norm(), sf, 1.0);
}
}
}
int getQMultiplier() {
return p_max;
}
using TSamplingPolicy::getSampling;
};
} // namespace Scatter
} // namespace Faunus
|
pyfr_driver_asp_reg.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
# include <mkl.h>
#else /* prototypes for GEMM */
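/* naive column-major reference GEMM computing C := beta * C + alpha * A * B;
   a correctness fallback when MKL is unavailable, not tuned for performance */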
void my_dgemm( const int* M, const int* N, const int* K, const double* alpha,
const double* a, const int* LDA, const double* b, const int* LDB,
const double* beta, double* c, const int* LDC ) {
const int my_M = *M;
const int my_N = *N;
const int my_K = *K;
const int my_LDA = *LDA;
const int my_LDB = *LDB;
const int my_LDC = *LDC;
const double my_alpha = *alpha;
const double my_beta = *beta;
int m = 0, n = 0, k = 0;
for ( n = 0; n < my_N; ++n ) {
for ( m = 0; m < my_M; ++m ) {
c[(n * my_LDC) + m] = my_beta * c[(n * my_LDC) + m];
for ( k = 0; k < my_K; ++k ) {
c[(n * my_LDC) + m] += my_alpha * a[(k * my_LDA) + m] * b[(n * my_LDB) + k];
}
}
}
}
#endif
#define REPS 100
#define REALTYPE double
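/* Expected text input (MatrixMarket-like, 1-based indices; layout inferred
 * from the parser below):
 *   % comment header lines
 *   <row_count> <column_count> <element_count>
 *   <row> <column> <value> (one line per non-zero element)
 */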
int my_csr_reader( const char* i_csr_file_in,
unsigned int** o_row_idx,
unsigned int** o_column_idx,
REALTYPE** o_values,
unsigned int* o_row_count,
unsigned int* o_column_count,
unsigned int* o_element_count ) {
FILE *l_csr_file_handle;
const unsigned int l_line_length = 512;
char l_line[512/*l_line_length*/+1];
unsigned int l_header_read = 0;
unsigned int* l_row_idx_id = NULL;
unsigned int l_i = 0;
l_csr_file_handle = fopen( i_csr_file_in, "r" );
if ( l_csr_file_handle == NULL ) {
fprintf( stderr, "cannot open CSR file!\n" );
return -1;
}
while (fgets(l_line, l_line_length + 1, l_csr_file_handle) != NULL) {
if ( strlen(l_line) == l_line_length ) {
fprintf( stderr, "line exceeds maximum supported length!\n" );
return -1;
}
/* check if we are still reading comments header */
if ( l_line[0] == '%' ) {
continue;
} else {
/* if we are the first line after comment header, we allocate our data structures */
if ( l_header_read == 0 ) {
if (3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&
0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count)
{
/* allocate CSR data structure matching mtx file */
*o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_element_count));
*o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_row_count + 1));
*o_values = (REALTYPE*) malloc(sizeof(double) * ((size_t)*o_element_count));
l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * ((size_t)*o_row_count));
/* check if mallocs were successful */
if ( ( *o_row_idx == NULL ) ||
( *o_column_idx == NULL ) ||
( *o_values == NULL ) ||
( l_row_idx_id == NULL ) ) {
fprintf( stderr, "could not allocate sp data!\n" );
return -1;
}
/* set everything to zero for init */
memset(*o_row_idx, 0, sizeof(unsigned int)*((size_t)*o_row_count + 1));
memset(*o_column_idx, 0, sizeof(unsigned int)*((size_t)*o_element_count));
memset(*o_values, 0, sizeof(double)*((size_t)*o_element_count));
memset(l_row_idx_id, 0, sizeof(unsigned int)*((size_t)*o_row_count));
/* init row idx */
for ( l_i = 0; l_i < (*o_row_count + 1); l_i++)
(*o_row_idx)[l_i] = (*o_element_count);
/* init */
(*o_row_idx)[0] = 0;
l_i = 0;
l_header_read = 1;
} else {
fprintf( stderr, "could not csr description!\n" );
return -1;
}
/* now we read the actual content */
} else {
unsigned int l_row, l_column;
REALTYPE l_value;
/* read a line of content */
if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
fprintf( stderr, "could not read element!\n" );
return -1;
}
/* adjust indices to zero-based numbering */
l_row--;
l_column--;
/* add these values to row and value structure */
(*o_column_idx)[l_i] = l_column;
(*o_values)[l_i] = l_value;
l_i++;
/* mark this row as populated; empty rows are fixed up after reading */
l_row_idx_id[l_row] = 1;
(*o_row_idx)[l_row+1] = l_i;
}
}
}
/* close mtx file */
fclose( l_csr_file_handle );
/* check if we read a file which was consistent */
if ( l_i != (*o_element_count) ) {
fprintf( stderr, "we were not able to read all elements!\n" );
return -1;
}
/* let's handle empty rows */
for ( l_i = 0; l_i < (*o_row_count); l_i++) {
assert(NULL != l_row_idx_id);
if ( l_row_idx_id[l_i] == 0 ) {
(*o_row_idx)[l_i+1] = (*o_row_idx)[l_i];
}
}
/* free helper data structure */
if ( l_row_idx_id != NULL ) {
free( l_row_idx_id );
}
return 0;
}
int main(int argc, char* argv[]) {
char* l_csr_file;
REALTYPE* l_a_sp;
unsigned int* l_rowptr;
unsigned int* l_colidx;
unsigned int l_rowcount, l_colcount, l_elements;
REALTYPE* l_a_dense;
REALTYPE* l_b;
REALTYPE* l_c_betaone;
REALTYPE* l_c_betazero;
REALTYPE* l_c_gold_betaone;
REALTYPE* l_c_gold_betazero;
REALTYPE* l_c_dense_betaone;
REALTYPE* l_c_dense_betazero;
REALTYPE l_max_error = 0.0;
int l_m;
int l_n;
int l_k;
int l_i;
int l_j;
int l_z;
int l_elems;
int l_reps;
int l_n_block;
libxsmm_timer_tickint l_start, l_end;
double l_total;
double alpha = 1.0;
double beta = 1.0;
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
char trans = 'N';
#endif
libxsmm_dfsspmdm* gemm_op_betazero = NULL;
libxsmm_dfsspmdm* gemm_op_betaone = NULL;
if (argc != 4) {
fprintf( stderr, "need csr-filename N reps!\n" );
exit(-1);
}
/* read sparse A */
l_csr_file = argv[1];
l_n = atoi(argv[2]);
l_reps = atoi(argv[3]);
if (my_csr_reader( l_csr_file,
&l_rowptr,
&l_colidx,
&l_a_sp,
&l_rowcount, &l_colcount, &l_elements ) != 0 )
{
exit(-1);
}
l_m = l_rowcount;
l_k = l_colcount;
printf("CSR matrix data structure we just read:\n");
printf("rows: %u, columns: %u, elements: %u\n", l_rowcount, l_colcount, l_elements);
/* allocate dense matrices */
l_a_dense = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_k * l_m, 64);
l_b = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_k * l_n, 64);
l_c_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_gold_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_gold_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_dense_betazero = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
l_c_dense_betaone = (REALTYPE*)libxsmm_aligned_malloc(sizeof(REALTYPE) * l_m * l_n, 64);
/* touch B */
for ( l_i = 0; l_i < l_k*l_n; l_i++) {
l_b[l_i] = (REALTYPE)libxsmm_rng_f64();
}
/* touch dense A */
for ( l_i = 0; l_i < l_k*l_m; l_i++) {
l_a_dense[l_i] = (REALTYPE)0.0;
}
/* init dense A using sparse A */
for ( l_i = 0; l_i < l_m; l_i++ ) {
l_elems = l_rowptr[l_i+1] - l_rowptr[l_i];
for ( l_z = 0; l_z < l_elems; l_z++ ) {
l_a_dense[(l_i*l_k)+l_colidx[l_rowptr[l_i]+l_z]] = l_a_sp[l_rowptr[l_i]+l_z];
}
}
/* touch C */
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_gold_betaone[l_i] = (REALTYPE)libxsmm_rng_f64();
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_betaone[l_i] = l_c_gold_betaone[l_i];
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_dense_betaone[l_i] = l_c_gold_betaone[l_i];
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_betazero[l_i] = l_c_betaone[l_i];
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_gold_betazero[l_i] = l_c_gold_betaone[l_i];
}
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
l_c_dense_betazero[l_i] = l_c_dense_betaone[l_i];
}
/* setting up fsspmdm */
l_n_block = 48;
beta = 0.0;
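  /* Each generated kernel handles an N-block of l_n_block = 48 columns at a time.
   * Note (an assumption about the libxsmm_dfsspmdm_create signature, not verified
   * here): the integer flag passed before l_a_dense is taken to request
   * non-temporal stores of C, which is only safe when beta = 0. */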
gemm_op_betazero = libxsmm_dfsspmdm_create( l_m, l_n_block, l_k, l_k, l_n, l_n, 1.0, beta, 1, l_a_dense );
beta = 1.0;
gemm_op_betaone = libxsmm_dfsspmdm_create( l_m, l_n_block, l_k, l_k, l_n, l_n, 1.0, beta, 0, l_a_dense );
/* compute golden results */
printf("computing golden solution...\n");
for ( l_j = 0; l_j < l_n; l_j++ ) {
for (l_i = 0; l_i < l_m; l_i++ ) {
l_elems = l_rowptr[l_i+1] - l_rowptr[l_i];
l_c_gold_betazero[(l_n*l_i) + l_j] = 0.0;
for (l_z = 0; l_z < l_elems; l_z++) {
l_c_gold_betazero[(l_n*l_i) + l_j] += l_a_sp[l_rowptr[l_i]+l_z] * l_b[(l_n*l_colidx[l_rowptr[l_i]+l_z])+l_j];
}
}
}
for ( l_j = 0; l_j < l_n; l_j++ ) {
for (l_i = 0; l_i < l_m; l_i++ ) {
l_elems = l_rowptr[l_i+1] - l_rowptr[l_i];
for (l_z = 0; l_z < l_elems; l_z++) {
l_c_gold_betaone[(l_n*l_i) + l_j] += l_a_sp[l_rowptr[l_i]+l_z] * l_b[(l_n*l_colidx[l_rowptr[l_i]+l_z])+l_j];
}
}
}
printf("...done!\n");
/* libxsmm generated code */
printf("computing libxsmm (A sparse) solution...\n");
#ifdef _OPENMP
#pragma omp parallel for private(l_z)
#endif
for (l_z = 0; l_z < l_n; l_z+=l_n_block) {
libxsmm_dfsspmdm_execute( gemm_op_betazero, l_b+l_z, l_c_betazero+l_z );
}
#ifdef _OPENMP
#pragma omp parallel for private(l_z)
#endif
for (l_z = 0; l_z < l_n; l_z+=l_n_block) {
libxsmm_dfsspmdm_execute( gemm_op_betaone, l_b+l_z, l_c_betaone+l_z );
}
printf("...done!\n");
/* BLAS code */
printf("computing BLAS (A dense) solution...\n");
beta = 0.0;
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm( &trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n );
#else
my_dgemm( &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n );
#endif
beta = 1.0;
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm( &trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n );
#else
my_dgemm( &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n );
#endif
printf("...done!\n");
/* check for errors */
l_max_error = (REALTYPE)0.0;
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
if (fabs(l_c_betazero[l_i]-l_c_gold_betazero[l_i]) > l_max_error ) {
l_max_error = fabs(l_c_betazero[l_i]-l_c_gold_betazero[l_i]);
}
}
printf("max error beta=0 (libxmm vs. gold): %f\n", l_max_error);
l_max_error = (REALTYPE)0.0;
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
if (fabs(l_c_betaone[l_i]-l_c_gold_betaone[l_i]) > l_max_error ) {
l_max_error = fabs(l_c_betaone[l_i]-l_c_gold_betaone[l_i]);
}
}
printf("max error beta=1 (libxmm vs. gold): %f\n", l_max_error);
l_max_error = (REALTYPE)0.0;
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
if (fabs(l_c_dense_betazero[l_i]-l_c_gold_betazero[l_i]) > l_max_error ) {
l_max_error = fabs(l_c_dense_betazero[l_i]-l_c_gold_betazero[l_i]);
}
}
printf("max error beta=0 (dense vs. gold): %f\n", l_max_error);
l_max_error = (REALTYPE)0.0;
for ( l_i = 0; l_i < l_m*l_n; l_i++) {
if (fabs(l_c_dense_betaone[l_i]-l_c_gold_betaone[l_i]) > l_max_error ) {
l_max_error = fabs(l_c_dense_betaone[l_i]-l_c_gold_betaone[l_i]);
}
}
printf("max error beta=1 (dense vs. gold): %f\n", l_max_error);
/* Let's measure performance */
l_start = libxsmm_timer_tick();
for ( l_j = 0; l_j < l_reps; l_j++ ) {
#ifdef _OPENMP
#pragma omp parallel for private(l_z)
#endif
for (l_z = 0; l_z < l_n; l_z+=l_n_block) {
libxsmm_dfsspmdm_execute( gemm_op_betazero, l_b+l_z, l_c_betazero+l_z );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, l_total/(double)l_reps );
fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f (sparse)\n", l_m, l_n, l_k, (2.0 * (double)l_elements * (double)l_n * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f (dense)\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s LIBXSMM (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * (((double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total );
l_start = libxsmm_timer_tick();
for ( l_j = 0; l_j < l_reps; l_j++ ) {
#ifdef _OPENMP
#pragma omp parallel for private(l_z)
#endif
for (l_z = 0; l_z < l_n; l_z+=l_n_block) {
libxsmm_dfsspmdm_execute( gemm_op_betaone, l_b+l_z, l_c_betaone+l_z );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, l_total/(double)l_reps );
fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f (sparse)\n", l_m, l_n, l_k, (2.0 * (double)l_elements * (double)l_n * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GFLOPS LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f (dense)\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s LIBXSMM (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total );
l_start = libxsmm_timer_tick();
beta = 0.0;
for ( l_j = 0; l_j < l_reps; l_j++ ) {
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm( &trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n );
#else
my_dgemm( &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betazero, &l_n );
#endif
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, l_total/(double)l_reps );
fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i, beta=0): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total );
l_start = libxsmm_timer_tick();
beta = 1.0;
for ( l_j = 0; l_j < l_reps; l_j++ ) {
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm( &trans, &trans, &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n );
#else
my_dgemm( &l_n, &l_m, &l_k, &alpha, l_b, &l_n, l_a_dense, &l_k, &beta, l_c_dense_betaone, &l_n );
#endif
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, l_total/(double)l_reps );
fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, (2.0 * (double)l_m * (double)l_n * (double)l_k * (double)l_reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i, beta=1): %f\n", l_m, l_n, l_k, ((double)sizeof(double) * ((2.0*(double)l_m * (double)l_n) + ((double)l_k * (double)l_n)) * (double)l_reps * 1.0e-9) / l_total );
/* free */
libxsmm_dfsspmdm_destroy( gemm_op_betazero );
libxsmm_dfsspmdm_destroy( gemm_op_betaone );
libxsmm_free( l_a_dense );
libxsmm_free( l_b );
libxsmm_free( l_c_betazero );
libxsmm_free( l_c_betaone );
libxsmm_free( l_c_gold_betazero );
libxsmm_free( l_c_gold_betaone );
libxsmm_free( l_c_dense_betazero );
libxsmm_free( l_c_dense_betaone );
return EXIT_SUCCESS;
}
|
04_touch_by_all_right_alloc.c | /*
* COMPILE LINE (icc): -Ofast -fno-alias -xCORE-AVX2 -xHost -fma -use-intel-optimized-headers -falign-loops -qopenmp -parallel -pthread -ipo -vec
*/
#define _XOPEN_SOURCE 700
#define _GNU_SOURCE
#define N_default 1000
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
int main(int argc, char** argv)
{
unsigned long int N = N_default;
unsigned long int nthreads = 1;
double S = 0;
if (argc > 1)
N = atoi(*(argv + 1));
N++;
#pragma omp parallel default(none) shared(N, nthreads, S)
{
#pragma omp master
{
nthreads = omp_get_num_threads();
}
unsigned long int me = omp_get_thread_num();
// No need to do this inside a `single` subregion or outside the `parallel` region.
// The computational overhead due to duplication is minimal for realistic nthreads,
// and this way there is no need for `barrier`s.
unsigned long int partlen_base = (unsigned long int)(N / nthreads);
unsigned long int remaind = N - partlen_base * nthreads;
unsigned long int true_partlen = partlen_base + (unsigned long int)(me < remaind);
unsigned long int mystart = me * true_partlen + remaind * (unsigned long int)(me >= remaind);
unsigned long int mystop = mystart + true_partlen;
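// Example: N = 10, nthreads = 4 -> partlen_base = 2, remaind = 2,
// per-thread lengths 3,3,2,2 and starts 0,3,6,8 (threads with
// me < remaind get one extra element).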
double* array;
// MEMORY ALLOCATION
array = (double*)malloc(true_partlen * sizeof(double));
// MEMORY INITIALIZATION
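// Note: the bare `#pragma parallel` below is an icc auto-parallelization hint
// (cf. the compile line at the top of this file), not an OpenMP directive;
// other compilers should simply ignore the unknown pragma.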
#pragma parallel
for (unsigned long int ii = mystart; ii < mystop; ii++)
{
array[ii - mystart] = (double)ii;
}
double Slocal = 0; // this will store the summation
#pragma parallel
for (unsigned long int jj = 0; jj < true_partlen; jj++)
{
Slocal += array[jj];
}
free(array);
#pragma omp critical
S += Slocal;
}
printf("%g SUM\n\n\n", S);
return 0;
}
|
flops_AVX.h | /* flops_AVX.h - AVX Benchmarks
*
* Author : Alexander J. Yee
* Date Created : 10/21/2011
* Last Modified : 01/25/2012
*
*
*
* And of course... The typical copyright stuff...
*
* Redistribution of this program in both source or binary, regardless of
* form, with or without modification is permitted as long as the following
* conditions are met:
* 1. This copyright notice is maintained either inline in the source
* or distributed with the binary.
* 2. A list of all contributing authors along with their contributions
* is included either inline in the source or distributed with the
* binary.
* 3. The following disclaimer is maintained either inline in the
* source or distributed with the binary.
*
* Disclaimer:
* This software is provided "as is", without any guarantee made to its
* suitability or fitness for any particular use. It may contain bugs so use
* of this program is at your own risk. I take no responsibility for any
* damage that may unintentionally be caused through its use.
*/
#ifndef _AVX_h
#define _AVX_h
#include <immintrin.h>
#include "flops.h"
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
double test_dp_add_AVX_internal(double x, double y, size_t iterations){
register __m256d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB;
r0 = _mm256_set1_pd(x);
r1 = _mm256_set1_pd(y);
r8 = _mm256_set1_pd(-0.0);
r9 = _mm256_set1_pd(0.5);
r2 = _mm256_xor_pd(r0, r8);
r3 = _mm256_or_pd(r0, r8);
r4 = _mm256_andnot_pd(r8, r0);
r5 = _mm256_mul_pd(r1, r9);
r6 = _mm256_add_pd(r1, r9);
r7 = _mm256_sub_pd(r1, r9);
r8 = _mm256_add_pd(r0, _mm256_set1_pd(2.3));
r9 = _mm256_sub_pd(r1, _mm256_set1_pd(2.3));
uint64 iMASK = 0x800fffffffffffffull;
__m256d MASK = _mm256_set1_pd(*(double*)&iMASK);
__m256d vONE = _mm256_set1_pd(1.0);
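    // MASK clears the exponent field (keeping sign and mantissa); OR-ing with
    // 1.0 then forces the exponent back to that of 1.0, so the register values
    // stay in a narrow range and never overflow or underflow across iterations.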
rA = _mm256_set1_pd(0.1);
rB = _mm256_set1_pd(0.1001);
// wclk start = wclk_now();
size_t c = 0;
while (c < iterations){
size_t i = 0;
while (i < 1000){
r0 = _mm256_add_pd(r0, rA);
r1 = _mm256_add_pd(r1, rA);
r2 = _mm256_add_pd(r2, rA);
r3 = _mm256_add_pd(r3, rA);
r4 = _mm256_add_pd(r4, rA);
r5 = _mm256_add_pd(r5, rA);
r6 = _mm256_add_pd(r6, rA);
r7 = _mm256_add_pd(r7, rA);
r8 = _mm256_add_pd(r8, rA);
r9 = _mm256_add_pd(r9, rA);
r0 = _mm256_sub_pd(r0, rB);
r1 = _mm256_sub_pd(r1, rB);
r2 = _mm256_sub_pd(r2, rB);
r3 = _mm256_sub_pd(r3, rB);
r4 = _mm256_sub_pd(r4, rB);
r5 = _mm256_sub_pd(r5, rB);
r6 = _mm256_sub_pd(r6, rB);
r7 = _mm256_sub_pd(r7, rB);
r8 = _mm256_sub_pd(r8, rB);
r9 = _mm256_sub_pd(r9, rB);
//r8 = _mm256_add_pd(r0, r1);
//r9 = _mm256_add_pd(r2, r3);
//rA = _mm256_add_pd(r4, r5);
//rB = _mm256_add_pd(r6, r7);
//r0 = _mm256_sub_pd(r0, r4);
//r1 = _mm256_sub_pd(r1, r5);
//r2 = _mm256_sub_pd(r2, r6);
//r3 = _mm256_sub_pd(r3, r7);
//r4 = _mm256_add_pd(r4, r8);
//r5 = _mm256_add_pd(r5, r9);
//r6 = _mm256_add_pd(r6, rA);
//r7 = _mm256_add_pd(r7, rB);
i++;
}
//print(r0);
//print(r1);
//print(r2);
//print(r3);
//print(r4);
//print(r5);
//print(r6);
//print(r7);
//cout << endl;
r0 = _mm256_and_pd(r0, MASK);
r1 = _mm256_and_pd(r1, MASK);
r2 = _mm256_and_pd(r2, MASK);
r3 = _mm256_and_pd(r3, MASK);
r4 = _mm256_and_pd(r4, MASK);
r5 = _mm256_and_pd(r5, MASK);
r6 = _mm256_and_pd(r6, MASK);
r7 = _mm256_and_pd(r7, MASK);
r8 = _mm256_and_pd(r8, MASK);
r9 = _mm256_and_pd(r9, MASK);
r0 = _mm256_or_pd(r0, vONE);
r1 = _mm256_or_pd(r1, vONE);
r2 = _mm256_or_pd(r2, vONE);
r3 = _mm256_or_pd(r3, vONE);
r4 = _mm256_or_pd(r4, vONE);
r5 = _mm256_or_pd(r5, vONE);
r6 = _mm256_or_pd(r6, vONE);
r7 = _mm256_or_pd(r7, vONE);
r8 = _mm256_or_pd(r8, vONE);
r9 = _mm256_or_pd(r9, vONE);
c++;
}
// wclk end = wclk_now();
// double secs = wclk_secs_since(start);
// uint64 ops = 12 * 1000 * c * 4;
// cout << "Seconds = " << secs << endl;
// cout << "FP Ops = " << ops << endl;
// cout << "FLOPs = " << ops / secs << endl;
r0 = _mm256_add_pd(r0, r1);
r2 = _mm256_add_pd(r2, r3);
r4 = _mm256_add_pd(r4, r5);
r6 = _mm256_add_pd(r6, r7);
r8 = _mm256_add_pd(r8, r9);
r0 = _mm256_add_pd(r0, r2);
r4 = _mm256_add_pd(r4, r6);
r0 = _mm256_add_pd(r0, r4);
r0 = _mm256_add_pd(r0, r8);
double out = 0;
__m256d tmp = r0;
out += ((double*)&tmp)[0];
out += ((double*)&tmp)[1];
out += ((double*)&tmp)[2];
out += ((double*)&tmp)[3];
return out;
}
void test_dp_add_AVX(int tds, size_t iterations){
printf("Testing AVX Add:\n");
double *sum = (double*)malloc(tds * sizeof(double));
wclk start = wclk_now();
#pragma omp parallel num_threads(tds)
{
double ret = test_dp_add_AVX_internal(1.1, 2.1, iterations);
sum[omp_get_thread_num()] = ret;
}
double secs = wclk_secs_since(start);
uint64 ops = 20 * 1000 * iterations * tds * 4;
printf("Seconds = %g\n", secs);
printf("FP Ops = %llu\n", (unsigned long long)ops);
printf("FLOPs = %g\n", ops / secs);
double out = 0;
int c = 0;
while (c < tds){
out += sum[c++];
}
printf("sum = %g\n\n", out);
free(sum);
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
double test_dp_mul_AVX_internal(double x, double y, size_t iterations){
register __m256d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB;
r0 = _mm256_set1_pd(x);
r1 = _mm256_set1_pd(y);
r8 = _mm256_set1_pd(-0.0);
r2 = _mm256_xor_pd(r0, r8);
r3 = _mm256_or_pd(r0, r8);
r4 = _mm256_andnot_pd(r8, r0);
r5 = _mm256_mul_pd(r1, _mm256_set1_pd(0.37796447300922722721));
r6 = _mm256_mul_pd(r1, _mm256_set1_pd(0.24253562503633297352));
r7 = _mm256_mul_pd(r1, _mm256_set1_pd(4.1231056256176605498));
r8 = _mm256_add_pd(r0, _mm256_set1_pd(2.3));
r9 = _mm256_sub_pd(r1, _mm256_set1_pd(2.3));
// r8 = _mm256_set1_pd(1.4142135623730950488);
// r9 = _mm256_set1_pd(1.7320508075688772935);
// rA = _mm256_set1_pd(0.57735026918962576451);
// rB = _mm256_set1_pd(0.70710678118654752440);
rA = _mm256_set1_pd(1.4142135623730950488);
rB = _mm256_set1_pd(0.70710678118654752440);
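    // rA * rB == 1, so one multiply by each leaves the magnitude unchanged
    // and the values cannot drift toward overflow or underflow.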
uint64 iMASK = 0x800fffffffffffffull;
__m256d MASK = _mm256_set1_pd(*(double*)&iMASK);
__m256d vONE = _mm256_set1_pd(1.0);
size_t c = 0;
while (c < iterations){
size_t i = 0;
while (i < 1000){
r0 = _mm256_mul_pd(r0, rA);
r1 = _mm256_mul_pd(r1, rA);
r2 = _mm256_mul_pd(r2, rA);
r3 = _mm256_mul_pd(r3, rA);
r4 = _mm256_mul_pd(r4, rA);
r5 = _mm256_mul_pd(r5, rA);
r6 = _mm256_mul_pd(r6, rA);
r7 = _mm256_mul_pd(r7, rA);
r8 = _mm256_mul_pd(r8, rA);
r9 = _mm256_mul_pd(r9, rA);
r0 = _mm256_mul_pd(r0, rB);
r1 = _mm256_mul_pd(r1, rB);
r2 = _mm256_mul_pd(r2, rB);
r3 = _mm256_mul_pd(r3, rB);
r4 = _mm256_mul_pd(r4, rB);
r5 = _mm256_mul_pd(r5, rB);
r6 = _mm256_mul_pd(r6, rB);
r7 = _mm256_mul_pd(r7, rB);
r8 = _mm256_mul_pd(r8, rB);
r9 = _mm256_mul_pd(r9, rB);
i++;
}
//print(r0);
//print(r1);
//print(r2);
//print(r3);
//print(r4);
//print(r5);
//print(r6);
//print(r7);
//cout << endl;
r0 = _mm256_and_pd(r0, MASK);
r1 = _mm256_and_pd(r1, MASK);
r2 = _mm256_and_pd(r2, MASK);
r3 = _mm256_and_pd(r3, MASK);
r4 = _mm256_and_pd(r4, MASK);
r5 = _mm256_and_pd(r5, MASK);
r6 = _mm256_and_pd(r6, MASK);
r7 = _mm256_and_pd(r7, MASK);
r8 = _mm256_and_pd(r8, MASK);
r9 = _mm256_and_pd(r9, MASK);
r0 = _mm256_or_pd(r0, vONE);
r1 = _mm256_or_pd(r1, vONE);
r2 = _mm256_or_pd(r2, vONE);
r3 = _mm256_or_pd(r3, vONE);
r4 = _mm256_or_pd(r4, vONE);
r5 = _mm256_or_pd(r5, vONE);
r6 = _mm256_or_pd(r6, vONE);
r7 = _mm256_or_pd(r7, vONE);
r8 = _mm256_or_pd(r8, vONE);
r9 = _mm256_or_pd(r9, vONE);
c++;
}
// wclk end = wclk_now();
// double secs = wclk_secs_since(start);
// uint64 ops = 12 * 1000 * c * 2;
// cout << "Seconds = " << secs << endl;
// cout << "FP Ops = " << ops << endl;
// cout << "FLOPs = " << ops / secs << endl;
r0 = _mm256_add_pd(r0, r1);
r2 = _mm256_add_pd(r2, r3);
r4 = _mm256_add_pd(r4, r5);
r6 = _mm256_add_pd(r6, r7);
r8 = _mm256_add_pd(r8, r9);
r0 = _mm256_add_pd(r0, r2);
r4 = _mm256_add_pd(r4, r6);
r0 = _mm256_add_pd(r0, r4);
r0 = _mm256_add_pd(r0, r8);
double out = 0;
__m256d tmp = r0;
out += ((double*)&tmp)[0];
out += ((double*)&tmp)[1];
out += ((double*)&tmp)[2];
out += ((double*)&tmp)[3];
return out;
}
void test_dp_mul_AVX(int tds, size_t iterations){
printf("Testing AVX Mul:\n");
double *sum = (double*)malloc(tds * sizeof(double));
wclk start = wclk_now();
#pragma omp parallel num_threads(tds)
{
double ret = test_dp_mul_AVX_internal(1.1, 2.1, iterations);
sum[omp_get_thread_num()] = ret;
}
double secs = wclk_secs_since(start);
uint64 ops = 20 * 1000 * iterations * tds * 4;
printf("Seconds = %g\n", secs);
printf("FP Ops = %llu\n", (unsigned long long)ops);
printf("FLOPs = %g\n", ops / secs);
double out = 0;
int c = 0;
while (c < tds){
out += sum[c++];
}
printf("sum = %g\n\n", out);
free(sum);
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
double test_dp_mac_AVX_internal(double x, double y, size_t iterations){
register __m256d r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, rA, rB, rC, rD, rE, rF;
r0 = _mm256_set1_pd(x);
r1 = _mm256_set1_pd(y);
r8 = _mm256_set1_pd(-0.0);
r2 = _mm256_xor_pd(r0, r8);
r3 = _mm256_or_pd(r0, r8);
r4 = _mm256_andnot_pd(r8, r0);
r5 = _mm256_mul_pd(r1, _mm256_set1_pd(0.37796447300922722721));
r6 = _mm256_mul_pd(r1, _mm256_set1_pd(0.24253562503633297352));
r7 = _mm256_mul_pd(r1, _mm256_set1_pd(4.1231056256176605498));
r8 = _mm256_add_pd(r0, _mm256_set1_pd(0.37796447300922722721));
r9 = _mm256_add_pd(r1, _mm256_set1_pd(0.24253562503633297352));
rA = _mm256_sub_pd(r0, _mm256_set1_pd(4.1231056256176605498));
rB = _mm256_sub_pd(r1, _mm256_set1_pd(4.1231056256176605498));
rC = _mm256_set1_pd(1.4142135623730950488);
rD = _mm256_set1_pd(1.7320508075688772935);
rE = _mm256_set1_pd(0.57735026918962576451);
rF = _mm256_set1_pd(0.70710678118654752440);
uint64 iMASK = 0x800fffffffffffffull;
__m256d MASK = _mm256_set1_pd(*(double*)&iMASK);
__m256d vONE = _mm256_set1_pd(1.0);
size_t c = 0;
while (c < iterations){
size_t i = 0;
while (i < 1000){
r0 = _mm256_mul_pd(r0, rC);
r1 = _mm256_add_pd(r1, rD);
r2 = _mm256_mul_pd(r2, rE);
r3 = _mm256_sub_pd(r3, rF);
r4 = _mm256_mul_pd(r4, rC);
r5 = _mm256_add_pd(r5, rD);
r6 = _mm256_mul_pd(r6, rE);
r7 = _mm256_sub_pd(r7, rF);
r8 = _mm256_mul_pd(r8, rC);
r9 = _mm256_add_pd(r9, rD);
rA = _mm256_mul_pd(rA, rE);
rB = _mm256_sub_pd(rB, rF);
r0 = _mm256_add_pd(r0, rF);
r1 = _mm256_mul_pd(r1, rE);
r2 = _mm256_sub_pd(r2, rD);
r3 = _mm256_mul_pd(r3, rC);
r4 = _mm256_add_pd(r4, rF);
r5 = _mm256_mul_pd(r5, rE);
r6 = _mm256_sub_pd(r6, rD);
r7 = _mm256_mul_pd(r7, rC);
r8 = _mm256_add_pd(r8, rF);
r9 = _mm256_mul_pd(r9, rE);
rA = _mm256_sub_pd(rA, rD);
rB = _mm256_mul_pd(rB, rC);
r0 = _mm256_mul_pd(r0, rC);
r1 = _mm256_add_pd(r1, rD);
r2 = _mm256_mul_pd(r2, rE);
r3 = _mm256_sub_pd(r3, rF);
r4 = _mm256_mul_pd(r4, rC);
r5 = _mm256_add_pd(r5, rD);
r6 = _mm256_mul_pd(r6, rE);
r7 = _mm256_sub_pd(r7, rF);
r8 = _mm256_mul_pd(r8, rC);
r9 = _mm256_add_pd(r9, rD);
rA = _mm256_mul_pd(rA, rE);
rB = _mm256_sub_pd(rB, rF);
r0 = _mm256_add_pd(r0, rF);
r1 = _mm256_mul_pd(r1, rE);
r2 = _mm256_sub_pd(r2, rD);
r3 = _mm256_mul_pd(r3, rC);
r4 = _mm256_add_pd(r4, rF);
r5 = _mm256_mul_pd(r5, rE);
r6 = _mm256_sub_pd(r6, rD);
r7 = _mm256_mul_pd(r7, rC);
r8 = _mm256_add_pd(r8, rF);
r9 = _mm256_mul_pd(r9, rE);
rA = _mm256_sub_pd(rA, rD);
rB = _mm256_mul_pd(rB, rC);
i++;
}
//print(r0);
//print(r1);
//print(r2);
//print(r3);
//print(r4);
//print(r5);
//print(r6);
//print(r7);
//print(r8);
//print(r9);
//print(rA);
//print(rB);
//cout << endl;
r0 = _mm256_and_pd(r0, MASK);
r1 = _mm256_and_pd(r1, MASK);
r2 = _mm256_and_pd(r2, MASK);
r3 = _mm256_and_pd(r3, MASK);
r4 = _mm256_and_pd(r4, MASK);
r5 = _mm256_and_pd(r5, MASK);
r6 = _mm256_and_pd(r6, MASK);
r7 = _mm256_and_pd(r7, MASK);
r8 = _mm256_and_pd(r8, MASK);
r9 = _mm256_and_pd(r9, MASK);
rA = _mm256_and_pd(rA, MASK);
rB = _mm256_and_pd(rB, MASK);
r0 = _mm256_or_pd(r0, vONE);
r1 = _mm256_or_pd(r1, vONE);
r2 = _mm256_or_pd(r2, vONE);
r3 = _mm256_or_pd(r3, vONE);
r4 = _mm256_or_pd(r4, vONE);
r5 = _mm256_or_pd(r5, vONE);
r6 = _mm256_or_pd(r6, vONE);
r7 = _mm256_or_pd(r7, vONE);
r8 = _mm256_or_pd(r8, vONE);
r9 = _mm256_or_pd(r9, vONE);
rA = _mm256_or_pd(rA, vONE);
rB = _mm256_or_pd(rB, vONE);
c++;
}
// wclk end = wclk_now();
// double secs = wclk_secs_since(start);
// uint64 ops = 12 * 1000 * c * 2;
// cout << "Seconds = " << secs << endl;
// cout << "FP Ops = " << ops << endl;
// cout << "FLOPs = " << ops / secs << endl;
r0 = _mm256_add_pd(r0, r1);
r2 = _mm256_add_pd(r2, r3);
r4 = _mm256_add_pd(r4, r5);
r6 = _mm256_add_pd(r6, r7);
r8 = _mm256_add_pd(r8, r9);
rA = _mm256_add_pd(rA, rB);
r0 = _mm256_add_pd(r0, r2);
r4 = _mm256_add_pd(r4, r6);
r8 = _mm256_add_pd(r8, rA);
r0 = _mm256_add_pd(r0, r4);
r0 = _mm256_add_pd(r0, r8);
double out = 0;
__m256d tmp = r0;
out += ((double*)&tmp)[0];
out += ((double*)&tmp)[1];
out += ((double*)&tmp)[2];
out += ((double*)&tmp)[3];
return out;
}
void test_dp_mac_AVX(int tds, size_t iterations){
printf("Testing AVX Mul + Add:\n");
double *sum = (double*)malloc(tds * sizeof(double));
wclk start = wclk_now();
#pragma omp parallel num_threads(tds)
{
double ret = test_dp_mac_AVX_internal(1.1, 2.1, iterations);
sum[omp_get_thread_num()] = ret;
}
double secs = wclk_secs_since(start);
uint64 ops = 48 * 1000 * iterations * tds * 4;
printf("Seconds = %g\n", secs);
printf("FP Ops = %llu\n", (unsigned long long)ops);
printf("FLOPs = %g\n", ops / secs);
double out = 0;
int c = 0;
while (c < tds){
out += sum[c++];
}
printf("sum = %g\n\n", out);
free(sum);
}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
#endif
|
GB_unop__ainv_uint64_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_uint64_uint64)
// op(A') function: GB (_unop_tran__ainv_uint64_uint64)
// C type: uint64_t
// A type: uint64_t
// cast: uint64_t cij = aij
// unaryop: cij = -aij
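// (note: for uint64_t, cij = -aij is the additive inverse modulo 2^64)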
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_uint64_uint64)
(
uint64_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = -z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
uint64_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_uint64_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
prepress.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS %
% P P R R E P P R R E SS SS %
% PPPP RRRR EEE PPPP RRRR EEE SSS SSS %
% P R R E P R R E SS SS %
% P R R EEEEE P R R EEEEE SSSSS SSSSS %
% %
% %
% MagickCore Prepress Methods %
% %
% Software Design %
% Cristy %
% October 2001 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/pixel-accessor.h"
#include "magick/prepress.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T o t a l I n k D e n s i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageTotalInkDensity() returns the total ink density for a CMYK image.
% Total Ink Density (TID) is determined by adding the CMYK values in the
% darkest shadow area in an image.
%
% The format of the GetImageTotalInkDensity method is:
%
% double GetImageTotalInkDensity(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport double GetImageTotalInkDensity(Image *image)
{
CacheView
*image_view;
double
total_ink_density;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ImageError,"ColorSeparatedImageRequired","`%s'",image->filename);
return(0.0);
}
status=MagickTrue;
total_ink_density=0.0;
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
density;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
density=(double) GetPixelRed(p)+GetPixelGreen(p)+
GetPixelBlue(p)+GetPixelIndex(indexes+x);
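      /*
        Double-checked maximum: the unsynchronized test below filters out most
        pixels cheaply, and the test is repeated inside the critical section
        because another thread may have raised total_ink_density in between.
      */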
if (density > total_ink_density)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
{
if (density > total_ink_density)
total_ink_density=density;
}
p++;
}
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
total_ink_density=0.0;
return(total_ink_density);
}
|
mri-q.c | /***************************************************************************
*
* (C) Copyright 2007 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
/***************************************************************************
*
* This benchmark was adapted to run on GPUs with OpenMP 4.0 pragmas
* and OpenCL driver implemented in gpuclang 2.0 (based on clang 3.5)
*
* Marcio M Pereira <mpereira@ic.unicamp.br>
*
***************************************************************************/
/*
* C code for creating the Q data structure for fast convolution-based
* Hessian multiplication for arbitrary k-space trajectories.
*
* Inputs:
* kx - VECTOR of kx values, same length as ky and kz
* ky - VECTOR of ky values, same length as kx and kz
* kz - VECTOR of kz values, same length as kx and ky
* x - VECTOR of x values, same length as y and z
* y - VECTOR of y values, same length as x and z
* z - VECTOR of z values, same length as x and y
* phi - VECTOR of the Fourier transform of the spatial basis
* function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz.
*/
/*
* === NOTE ===
*
 * The polyhedral optimizations restrict the class of loops they can manipulate
* to sequences of imperfectly nested loops with particular constraints on the
* loop bound and array subscript expressions.
*
 * To allow this optimization we fixed the problem size with the __STATIC__ tag;
 * comment out this tag to use the original version.
*
* Recommended gpuclang options:
* -O2 -lm -ffast-math -opt-poly-all
*/
#ifndef __STATIC__
#define __STATIC__
#endif
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <sys/time.h>
#ifdef __APPLE__
#include <sys/malloc.h>
#include <machine/endian.h>
#else
#include <endian.h>
#include <malloc.h>
#endif
#if __BYTE_ORDER != __LITTLE_ENDIAN
# error "File I/O is not implemented for this system: wrong endianness."
#endif
#include "../../common/parboil.h"
#include "../../common/polybenchUtilFuncts.h"
#define ERROR_THRESHOLD 0.5
#define GPU_DEVICE 1
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#ifdef __STATIC__
// Define statically the problem size
#define NK 2048 // K_ELEMS_PER_GRID
#define NX 262144
#else
int NK, NX;
#endif
double t_start, t_end, t_start_GPU, t_end_GPU;
float *Qr_GPU, *Qi_GPU; /* Q signal (complex) */
float *Qr_CPU, *Qi_CPU; /* Q signal (complex) */
typedef float DATA_TYPE;
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
void ComputePhiMagCPU(float* phiR, float* phiI, float* phiMag)
{
int indexK = 0;
for (indexK = 0; indexK < NK; indexK++) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
void ComputeQGPU(struct kValues *kVals,
float* x, float* y, float* z,
float *Qr, float *Qi)
{
int indexK, indexX;
#pragma omp target device(1) map(to: kVals[:NK], x[:NX], y[:NX], z[:NX]) map(tofrom: Qr[:NX], Qi[:NX])
for (indexK = 0; indexK < NK; indexK++) {
#pragma omp parallel for
for (indexX = 0; indexX < NX; indexX++) {
float expArg = PIx2 * (kVals[indexK].Kx * x[indexX] +
kVals[indexK].Ky * y[indexX] +
kVals[indexK].Kz * z[indexX]);
float cosArg = cos(expArg);
float sinArg = sin(expArg);
float phi = kVals[indexK].PhiMag;
Qr[indexX] += phi * cosArg;
Qi[indexX] += phi * sinArg;
}
}
}
void ComputeQCPU(struct kValues *kVals,
float* x, float* y, float* z,
float *Qr, float *Qi)
{
int indexK, indexX;
for (indexK = 0; indexK < NK; indexK++) {
for (indexX = 0; indexX < NX; indexX++) {
float expArg = PIx2 * (kVals[indexK].Kx * x[indexX] +
kVals[indexK].Ky * y[indexX] +
kVals[indexK].Kz * z[indexX]);
float cosArg = cos(expArg);
float sinArg = sin(expArg);
float phi = kVals[indexK].PhiMag;
Qr[indexX] += phi * cosArg;
Qi[indexX] += phi * sinArg;
}
}
}
void createDataStructsCPU(float** phiMag, float** Qr, float** Qi)
{
*phiMag = (float* ) malloc(NK * sizeof(float));
*Qr = (float*) malloc(NX * sizeof (float));
memset((void *)*Qr, 0, NX * sizeof(float));
*Qi = (float*) malloc(NX * sizeof (float));
memset((void *)*Qi, 0, NX * sizeof(float));
}
void inputData(char* fName, int* _numK, int* _numX,
float** kx, float** ky, float** kz,
float** x, float** y, float** z,
float** phiR, float** phiI)
{
int numK, numX;
FILE* fid = fopen(fName, "r");
if (fid == NULL)
{
fprintf(stderr, "Cannot open input file\n");
exit(-1);
}
fread (&numK, sizeof (int), 1, fid);
*_numK = numK;
fread (&numX, sizeof (int), 1, fid);
*_numX = numX;
*kx = (float *) malloc(numK * sizeof (float));
fread (*kx, sizeof (float), numK, fid);
*ky = (float *) malloc(numK * sizeof (float));
fread (*ky, sizeof (float), numK, fid);
*kz = (float *) malloc(numK * sizeof (float));
fread (*kz, sizeof (float), numK, fid);
*x = (float *) malloc(numX * sizeof (float));
fread (*x, sizeof (float), numX, fid);
*y = (float *) malloc(numX * sizeof (float));
fread (*y, sizeof (float), numX, fid);
*z = (float *) malloc(numX * sizeof (float));
fread (*z, sizeof (float), numX, fid);
*phiR = (float *) malloc(numK * sizeof (float));
fread (*phiR, sizeof (float), numK, fid);
*phiI = (float *) malloc(numK * sizeof (float));
fread (*phiI, sizeof (float), numK, fid);
fclose (fid);
}
void compareResults(DATA_TYPE *A, DATA_TYPE *A_GPU, DATA_TYPE *B, DATA_TYPE *B_GPU)
{
int i,fail=0;
for (i=0; i < NX; i++)
{
if (percentDiff(A[i], A_GPU[i]) > ERROR_THRESHOLD)
{
fail++;
}
}
for (i=0; i < NX; i++)
{
if (percentDiff(B[i], B_GPU[i]) > ERROR_THRESHOLD)
{
fail++;
}
}
// print results
printf(">>\n Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f%s: %d\n", ERROR_THRESHOLD, "%", fail);
}
double mriqGPU(int argc, char *argv[]) {
int numX, numK; /* Number of X and K values */
int original_numK; /* Number of K values in input file */
float *kx, *ky, *kz; /* K trajectory (3D vectors) */
float *x, *y, *z; /* X coordinates (3D vectors) */
float *phiR, *phiI; /* Phi values (complex) */
float *phiMag; /* Magnitude of Phi */
struct kValues* kVals;
struct pb_Parameters *params;
/* Read command line */
params = pb_ReadParameters(&argc, argv);
if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL))
{
fprintf(stderr, "Expecting one input filename\n");
exit(-1);
}
/* Read in data */
fprintf(stdout, "<< Reading data ... ");
inputData(params->inpFiles[0],
&original_numK, &numX,
&kx, &ky, &kz,
&x, &y, &z,
&phiR, &phiI);
/* Reduce the number of k-space samples if a number is given
* on the command line */
if (argc < 2)
numK = original_numK;
else
{
int inputK;
char *end;
inputK = strtol(argv[1], &end, 10);
if (end == argv[1])
{
fprintf(stderr, "Expecting an integer parameter\n");
exit(-1);
}
numK = MIN(inputK, original_numK);
}
#ifndef __STATIC__
NK = numK;
NX = numX;
#endif
/* Create CPU data structures */
createDataStructsCPU(&phiMag, &Qr_GPU, &Qi_GPU);
ComputePhiMagCPU(phiR, phiI, phiMag);
kVals = (struct kValues*)calloc(numK, sizeof (struct kValues));
int k;
for (k = 0; k < numK; k++) {
kVals[k].Kx = kx[k];
kVals[k].Ky = ky[k];
kVals[k].Kz = kz[k];
kVals[k].PhiMag = phiMag[k];
}
fprintf(stdout, ">>\n<< Start computation on GPU... ");
t_start_GPU = rtclock();
ComputeQGPU(kVals, x, y, z, Qr_GPU, Qi_GPU);
t_end_GPU = rtclock();
free (kx);
free (ky);
free (kz);
free (x);
free (y);
free (z);
free (phiR);
free (phiI);
free (phiMag);
free (kVals);
return t_end_GPU - t_start_GPU;
}
double mriqCPU(int argc, char *argv[]) {
int numX, numK; /* Number of X and K values */
int original_numK; /* Number of K values in input file */
float *kx, *ky, *kz; /* K trajectory (3D vectors) */
float *x, *y, *z; /* X coordinates (3D vectors) */
float *phiR, *phiI; /* Phi values (complex) */
float *phiMag; /* Magnitude of Phi */
struct kValues* kVals;
struct pb_Parameters *params;
/* Read command line */
params = pb_ReadParameters(&argc, argv);
if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL))
{
fprintf(stderr, "Expecting one input filename\n");
exit(-1);
}
/* Read in data */
inputData(params->inpFiles[0],
&original_numK, &numX,
&kx, &ky, &kz,
&x, &y, &z,
&phiR, &phiI);
/* Reduce the number of k-space samples if a number is given
* on the command line */
if (argc < 2)
numK = original_numK;
else
{
int inputK;
char *end;
inputK = strtol(argv[1], &end, 10);
if (end == argv[1])
{
fprintf(stderr, "Expecting an integer parameter\n");
exit(-1);
}
numK = MIN(inputK, original_numK);
}
#ifndef __STATIC__
NK = numK;
NX = numX;
#endif
/* Create CPU data structures */
createDataStructsCPU(&phiMag, &Qr_CPU, &Qi_CPU);
ComputePhiMagCPU(phiR, phiI, phiMag);
kVals = (struct kValues*)calloc(numK, sizeof (struct kValues));
int k;
for (k = 0; k < numK; k++) {
kVals[k].Kx = kx[k];
kVals[k].Ky = ky[k];
kVals[k].Kz = kz[k];
kVals[k].PhiMag = phiMag[k];
}
fprintf(stdout, "\n<< Start computation on CPU... ");
t_start = rtclock();
ComputeQCPU(kVals, x, y, z, Qr_CPU, Qi_CPU);
t_end = rtclock();
free (kx);
free (ky);
free (kz);
free (x);
free (y);
free (z);
free (phiR);
free (phiI);
free (phiMag);
free (kVals);
return t_end - t_start;
}
int main (int argc, char *argv[]) {
double t_GPU, t_CPU;
fprintf(stdout, "<< Creating the Q data structure for fast convolution-based\n");
fprintf(stdout, " Hessian multiplication for arbitrary k-space trajectories.>>\n");
fprintf(stdout, "<< Elements per Grid: 2048 >>\n\n");
fprintf(stdout, " for (indexK = 0; indexK < 2048; indexK++) \n");
fprintf(stdout, " for (indexX = 0; indexX < 262144; indexX++) { \n");
fprintf(stdout, " float expArg = PIx2 * (kVals[indexK].Kx * x[indexX] +\n");
fprintf(stdout, " kVals[indexK].Ky * y[indexX] + \n");
fprintf(stdout, " kVals[indexK].Kz * z[indexX]);\n");
fprintf(stdout, " float cosArg = cos(expArg);\n");
fprintf(stdout, " float sinArg = sin(expArg);\n");
fprintf(stdout, " float phi = kVals[indexK].PhiMag;\n");
fprintf(stdout, " Qr[indexX] += phi * cosArg;\n");
fprintf(stdout, " Qi[indexX] += phi * sinArg;\n");
fprintf(stdout, " } \n\n");
t_GPU = mriqGPU(argc, argv);
fprintf(stdout, ">>\n GPU Runtime: %0.6lfs\n", t_GPU);
t_CPU = mriqCPU(argc, argv);
fprintf(stdout, ">>\n CPU Runtime: %0.6lfs\n", t_CPU);
fprintf(stdout, "\n<< Comparing Results...");
compareResults(Qr_CPU, Qr_GPU, Qi_CPU, Qi_GPU);
free(Qr_GPU);
free(Qi_GPU);
free(Qr_CPU);
free(Qi_CPU);
return 0;
}
|
stream.c | // Copyright 2009-2019 NTESS. Under the terms
// of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2019, NTESS
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[]) {
const int LENGTH = 2000;
printf("\n\n\nHello CramSim!!!!\n");
printf("Run a stream application with ariel and cramsim\n");
printf("------------------------------------------------------\n");
printf("Allocating arrays of size %d elements.\n", LENGTH);
double* a = (double*) malloc(sizeof(double) * LENGTH);
double* b = (double*) malloc(sizeof(double) * LENGTH);
double* c = (double*) malloc(sizeof(double) * LENGTH);
printf("Done allocating arrays.\n");
int i;
for(i = 0; i < LENGTH; ++i) {
a[i] = i;
b[i] = LENGTH - i;
c[i] = 0;
}
printf("Perfoming the fast_c compute loop...\n");
#pragma omp parallel for
for(i = 0; i < LENGTH; ++i) {
//printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i]));
c[i] = 2.0 * a[i] + 1.5 * b[i];
}
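// Sanity check: with a[i] = i and b[i] = LENGTH - i, each c[i] equals
// 0.5*i + 1.5*LENGTH, so for LENGTH = 2000 the sum printed below
// should be 6999500.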
double sum = 0;
for(i = 0; i < LENGTH; ++i) {
sum += c[i];
}
printf("Sum of arrays is: %f\n", sum);
printf("Freeing arrays...\n");
free(a);
free(b);
free(c);
printf("Done.\n");
    return 0;
}
|
primes.c | #include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>
#include <offload.h>
const int a = 1;
const int b = 3000000;
double wtime()
{
struct timeval t;
gettimeofday(&t, NULL);
return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}
/*
* is_prime_number: Returns 1 if n is a prime number and 0 otherwise.
* This function uses trial division primality test.
*/
__attribute__((target(mic))) int is_prime_number(int n)
{
int limit = sqrt(n) + 1;
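    /* testing divisors up to sqrt(n) suffices: any composite n = p * q
       has a factor p <= sqrt(n) */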
for (int i = 2; i <= limit; i++) {
if (n % i == 0)
return 0;
}
return (n > 1) ? 1 : 0;
}
int count_prime_numbers(int a, int b)
{
int nprimes = 0;
/* Count '2' as a prime number */
if (a <= 2) {
nprimes = 1;
a = 2;
}
/* Shift 'a' to odd number */
if (a % 2 == 0)
a++;
/* Loop over odd numbers: a, a + 2, a + 4, ... , b */
for (int i = a; i <= b; i += 2) {
if (is_prime_number(i))
nprimes++;
}
return nprimes;
}
double run_host_serial()
{
double t = wtime();
int n = count_prime_numbers(a, b);
t = wtime() - t;
printf("Result (host serial): %d\n", n);
return t;
}
int count_prime_numbers_omp(int a, int b)
{
int nprimes = 0;
/* Count '2' as a prime number */
if (a <= 2) {
nprimes = 1;
a = 2;
}
/* Shift 'a' to odd number */
if (a % 2 == 0)
a++;
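    /* dynamic scheduling with chunks of 100: trial division gets more expensive
       as i grows, so handing out fixed-size chunks on demand keeps the threads
       load-balanced */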
#pragma omp parallel
{
/* Loop over odd numbers: a, a + 2, a + 4, ... , b */
#pragma omp for schedule(dynamic, 100) reduction(+:nprimes)
for (int i = a; i <= b; i += 2) {
if (is_prime_number(i))
nprimes++;
}
}
return nprimes;
}
double run_host_parallel()
{
double t = wtime();
int n = count_prime_numbers_omp(a, b);
t = wtime() - t;
printf("Result (host parallel): %d\n", n);
return t;
}
__attribute__((target(mic))) int count_prime_numbers_phi(int a, int b)
{
int nprimes = 0;
/* Count '2' as a prime number */
if (a <= 2) {
nprimes = 1;
a = 2;
}
/* Shift 'a' to odd number */
if (a % 2 == 0)
a++;
/* Loop over odd numbers: a, a + 2, a + 4, ... , b */
for (int i = a; i <= b; i += 2) {
if (is_prime_number(i))
nprimes++;
}
return nprimes;
}
double run_phi_serial()
{
#ifdef __INTEL_OFFLOAD
printf("Intel Xeon Phi devices: %d\n", _Offload_number_of_devices());
#endif
int n;
double t = wtime();
#pragma offload target(mic) out(n)
n = count_prime_numbers_phi(a, b);
t = wtime() - t;
printf("Result (phi serial): %d\n", n);
return t;
}
int main(int argc, char **argv)
{
printf("Count prime numbers in [%d, %d]\n", a, b);
double thost_serial = run_host_serial();
double thost_par = run_host_parallel();
double tphi_serial = run_phi_serial();
printf("Execution time (host serial): %.6f\n", thost_serial);
printf("Execution time (host parallel): %.6f\n", thost_par);
printf("Execution time (phi serial): %.6f\n", tphi_serial);
printf("Ratio phi_serial/host_serial: %.2f\n", tphi_serial / thost_serial);
printf("Speedup host_serial/host_omp: %.2f\n", thost_serial / thost_par);
return 0;
}
|
bug3.c | /******************************************************************************
* FILE: omp_bug3.c
* DESCRIPTION:
* Run time error
* AUTHOR: Blaise Barney 01/09/04
* LAST REVISED: 06/28/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 50
int main (int argc, char *argv[])
{
int i, nthreads, tid, section;
float a[N], b[N], c[N];
void print_results(float array[N], int tid, int section);
/* Some initializations */
for (i=0; i<N; i++)
a[i] = b[i] = i * 1.0;
#pragma omp parallel private(c,i,tid,section)
{
tid = omp_get_thread_num();
if (tid == 0)
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
/*** Use barriers for clean output ***/
#pragma omp barrier
printf("Thread %d starting...\n",tid);
#pragma omp barrier
#pragma omp sections nowait
{
#pragma omp section
{
section = 1;
for (i=0; i<N; i++)
c[i] = a[i] * b[i];
print_results(c, tid, section);
}
#pragma omp section
{
section = 2;
for (i=0; i<N; i++)
c[i] = a[i] + b[i];
print_results(c, tid, section);
}
} /* end of sections */
/*** Use barrier for clean output ***/
#pragma omp barrier
printf("Thread %d exiting...\n",tid);
} /* end of parallel section */
}
void print_results(float array[N], int tid, int section)
{
int i,j;
j = 1;
/*** use critical for clean output ***/
#pragma omp critical
{
printf("\nThread %d did section %d. The results are:\n", tid, section);
for (i=0; i<N; i++) {
printf("%e ",array[i]);
j++;
if (j == 6) {
printf("\n");
j = 1;
}
}
printf("\n");
} /*** end of critical ***/
// #pragma omp barrier
printf("Thread %d done and synchronized.\n", tid);
}
|
nbody_parallel.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"
#include <omp.h>
#define DIM 2 /* Two-dimensional system */
#define X 0 /* x-coordinate subscript */
#define Y 1 /* y-coordinate subscript */
const double G = 6.673e-11;
typedef double vect_t[DIM]; /* Vector type for position, etc. */
// vect_t forces_reduction[4999][5000];
struct particle_s
{
double m; /* Mass */
vect_t s; /* Position */
vect_t v; /* Velocity */
};
void Usage(char *prog_name);
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p);
void Get_init_cond(struct particle_s curr[], int n);
void Gen_init_cond(struct particle_s curr[], int n);
void Output_state(double time, struct particle_s curr[], int n);
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n);
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t);
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p);
int main(int argc, char *argv[])
{
int n; /* Number of particles */
int n_steps; /* Number of timesteps */
int step; /* Current step */
int part; /* Current particle */
int output_freq; /* Frequency of output */
double delta_t; /* Size of timestep */
double t = 0.0; /* Current Time */
struct particle_s *curr; /* Current state of system */
vect_t *forces; /* Forces on each particle */
char g_i; /*_G_en or _i_nput init conds */
double kinetic_energy, potential_energy;
double start, finish; /* For timings */
Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
curr = malloc(n * sizeof(struct particle_s));
forces = malloc(n * sizeof(vect_t));
if (g_i == 'i')
Get_init_cond(curr, n);
else
Gen_init_cond(curr, n);
GET_TIME(start);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
Output_state(0, curr, n);
for (step = 1; step <= n_steps; step++)
{
memset(forces, 0, n * sizeof(vect_t));
for (part = 0; part < n - 1; part++)
Compute_force(part, forces, curr, n);
// #pragma omp parallel for
// for (part = 0; part < n - 1; part++)
// {
// memset(forces_reduction[part], 0, n * sizeof(vect_t));
// Compute_force(part, forces_reduction[part], curr, n);
// }
// #pragma omp parallel for
// for (int i = 0; i < n - 1; i++)
// {
// for (int j = 0; j < n; j++)
// {
// #pragma omp atomic
// forces[j][X] += forces_reduction[i][j][X];
// #pragma omp atomic
// forces[j][Y] += forces_reduction[i][j][Y];
// }
// }
#pragma omp parallel for
for (part = 0; part < n; part++)
{
Update_part(part, forces, curr, n, delta_t);
}
t = step * delta_t;
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
}
Output_state(t, curr, n);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
free(curr);
free(forces);
return 0;
} /* main */
void Usage(char *prog_name)
{
fprintf(stderr, "usage: %s <number of particles> <number of timesteps>\n",
prog_name);
fprintf(stderr, " <size of timestep> <output frequency>\n");
fprintf(stderr, " <g|i>\n");
fprintf(stderr, " 'g': program should generate init conds\n");
fprintf(stderr, " 'i': program should get init conds from stdin\n");
exit(0);
} /* Usage */
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p)
{
if (argc != 6)
Usage(argv[0]);
*n_p = strtol(argv[1], NULL, 10);
*n_steps_p = strtol(argv[2], NULL, 10);
*delta_t_p = strtod(argv[3], NULL);
*output_freq_p = strtol(argv[4], NULL, 10);
*g_i_p = argv[5][0];
if (*n_p <= 0 || *n_steps_p < 0 || *delta_t_p <= 0)
Usage(argv[0]);
if (*g_i_p != 'g' && *g_i_p != 'i')
Usage(argv[0]);
} /* Get_args */
void Get_init_cond(struct particle_s curr[], int n)
{
int part;
printf("For each particle, enter (in order):\n");
printf(" its mass, its x-coord, its y-coord, ");
printf("its x-velocity, its y-velocity\n");
for (part = 0; part < n; part++)
{
scanf("%lf", &curr[part].m);
scanf("%lf", &curr[part].s[X]);
scanf("%lf", &curr[part].s[Y]);
scanf("%lf", &curr[part].v[X]);
scanf("%lf", &curr[part].v[Y]);
}
} /* Get_init_cond */
void Gen_init_cond(struct particle_s curr[], int n)
{
int part;
double mass = 5.0e24;
double gap = 1.0e5;
double speed = 3.0e4;
srand(1);
#pragma omp parallel for
for (part = 0; part < n; part++)
{
curr[part].m = mass;
curr[part].s[X] = part * gap;
curr[part].s[Y] = 0.0;
curr[part].v[X] = 0.0;
if (part % 2 == 0)
curr[part].v[Y] = speed;
else
curr[part].v[Y] = -speed;
}
} /* Gen_init_cond */
void Output_state(double time, struct particle_s curr[], int n)
{
int part;
printf("%.2f\n", time);
for (part = 0; part < n; part++)
{
printf("%3d %10.3e ", part, curr[part].s[X]);
printf(" %10.3e ", curr[part].s[Y]);
printf(" %10.3e ", curr[part].v[X]);
printf(" %10.3e\n", curr[part].v[Y]);
}
printf("\n");
} /* Output_state */
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n)
{
int k;
double mg;
vect_t f_part_k;
double len, len_3, fact;
double forces_part_x = 0.0;
double forces_part_y = 0.0;
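   /* forces[part] is touched by every k-iteration, so it is accumulated into
      the two scalar reduction variables; forces[k] needs no synchronization
      because each parallel iteration writes a distinct k. */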
#pragma omp parallel for private(f_part_k, len, len_3, mg, fact) reduction(+:forces_part_x,forces_part_y)
for (k = part + 1; k < n; k++)
{
f_part_k[X] = curr[part].s[X] - curr[k].s[X];
f_part_k[Y] = curr[part].s[Y] - curr[k].s[Y];
len = sqrt(f_part_k[X] * f_part_k[X] + f_part_k[Y] * f_part_k[Y]);
len_3 = len * len * len;
mg = -G * curr[part].m * curr[k].m;
fact = mg / len_3;
f_part_k[X] *= fact;
f_part_k[Y] *= fact;
// forces[part][X] += f_part_k[X];
forces_part_x += f_part_k[X];
// forces[part][Y] += f_part_k[Y];
forces_part_y += f_part_k[Y];
forces[k][X] -= f_part_k[X];
forces[k][Y] -= f_part_k[Y];
}
forces[part][X] += forces_part_x;
forces[part][Y] += forces_part_y;
} /* Compute_force */
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t)
{
double fact = delta_t / curr[part].m;
curr[part].s[X] += delta_t * curr[part].v[X];
curr[part].s[Y] += delta_t * curr[part].v[Y];
curr[part].v[X] += fact * forces[part][X];
curr[part].v[Y] += fact * forces[part][Y];
} /* Update_part */
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p)
{
int i, j;
vect_t diff;
double pe = 0.0, ke = 0.0;
double dist, speed_sqr;
#pragma omp parallel
{
#pragma omp for reduction(+:ke) private(speed_sqr) nowait
for (i = 0; i < n; i++)
{
speed_sqr = curr[i].v[X] * curr[i].v[X] + curr[i].v[Y] * curr[i].v[Y];
ke += curr[i].m * speed_sqr;
}
#pragma omp for private(j, dist, diff) reduction(+:pe)
for (i = 0; i < n - 1; i++)
{
for (j = i + 1; j < n; j++)
{
diff[X] = curr[i].s[X] - curr[j].s[X];
diff[Y] = curr[i].s[Y] - curr[j].s[Y];
dist = sqrt(diff[X] * diff[X] + diff[Y] * diff[Y]);
pe += -G * curr[i].m * curr[j].m / dist;
}
}
#pragma omp single
ke *= 0.5;
} // parallel
*kin_en_p = ke;
*pot_en_p = pe;
} /* Compute_energy */
|
row2node_map.h | /**
* @file row2node_map.h
* @author Yibo Lin
* @date Apr 2019
*/
#ifndef _DREAMPLACE_K_REORDER_ROW2NODE_MAP_H
#define _DREAMPLACE_K_REORDER_ROW2NODE_MAP_H
#include "utility/src/common.h"
DREAMPLACE_BEGIN_NAMESPACE
/// @brief distribute cells to rows
template <typename DetailedPlaceDBType>
void make_row2node_map(const DetailedPlaceDBType& db, const typename DetailedPlaceDBType::type* vx, const typename DetailedPlaceDBType::type* vy, std::vector<std::vector<int> >& row2node_map, int num_threads)
{
// distribute cells to rows
for (int i = 0; i < db.num_nodes; ++i)
{
//typename DetailedPlaceDBType::type node_xl = vx[i];
typename DetailedPlaceDBType::type node_yl = vy[i];
//typename DetailedPlaceDBType::type node_xh = node_xl+db.node_size_x[i];
typename DetailedPlaceDBType::type node_yh = node_yl+db.node_size_y[i];
int row_idxl = (node_yl-db.yl)/db.row_height;
int row_idxh = ceil((node_yh-db.yl)/db.row_height)+1;
row_idxl = std::max(row_idxl, 0);
row_idxh = std::min(row_idxh, db.num_sites_y);
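        // row_idxh is computed generously (+1 above); the exact overlap test
        // below filters out rows the cell does not actually intersect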
for (int row_id = row_idxl; row_id < row_idxh; ++row_id)
{
typename DetailedPlaceDBType::type row_yl = db.yl+row_id*db.row_height;
typename DetailedPlaceDBType::type row_yh = row_yl+db.row_height;
if (node_yl < row_yh && node_yh > row_yl) // overlap with row
{
row2node_map[row_id].push_back(i);
}
}
}
#pragma omp parallel for num_threads (num_threads) schedule(dynamic, 1)
for (int i = 0; i < db.num_sites_y; ++i)
{
auto& row2nodes = row2node_map[i];
// sort cells within rows according to left edges
std::sort(row2nodes.begin(), row2nodes.end(),
[&] (int node_id1, int node_id2) {
typename DetailedPlaceDBType::type x1 = vx[node_id1];
typename DetailedPlaceDBType::type x2 = vx[node_id2];
return x1 < x2 || (x1 == x2 && node_id1 < node_id2);
});
// After sorting by left edge,
// there is a special case for fixed cells where
// one fixed cell is completely within another in a row.
// This will cause failure to detect some overlaps.
// We need to remove the "small" fixed cell that is inside another.
if (!row2nodes.empty())
{
removeContainedFixedCellsFromRow(row2nodes, db.num_movable_nodes, vx, db.node_size_x);
// sort according to center
std::sort(row2nodes.begin(), row2nodes.end(),
[&] (int node_id1, int node_id2) {
typename DetailedPlaceDBType::type x1 = vx[node_id1] + db.node_size_x[node_id1]/2;
typename DetailedPlaceDBType::type x2 = vx[node_id2] + db.node_size_x[node_id2]/2;
return x1 < x2 || (x1 == x2 && node_id1 < node_id2);
});
}
}
}
DREAMPLACE_END_NAMESPACE
#endif
|
stat_ops.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "stat_ops.h"
#include "utility.h"
#include "constant.h"
double expectation_value_X_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim);
double expectation_value_Y_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim);
double expectation_value_Z_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim);
double expectation_value_multi_qubit_Pauli_operator_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index, const CTYPE* state, ITYPE dim);
double expectation_value_multi_qubit_Pauli_operator_Z_mask(ITYPE phase_flip_mask, const CTYPE* state, ITYPE dim);
CTYPE transition_amplitude_multi_qubit_Pauli_operator_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim);
CTYPE transition_amplitude_multi_qubit_Pauli_operator_Z_mask(ITYPE phase_flip_mask, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim);
// calculate norm
double state_norm(const CTYPE *state, ITYPE dim) {
ITYPE index;
double norm = 0;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:norm)
#endif
for (index = 0; index < dim; ++index){
norm += pow(cabs(state[index]), 2);
}
return norm;
}
// calculate entropy of probability distribution of Z-basis measurements
double measurement_distribution_entropy(const CTYPE *state, ITYPE dim){
ITYPE index;
double ent=0;
const double eps = 1e-15;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:ent)
#endif
for(index = 0; index < dim; ++index){
double prob = pow(cabs(state[index]),2);
if(prob > eps){
ent += -1.0*prob*log(prob);
}
}
return ent;
}
// calculate the inner product of two state vectors
CTYPE state_inner_product(const CTYPE *state_bra, const CTYPE *state_ket, ITYPE dim) {
#ifndef _MSC_VER
CTYPE value = 0;
ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:value)
#endif
for(index = 0; index < dim; ++index){
value += conj(state_bra[index]) * state_ket[index];
}
return value;
#else
double real_sum = 0.;
double imag_sum = 0.;
ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:real_sum,imag_sum)
#endif
for (index = 0; index < dim; ++index) {
CTYPE value = conj(state_bra[index]) * state_ket[index]; // initialize directly; += on an uninitialized value is undefined
real_sum += creal(value);
imag_sum += cimag(value);
}
return CTYPE(real_sum, imag_sum); // assumes CTYPE is a complex class type under MSVC, matching the constructor syntax used below (the 1.i imaginary literal is a GNU extension)
#endif
}
void state_add(const CTYPE *state_added, CTYPE *state, ITYPE dim) {
ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < dim; ++index) {
state[index] += state_added[index];
}
}
void state_multiply(CTYPE coef, CTYPE *state, ITYPE dim) {
ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < dim; ++index) {
state[index] *= coef;
}
}
// calculate probability with which we obtain 0 at target qubit
double M0_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_0 = insert_zero_to_basis_index(state_index,mask,target_qubit_index);
sum += pow(cabs(state[basis_0]),2);
}
return sum;
}
// calculate probability with which we obtain 1 at target qubit
double M1_prob(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_1 = insert_zero_to_basis_index(state_index,mask,target_qubit_index) ^ mask;
sum += pow(cabs(state[basis_1]),2);
}
return sum;
}
// calculate the marginal probability of obtaining the values in measured_value_list at sorted_target_qubit_index_list
// warning: sorted_target_qubit_index_list must be sorted.
double marginal_prob(const UINT* sorted_target_qubit_index_list, const UINT* measured_value_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim){
ITYPE loop_dim = dim >> target_qubit_index_count;
ITYPE state_index;
double sum=0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index = 0;state_index < loop_dim; ++state_index){
ITYPE basis = state_index;
for(UINT cursor=0; cursor < target_qubit_index_count ; cursor++){
UINT insert_index = sorted_target_qubit_index_list[cursor];
ITYPE mask = 1ULL << insert_index;
basis = insert_zero_to_basis_index(basis, mask , insert_index );
basis ^= mask * measured_value_list[cursor];
}
sum += pow(cabs(state[basis]),2);
}
return sum;
}
// calculate expectation value of X on target qubit
double expectation_value_X_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_0 = insert_zero_to_basis_index(state_index,mask,target_qubit_index);
ITYPE basis_1 = basis_0 ^ mask;
sum += creal( conj(state[basis_0]) * state[basis_1] ) * 2;
}
return sum;
}
// calculate expectation value of Y on target qubit
double expectation_value_Y_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE mask = 1ULL << target_qubit_index;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_0 = insert_zero_to_basis_index(state_index,mask,target_qubit_index);
ITYPE basis_1 = basis_0 ^ mask;
sum += cimag( conj(state[basis_0]) * state[basis_1] ) * 2;
}
return sum;
}
// calculate expectation value of Z on target qubit
double expectation_value_Z_Pauli_operator(UINT target_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim;
ITYPE state_index;
double sum =0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
int sign = 1 - 2 * ((state_index >> target_qubit_index)%2);
sum += creal( conj(state[state_index]) * state[state_index] ) * sign;
}
return sum;
}
// calculate expectation value for single-qubit pauli operator
double expectation_value_single_qubit_Pauli_operator(UINT target_qubit_index, UINT Pauli_operator_type, const CTYPE *state, ITYPE dim) {
if(Pauli_operator_type == 0){
return state_norm(state,dim);
}else if(Pauli_operator_type == 1){
return expectation_value_X_Pauli_operator(target_qubit_index, state, dim);
}else if(Pauli_operator_type == 2){
return expectation_value_Y_Pauli_operator(target_qubit_index, state, dim);
}else if(Pauli_operator_type == 3){
return expectation_value_Z_Pauli_operator(target_qubit_index, state, dim);
}else{
fprintf(stderr,"invalid expectation value of pauli operator is called");
exit(1);
}
}
// calculate expectation value of multi-qubit Pauli operator on qubits.
// bit-flip mask : the n-bit string whose i-th bit is 1 iff the i-th Pauli operator is X or Y
// phase-flip mask : the n-bit string whose i-th bit is 1 iff the i-th Pauli operator is Y or Z
// We assume bit-flip mask is nonzero, namely, there is at least one X or Y operator.
// the pivot qubit is any qubit index which has X or Y
// To generate bit-flip mask and phase-flip mask, see get_masks_*_list at utility.h
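// Example: for P = X_0 Y_1 Z_2 on qubits (0,1,2),
//   bit_flip_mask   = 0b011 (X on qubit 0, Y on qubit 1)
//   phase_flip_mask = 0b110 (Y on qubit 1, Z on qubit 2)
//   pivot_qubit_index may be 0 or 1 (any qubit carrying X or Y)
//   global_phase_90rot_count = 1, assuming it counts Y factors (Y = i*X*Z)
// (Illustrative values derived from the mask definitions above.)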
double expectation_value_multi_qubit_Pauli_operator_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count,UINT pivot_qubit_index, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim/2;
const ITYPE pivot_mask = 1ULL << pivot_qubit_index;
ITYPE state_index;
double sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
UINT sign_0 = count_population(basis_0 & phase_flip_mask)%2;
sum += creal(state[basis_0] * conj(state[basis_1]) * PHASE_90ROT[ (global_phase_90rot_count + sign_0*2)%4 ] * 2.0);
}
return sum;
}
double expectation_value_multi_qubit_Pauli_operator_Z_mask(ITYPE phase_flip_mask, const CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim;
ITYPE state_index;
double sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for(state_index=0;state_index<loop_dim;++state_index){
int bit_parity = count_population(state_index & phase_flip_mask)%2;
int sign = 1 - 2*bit_parity;
sum += pow(cabs(state[state_index]),2) * sign;
}
return sum;
}
CTYPE transition_amplitude_multi_qubit_Pauli_operator_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim) {
const ITYPE loop_dim = dim / 2;
const ITYPE pivot_mask = 1ULL << pivot_qubit_index;
ITYPE state_index;
#ifndef _MSC_VER
CTYPE sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
UINT sign_0 = count_population(basis_0 & phase_flip_mask) % 2;
sum += state_ket[basis_0] * conj(state_bra[basis_1]) * PHASE_90ROT[(global_phase_90rot_count + sign_0 * 2) % 4];
UINT sign_1 = count_population(basis_1 & phase_flip_mask) % 2;
sum += state_ket[basis_1] * conj(state_bra[basis_0]) * PHASE_90ROT[(global_phase_90rot_count + sign_1 * 2) % 4];
}
#else
double sum_real = 0.;
double sum_imag = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum_real, sum_imag)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index);
ITYPE basis_1 = basis_0 ^ bit_flip_mask;
UINT sign_0 = count_population(basis_0 & phase_flip_mask) % 2;
UINT sign_1 = count_population(basis_1 & phase_flip_mask) % 2;
CTYPE val1 = state_ket[basis_0] * conj(state_bra[basis_1]) * PHASE_90ROT[(global_phase_90rot_count + sign_0 * 2) % 4];
CTYPE val2 = state_ket[basis_1] * conj(state_bra[basis_0]) * PHASE_90ROT[(global_phase_90rot_count + sign_1 * 2) % 4];
sum_real += creal(val1);
sum_imag += cimag(val1);
sum_real += creal(val2);
sum_imag += cimag(val2);
}
CTYPE sum(sum_real, sum_imag);
#endif
return sum;
}
CTYPE transition_amplitude_multi_qubit_Pauli_operator_Z_mask(ITYPE phase_flip_mask, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim) {
const ITYPE loop_dim = dim;
ITYPE state_index;
#ifndef _MSC_VER
CTYPE sum = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
int bit_parity = count_population(state_index & phase_flip_mask) % 2;
double sign = 1 - 2 * bit_parity;
sum += sign*state_ket[state_index] * conj(state_bra[state_index]);
}
return sum;
#else
double sum_real = 0.;
double sum_imag = 0.;
#ifdef _OPENMP
#pragma omp parallel for reduction(+:sum_real, sum_imag)
#endif
for (state_index = 0; state_index < loop_dim; ++state_index) {
int bit_parity = count_population(state_index & phase_flip_mask) % 2;
double sign = 1 - 2 * bit_parity;
CTYPE val = sign * state_ket[state_index] * conj(state_bra[state_index]);
sum_real += creal(val);
sum_imag += cimag(val);
}
CTYPE sum(sum_real, sum_imag);
#endif
return sum;
}
double expectation_value_multi_qubit_Pauli_operator_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, const CTYPE* state, ITYPE dim){
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
double result;
if(bit_flip_mask == 0){
result = expectation_value_multi_qubit_Pauli_operator_Z_mask(phase_flip_mask, state,dim);
}else{
result = expectation_value_multi_qubit_Pauli_operator_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim);
}
return result;
}
double expectation_value_multi_qubit_Pauli_operator_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, const CTYPE* state, ITYPE dim){
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
double result;
if(bit_flip_mask == 0){
result = expectation_value_multi_qubit_Pauli_operator_Z_mask(phase_flip_mask, state, dim);
}else{
result = expectation_value_multi_qubit_Pauli_operator_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim);
}
return result;
}
CTYPE transition_amplitude_multi_qubit_Pauli_operator_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim) {
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
CTYPE result;
if (bit_flip_mask == 0) {
result = transition_amplitude_multi_qubit_Pauli_operator_Z_mask(phase_flip_mask, state_bra, state_ket, dim);
}
else {
result = transition_amplitude_multi_qubit_Pauli_operator_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state_bra, state_ket, dim);
}
return result;
}
CTYPE transition_amplitude_multi_qubit_Pauli_operator_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, const CTYPE* state_bra, const CTYPE* state_ket, ITYPE dim) {
ITYPE bit_flip_mask = 0;
ITYPE phase_flip_mask = 0;
UINT global_phase_90rot_count = 0;
UINT pivot_qubit_index = 0;
get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count,
&bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index);
CTYPE result;
if (bit_flip_mask == 0) {
result = transition_amplitude_multi_qubit_Pauli_operator_Z_mask(phase_flip_mask, state_bra, state_ket, dim);
}
else {
result = transition_amplitude_multi_qubit_Pauli_operator_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state_bra, state_ket, dim);
}
return result;
}
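/* Usage sketch (illustrative only; assumes CTYPE is a double-precision
 * complex type and dim = 2^n for an n-qubit state):
 *   CTYPE state[4] = {1, 0, 0, 0};                               // |00>
 *   double nrm = state_norm(state, 4);                           // 1.0
 *   double z0 = expectation_value_Z_Pauli_operator(0, state, 4); // +1.0
 */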
|
tree_extrap_functor.h | // *************************************************************************
// Copyright (C) 2016 by Arash Bakhtiari
// You may not use this file except in compliance with the License.
// You obtain a copy of the License in the LICENSE file.
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// *************************************************************************
#ifndef SRC_TREE_EXTRAP_FUNCTOR_H_
#define SRC_TREE_EXTRAP_FUNCTOR_H_
#include <vector>
#include <cheb_node.hpp>
#include <profile.hpp>
#include <pvfmm_common.hpp>
#include "utils/common.h"
#include "utils/cubic.h"
namespace tbslas {
template <typename Real_t, class Tree_t>
class FieldExtrapFunctor {
public:
explicit FieldExtrapFunctor(Tree_t *tp, Tree_t *tc) : tc_(tc), tp_(tp) { // init list ordered to match member declaration order
typedef typename Tree_t::Node_t Node_t;
tbslas::SimConfig *sim_config = tbslas::SimConfigSingleton::Instance();
//////////////////////////////////////////////////////////////////////
// GET THE TREES PARAMETERS
//////////////////////////////////////////////////////////////////////
Node_t *n_curr = tp_->PostorderFirst();
while (n_curr != NULL) {
if (!n_curr->IsGhost() && n_curr->IsLeaf()) break;
n_curr = tp_->PostorderNxt(n_curr);
}
data_dof_ = n_curr->DataDOF(); // assumes the tree has at least one non-ghost leaf (n_curr would be NULL otherwise)
}
virtual ~FieldExtrapFunctor() {}
void operator()(const Real_t *query_points_pos, int num_points, Real_t *out) {
////////////////////////////////////////////////////////////////////////
// EXTRAPOLATE IN TIME FOR ALL QUERY POINTS
////////////////////////////////////////////////////////////////////////
// ===================================
// CONSTRUCT THE EVALUATORS
// ===================================
tbslas::NodeFieldFunctor<Real_t, Tree_t> tp_evaluator(tp_);
tbslas::NodeFieldFunctor<Real_t, Tree_t> tc_evaluator(tc_);
// ===================================
// EVALUATE AT T^N
// ===================================
std::vector<Real_t> tnc_pnts_val;
tnc_pnts_val.resize(num_points * data_dof_);
tc_evaluator(query_points_pos, num_points, tnc_pnts_val.data());
// ===================================
// EVALUATE AT T^(N-1)
// ===================================
std::vector<Real_t> tnp_pnts_val;
tnp_pnts_val.resize(num_points * data_dof_);
tp_evaluator(query_points_pos, num_points, tnp_pnts_val.data());
// ===================================
// COMBINE AND STORE THE VALUES
// ===================================
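// Linear two-point extrapolation in time: assuming a uniform step dt,
//   f(t^n + dt/2) ~= 1.5*f(t^n) - 0.5*f(t^(n-1)),
// where tc_ holds the field at t^n and tp_ the field at t^(n-1).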
Real_t ccoeff = 3.0 / 2;
Real_t pcoeff = 0.5;
#pragma omp parallel for
for (int i = 0; i < tnc_pnts_val.size(); i++) {
out[i] = ccoeff * tnc_pnts_val[i] - pcoeff * tnp_pnts_val[i];
}
}
void update(Tree_t *new_tree, Real_t time) {
// POP FRONT (DEALLOCATE AND REMOVE) THE FIRST TREE IN THE QUEUE
delete tp_;
tp_ = tc_;
tc_ = new_tree;
}
private:
Tree_t *tc_;
Tree_t *tp_;
int data_dof_;
};
} // namespace tbslas
#endif // SRC_TREE_EXTRAP_FUNCTOR_H_
|
schedule-claused.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int i, n = 200, chunk, a[200], suma = 0; /* n is capped at 200 below, so a fixed-size array suffices */
if(argc < 3) {
fprintf(stderr,"\nFalta iteraciones o chunk \n");
exit(-1);
}
n = atoi(argv[1]); if (n>200) n=200; chunk = atoi(argv[2]);
for (i=0; i<n; i++) a[i] = i;
#pragma omp parallel for firstprivate(suma) \
lastprivate(suma) schedule(dynamic,chunk)
for (i=0; i<n; i++)
{
suma = suma + a[i];
printf(" thread %d suma a[%d]=%d suma=%d \n",
omp_get_thread_num(),i,a[i],suma);
}
printf("Fuera de 'parallel for' suma=%d\n",suma);
}
|
clang-274983.c | #include <omp.h>
#include <stdio.h>
int main(){
omp_set_default_device(0);
int N=10;
double *x = (double*) omp_target_alloc(N*sizeof(double),omp_get_default_device());
#pragma omp target enter data map(to: x[:N])
fprintf(stderr, "before target data: x[1] 0x%p \n",&x[1]);
#pragma omp target data use_device_ptr(x)
{
fprintf(stderr, "in target data: x[1] 0x%p \n",&x[1]);
}
#pragma omp target
{
for (int i = 0; i < N; ++i) x[i] = 2.0;
printf("in target x[1] 0x%p \n",&x[1]);
}
return 0;
}
|
sort.c | /**********************************************************************************************/
/* This program is part of the Barcelona OpenMP Tasks Suite */
/* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */
/* Copyright (C) 2009 Universitat Politecnica de Catalunya */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */
/**********************************************************************************************/
/*
* Original code from the Cilk project
*
* Copyright (c) 2000 Massachusetts Institute of Technology
* Copyright (c) 2000 Matteo Frigo
*/
/*
* this program uses an algorithm that we call `cilksort'.
* The algorithm is essentially mergesort:
*
* cilksort(in[1..n]) =
* spawn cilksort(in[1..n/2], tmp[1..n/2])
* spawn cilksort(in[n/2..n], tmp[n/2..n])
* sync
* spawn cilkmerge(tmp[1..n/2], tmp[n/2..n], in[1..n])
*
*
* The procedure cilkmerge does the following:
*
* cilkmerge(A[1..n], B[1..m], C[1..(n+m)]) =
* find the median of A \union B using binary
* search. The binary search gives a pair
* (ma, mb) such that ma + mb = (n + m)/2
* and all elements in A[1..ma] are smaller than
* B[mb..m], and all the B[1..mb] are smaller
* than all elements in A[ma..n].
*
* spawn cilkmerge(A[1..ma], B[1..mb], C[1..(n+m)/2])
* spawn cilkmerge(A[ma..m], B[mb..n], C[(n+m)/2 .. (n+m)])
* sync
*
* The algorithm appears for the first time (AFAIK) in S. G. Akl and
* N. Santoro, "Optimal Parallel Merging and Sorting Without Memory
* Conflicts", IEEE Trans. Comp., Vol. C-36 No. 11, Nov. 1987 . The
* paper does not express the algorithm using recursion, but the
* idea of finding the median is there.
*
* For cilksort of n elements, T_1 = O(n log n) and
* T_\infty = O(log^3 n). There is a way to shave a
* log factor in the critical path (left as homework).
*/
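/*
 * Worked example of the split used by cilkmerge below:
 * A = [1,3,5,7], B = [2,4,6,8]. split1 points at the middle of the
 * larger range, A[2] = 5; binsplit(5, B) returns the greatest element
 * <= 5, namely B[1] = 4. The lower halves [1,3] and [2,4] are merged
 * into the front of the output, 5 is written directly after them, and
 * the upper halves [7] and [6,8] are merged behind it, giving
 * [1,2,3,4,5,6,7,8].
 */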
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bots.h"
#include "app-desc.h"
ELM *array, *tmp;
static unsigned long rand_nxt = 0;
static inline unsigned long my_rand(void)
{
rand_nxt = rand_nxt * 1103515245 + 12345;
return rand_nxt;
}
static inline void my_srand(unsigned long seed)
{
rand_nxt = seed;
}
static inline ELM med3(ELM a, ELM b, ELM c)
{
if (a < b) {
if (b < c) {
return b;
} else {
if (a < c)
return c;
else
return a;
}
} else {
if (b > c) {
return b;
} else {
if (a > c)
return c;
else
return a;
}
}
}
/*
* simple approach for now; a better median-finding
* may be preferable
*/
static inline ELM choose_pivot(ELM *low, ELM *high)
{
return med3(*low, *high, low[(high - low) / 2]);
}
static ELM *seqpart(ELM *low, ELM *high)
{
ELM pivot;
ELM h, l;
ELM *curr_low = low;
ELM *curr_high = high;
pivot = choose_pivot(low, high);
while (1) {
while ((h = *curr_high) > pivot)
curr_high--;
while ((l = *curr_low) < pivot)
curr_low++;
if (curr_low >= curr_high)
break;
*curr_high-- = l;
*curr_low++ = h;
}
/*
* I don't know if this is really necessary.
* The problem is that the pivot is not always the
* first element, and the partition may be trivial.
* However, if the partition is trivial, then
* *high is the largest element, whence the following
* code.
*/
if (curr_high < high)
return curr_high;
else
return curr_high - 1;
}
#define swap(a, b) \
{ \
ELM tmp;\
tmp = a;\
a = b;\
b = tmp;\
}
static void insertion_sort(ELM *low, ELM *high)
{
ELM *p, *q;
ELM a, b;
for (q = low + 1; q <= high; ++q) {
a = q[0];
for (p = q - 1; p >= low && (b = p[0]) > a; p--)
p[1] = b;
p[1] = a;
}
}
/*
* tail-recursive quicksort, almost unrecognizable :-)
*/
void seqquick(ELM *low, ELM *high)
{
ELM *p;
while (high - low >= bots_app_cutoff_value_2) {
p = seqpart(low, high);
seqquick(low, p);
low = p + 1;
}
insertion_sort(low, high);
}
void seqmerge(ELM *low1, ELM *high1, ELM *low2, ELM *high2,
ELM *lowdest)
{
ELM a1, a2;
/*
* The following 'if' statement is not necessary
* for the correctness of the algorithm, and is
* in fact subsumed by the rest of the function.
* However, it is a few percent faster. Here is why.
*
* The merging loop below has something like
* if (a1 < a2) {
* *dest++ = a1;
* ++low1;
* if (end of array) break;
* a1 = *low1;
* }
*
* Now, a1 is needed immediately in the next iteration
* and there is no way to mask the latency of the load.
* A better approach is to load a1 *before* the end-of-array
* check; the problem is that we may be speculatively
* loading an element out of range. While this is
* probably not a problem in practice, yet I don't feel
* comfortable with an incorrect algorithm. Therefore,
* I use the 'fast' loop on the array (except for the last
* element) and the 'slow' loop for the rest, saving both
* performance and correctness.
*/
if (low1 < high1 && low2 < high2) {
a1 = *low1;
a2 = *low2;
for (;;) {
if (a1 < a2) {
*lowdest++ = a1;
a1 = *++low1;
if (low1 >= high1)
break;
} else {
*lowdest++ = a2;
a2 = *++low2;
if (low2 >= high2)
break;
}
}
}
if (low1 <= high1 && low2 <= high2) {
a1 = *low1;
a2 = *low2;
for (;;) {
if (a1 < a2) {
*lowdest++ = a1;
++low1;
if (low1 > high1)
break;
a1 = *low1;
} else {
*lowdest++ = a2;
++low2;
if (low2 > high2)
break;
a2 = *low2;
}
}
}
if (low1 > high1) {
memcpy(lowdest, low2, sizeof(ELM) * (high2 - low2 + 1));
} else {
memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1));
}
}
#define swap_indices(a, b) \
{ \
ELM *tmp;\
tmp = a;\
a = b;\
b = tmp;\
}
ELM *binsplit(ELM val, ELM *low, ELM *high)
{
/*
* returns a pointer to the slot holding the greatest element <= val. If val
* is less than all elements, returns low-1
*/
ELM *mid;
while (low != high) {
mid = low + ((high - low + 1) >> 1);
if (val <= *mid)
high = mid - 1;
else
low = mid;
}
if (*low > val)
return low - 1;
else
return low;
}
void cilkmerge_par(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest)
{
/*
* Cilkmerge: Merges range [low1, high1] with range [low2, high2]
* into the range [lowdest, ...]
*/
ELM *split1, *split2; /*
* where each of the ranges are broken for
* recursive merge
*/
long int lowsize; /*
* total size of lower halves of two
* ranges - 2
*/
/*
* We want to take the middle element (indexed by split1) from the
* larger of the two arrays. The following code assumes that split1
* is taken from range [low1, high1]. So if [low1, high1] is
* actually the smaller range, we should swap it with [low2, high2]
*/
if (high2 - low2 > high1 - low1) {
swap_indices(low1, low2);
swap_indices(high1, high2);
}
if (high2 < low2) {
/* smaller range is empty */
/* ranges are inclusive: [low1, high1] holds high1 - low1 + 1 elements,
matching the convention of seqmerge's final memcpy */
memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1));
return;
}
if (high2 - low2 < bots_app_cutoff_value ) {
seqmerge(low1, high1, low2, high2, lowdest);
return;
}
/*
* Basic approach: Find the middle element of one range (indexed by
* split1). Find where this element would fit in the other range
* (indexed by split 2). Then merge the two lower halves and the two
* upper halves.
*/
split1 = ((high1 - low1 + 1) / 2) + low1;
split2 = binsplit(*split1, low2, high2);
lowsize = split1 - low1 + split2 - low2;
/*
* directly put the splitting element into
* the appropriate location
*/
*(lowdest + lowsize + 1) = *split1;
#pragma omp task
cilkmerge_par(low1, split1 - 1, low2, split2, lowdest);
#pragma omp task
cilkmerge_par(split1 + 1, high1, split2 + 1, high2,
lowdest + lowsize + 2);
#pragma omp taskwait
return;
}
void cilksort_par(ELM *low, ELM *tmp, long size)
{
/*
* divide the input in four parts of the same size (A, B, C, D)
* Then:
* 1) recursively sort A, B, C, and D (in parallel)
* 2) merge A and B into tmp1, and C and D into tmp2 (in parallel)
* 3) merge tmp1 and tmp2 into the original array
*/
long quarter = size / 4;
ELM *A, *B, *C, *D, *tmpA, *tmpB, *tmpC, *tmpD;
if (size < bots_app_cutoff_value_1 ) {
/* sort sequentially below the cutoff bots_app_cutoff_value_1 */
seqquick(low, low + size - 1);
return;
}
A = low;
tmpA = tmp;
B = A + quarter;
tmpB = tmpA + quarter;
C = B + quarter;
tmpC = tmpB + quarter;
D = C + quarter;
tmpD = tmpC + quarter;
#pragma omp task
cilksort_par(A, tmpA, quarter);
#pragma omp task
cilksort_par(B, tmpB, quarter);
#pragma omp task
cilksort_par(C, tmpC, quarter);
#pragma omp task
cilksort_par(D, tmpD, size - 3 * quarter);
#pragma omp taskwait
#pragma omp task
cilkmerge_par(A, A + quarter - 1, B, B + quarter - 1, tmpA);
#pragma omp task
cilkmerge_par(C, C + quarter - 1, D, low + size - 1, tmpC);
#pragma omp taskwait
cilkmerge_par(tmpA, tmpC - 1, tmpC, tmpA + size - 1, A);
}
void scramble_array( ELM *array )
{
unsigned long i;
unsigned long j;
for (i = 0; i < bots_arg_size; ++i) {
j = my_rand();
j = j % bots_arg_size;
swap(array[i], array[j]);
}
}
void fill_array( ELM *array )
{
unsigned long i;
my_srand(1);
/* first, fill with integers 1..size */
for (i = 0; i < bots_arg_size; ++i) {
array[i] = i;
}
}
void sort_init ( void )
{
/* Checking arguments */
if (bots_arg_size < 4) {
bots_message("%s can not be less than 4, using 4 as a parameter.\n", BOTS_APP_DESC_ARG_SIZE );
bots_arg_size = 4;
}
if (bots_app_cutoff_value < 2) {
bots_message("%s can not be less than 2, using 2 as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF);
bots_app_cutoff_value = 2;
}
else if (bots_app_cutoff_value > bots_arg_size ) {
bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF, bots_arg_size);
bots_app_cutoff_value = bots_arg_size;
}
if (bots_app_cutoff_value_1 > bots_arg_size ) {
bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_1, bots_arg_size);
bots_app_cutoff_value_1 = bots_arg_size;
}
if (bots_app_cutoff_value_2 > bots_arg_size ) {
bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, bots_arg_size);
bots_app_cutoff_value_2 = bots_arg_size;
}
if (bots_app_cutoff_value_2 > bots_app_cutoff_value_1) {
bots_message("%s can not be greather than %s, using %d as a parameter.\n",
BOTS_APP_DESC_ARG_CUTOFF_2,
BOTS_APP_DESC_ARG_CUTOFF_1,
bots_app_cutoff_value_1
);
bots_app_cutoff_value_2 = bots_app_cutoff_value_1;
}
array = (ELM *) malloc(bots_arg_size * sizeof(ELM));
tmp = (ELM *) malloc(bots_arg_size * sizeof(ELM));
fill_array(array);
scramble_array(array);
}
void sort_par ( void )
{
bots_message("Computing multisort algorithm (n=%d) ", bots_arg_size);
#pragma omp parallel
#pragma omp single nowait
#pragma omp task
cilksort_par(array, tmp, bots_arg_size);
bots_message(" completed!\n");
}
int sort_verify ( void )
{
int i, success = 1;
for (i = 0; i < bots_arg_size; ++i)
if (array[i] != i)
success = 0;
return success ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL;
}
|
GB_unop__one_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__one_fp64_fp64
// op(A') function: GB_unop_tran__one_fp64_fp64
// C type: double
// A type: double
// cast: ;
// unaryop: cij = 1
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1 ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = 1 ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__one_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = 1 ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = 1 ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__one_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix_op-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file matrix_op-inl.h
* \brief Function definition of matrix related operators
*/
#ifndef MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#define MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <string>
#include <algorithm>
#include <utility>
#include <type_traits>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "../channel_op_common.h"
#include "../mxnet_op.h"
#include "broadcast_reduce_op.h"
#include "./init_op.h"
#include "../../common/static_array.h"
#include "./slice-inl.h"
#if MXNET_USE_CUDA
#include <thrust/device_vector.h>
#endif
#ifdef __CUDACC__
#include "./pseudo2DTranspose_op-inl.cuh"
#endif
namespace mxnet {
namespace op {
struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
mxnet::TShape target_shape;
bool keep_highest;
mxnet::Tuple<int> shape;
bool reverse;
DMLC_DECLARE_PARAMETER(ReshapeParam) {
DMLC_DECLARE_FIELD(shape).set_default(mxnet::Tuple<int>()).describe("The target shape");
DMLC_DECLARE_FIELD(reverse).set_default(false).describe(
"If true then the special values are inferred from right to left");
DMLC_DECLARE_FIELD(target_shape)
.set_default(mxnet::TShape(0, -1))
.describe(
"(Deprecated! Use ``shape`` instead.) "
"Target new shape. One and only one dim can be 0, "
"in which case it will be inferred from the rest of dims");
DMLC_DECLARE_FIELD(keep_highest)
.set_default(false)
.describe(
"(Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged."
"If set to true, then the first dim in target_shape is ignored,"
"and always fixed as input");
}
bool operator==(const ReshapeParam& other) const {
return this->target_shape == other.target_shape && this->keep_highest == other.keep_highest &&
this->shape == other.shape && this->reverse == other.reverse;
}
};
#if MXNET_USE_ONEDNN == 1
bool ReshapeStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs);
void ReshapeComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs);
#endif // MXNET_USE_ONEDNN == 1
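// Worked examples for the special shape values handled by InferReshapeShape
// below, all with input shape (2,3,4):
//   shape=(6,1,-1)    -> (6,1,4)   (-1 infers the remaining size)
//   shape=(0,-2)      -> (2,3,4)   (0 keeps a source dim, -2 copies the rest)
//   shape=(-3,4)      -> (6,4)     (-3 merges two consecutive source dims)
//   shape=(-4,1,2,-2) -> (1,2,3,4) (-4 splits a source dim into the next two values)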
template <typename IType>
inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
const mxnet::TShape& dshape,
bool reverse) {
std::vector<IType> dshape_vec;
std::vector<IType> param_shape_vec(shape.begin(), shape.end());
for (int i = 0; i < dshape.ndim(); ++i) {
dshape_vec.push_back(dshape[i]);
}
std::vector<IType> tmp;
size_t src_idx = 0;
int inf_idx = -1;
if (reverse) {
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
}
auto dshape_len = dshape_vec.size();
auto params_len = param_shape_vec.size();
for (size_t i = 0; i < params_len; ++i) {
IType proposed_dim = param_shape_vec[i];
if (proposed_dim == 0) {
// keep same
CHECK_LT(src_idx, dshape_len);
tmp.push_back(dshape_vec[src_idx++]);
} else if (proposed_dim == -1) {
// infer
CHECK_LT(inf_idx, 0) << "One and only one dim can be inferred";
inf_idx = i;
tmp.push_back(1);
src_idx++;
} else if (proposed_dim == -2) {
// copy all remaining dims from source
while (src_idx < dshape_len) {
const int dn = dshape_vec[src_idx++];
tmp.push_back(dn);
}
} else if (proposed_dim == -3) {
// merge two dims from source
CHECK_LT(src_idx, dshape_len - 1);
const int d1 = dshape_vec[src_idx++];
const int d2 = dshape_vec[src_idx++];
if (!mxnet::dim_size_is_known(d1) || !mxnet::dim_size_is_known(d2)) {
tmp.push_back(-1);
} else {
tmp.push_back(d1 * d2);
}
} else if (proposed_dim == -4) {
// split the source dim s into two dims
// read the left dim and then the right dim (either can be -1)
CHECK_LT(i + 2, params_len);
CHECK_LT(src_idx, dshape_len);
const int d0 = dshape_vec[src_idx++];
IType d1 = param_shape_vec[++i];
IType d2 = param_shape_vec[++i];
CHECK(d1 != -1 || d2 != -1) << "Split dims cannot both be -1.";
if (d1 == -1 && d0 >= 0)
d1 = d0 / d2; // d0 must be known to do this
if (d2 == -1 && d0 >= 0)
d2 = d0 / d1; // d0 must be known to do this
CHECK(d1 * d2 == static_cast<IType>(d0) || static_cast<IType>(d0) == IType(-1))
<< "Split dims " << d1 << ", " << d2 << " do not divide original dim " << d0;
tmp.push_back(d1);
tmp.push_back(d2);
} else {
// greater than 0, new shape
tmp.push_back(proposed_dim);
src_idx++;
}
}
if (inf_idx >= 0) {
if (shape_is_known(dshape)) {
IType new_size = 1;
for (IType x : tmp)
new_size *= x;
tmp[inf_idx] = dshape.Size() / new_size;
} else {
tmp[inf_idx] = -1;
}
}
if (reverse) {
std::reverse(param_shape_vec.begin(), param_shape_vec.end());
std::reverse(dshape_vec.begin(), dshape_vec.end());
std::reverse(tmp.begin(), tmp.end());
}
mxnet::TShape oshape(tmp.begin(), tmp.end());
return oshape;
}
inline bool ReverseReshapeInferShape(mxnet::TShape* in, const mxnet::TShape& out) {
if (shape_is_known(*in) && shape_is_known(out)) {
return true;
} else if (!shape_is_known(out)) {
return false;
} else {
int zero_axis = -1;
int known_dim_size_prod = 1;
for (int i = 0; i < in->ndim(); i++) {
if (!mxnet::dim_size_is_known(*in, i)) {
if (zero_axis != -1)
return false; // more than 1 zero found.
else
zero_axis = i;
} else {
known_dim_size_prod *= (*in)[i];
}
}
if (zero_axis == -1)
return false; // ndim of *in is itself unknown; nothing to infer
(*in)[zero_axis] = out.Size() / known_dim_size_prod;
return true;
}
}
inline bool ReshapeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const ReshapeParam& param_ = nnvm::get<ReshapeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape))
return false;
mxnet::TShape oshape;
if (param_.shape.ndim() != 0) {
oshape = InferReshapeShape(param_.shape, dshape, param_.reverse);
} else if (param_.target_shape.ndim() != -1) {
LOG(INFO) << "Using target_shape will be deprecated.";
oshape = param_.target_shape;
int neg_count = 0;
index_t inf_idx = 0;
index_t start_idx = param_.keep_highest ? 1 : 0;
if (param_.keep_highest) {
oshape[0] = dshape[0];
}
for (int i = start_idx; i < oshape.ndim(); ++i) {
if (oshape[i] == 0) {
neg_count++;
inf_idx = i;
}
}
if (neg_count == 1) {
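// Set the inferred dim to 1 first so that oshape.Size() below evaluates to
// the product of the known dims only.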
oshape[inf_idx] = 1;
oshape[inf_idx] = dshape.Size() / oshape.Size();
}
} else {
return shape_is_known((*out_attrs)[0]) &&
ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
ReverseReshapeInferShape(&dshape, oshape);
#if 0
CHECK_EQ(oshape.Size(), dshape.Size())
<< "Target shape size is different to source. "
<< "Target: " << oshape
<< "\nSource: " << dshape;
#endif
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return ReverseReshapeInferShape(&(*in_attrs)[0], (*out_attrs)[0]);
}
inline bool FlattenShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!shape_is_known(dshape))
return false;
size_t target_dim = 1;
for (int i = 1; i < dshape.ndim(); ++i) {
target_dim *= dshape[i];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::Shape2(dshape[0], target_dim));
return true;
}
struct TransposeParam : public dmlc::Parameter<TransposeParam> {
mxnet::TShape axes;
DMLC_DECLARE_PARAMETER(TransposeParam) {
DMLC_DECLARE_FIELD(axes)
.set_default(mxnet::TShape(0, -1))
.describe("Target axis order. By default the axes will be inverted.");
}
bool operator==(const TransposeParam& other) const {
return this->axes == other.axes;
}
};
/*!
* \brief This function performs transpose operation on a 2D matrix by utilizing the L1 cache
* \param in input tensor
* \param out output tensor
* \param row shape of dim 0 of input
* \param col shape of dim 1 of input
* \tparam DType Data type
* \tparam is_addto
*/
template <typename DType, bool is_addto>
MSHADOW_XINLINE void Transpose2D(const DType* in, DType* out, index_t row, index_t col) {
// ensure cache line hits and prevent cache miss for any configuration
// L1 cache size to be utilized = 32kb = 2^15
// Largest size of a single unit of any dtype <= 8 byte = 2^3
// Number of elements - (2^15/2^3) = 2^12
// Block size - 2^6 x 2^6 (64 x 64)
// But we could leverage unrolling of for loops (for parallelization)
// Block size - 2^5 x 2^5 (32 x 32) with a potential 4x unroll of the inner loops
// blocksize * blocksize * num_threads = cache_size / dtype_size
// Instead of explicit unroll, let compiler figure out optimal unroll factor
const index_t blocksize = 32;
// collapse 2 parallelizes 2 for loops
// inner 2 for loops aren't parallelized to prevent cache miss
// Microsoft Visual C++ compiler does not support omp collapse
#ifdef _MSC_VER
#pragma omp parallel for
#else
#pragma omp parallel for collapse(2)
#endif // _MSC_VER
for (index_t i = 0; i < row; i += blocksize) {
for (index_t j = 0; j < col; j += blocksize) {
// transpose the block
for (index_t a = j; (a < blocksize + j) && (a < col); ++a) {
for (index_t b = i; (b < blocksize + i) && (b < row); ++b) {
if (!is_addto) {
out[a * row + b] = in[b * col + a];
} else {
out[a * row + b] += in[b * col + a];
}
}
}
}
}
}
inline bool IsIdentityTranspose(const TShape& axes) {
for (dim_t i = 0; i < axes.ndim(); i++) {
if (axes[i] != i)
return false;
}
return true;
}
template <typename xpu, bool is_addto = false>
bool TransposeCommonImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes) {
// return true when running successfully, otherwise false
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(src.type_flag_, ret.type_flag_);
// zero-size tensor, no need to compute
if (src.shape_.Size() == 0U)
return true;
Stream<xpu>* s = ctx.get_stream<xpu>();
#ifdef __CUDACC__
// This transpose can be used only if there exist n and m such that:
// params = (0, ..., n-1, n+m, ..., params.size, n, ..., n+m-1)
// Example: (0, 2, 3, 1) or (0, 3, 1, 2), but not (0, 2, 1, 3).
if (isPseudo2DTranspose(axes)) {
MSHADOW_TYPE_SWITCH(
ret.type_flag_, DType, { transpose_pseudo2D<DType, is_addto>(ret, src, axes, s); });
return true;
}
#endif
// Special handle the identity case
if (IsIdentityTranspose(axes)) {
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
Tensor<xpu, 1, DType> in = src.get_with_shape<xpu, 1, DType>(mshadow::Shape1(src.Size()), s);
Tensor<xpu, 1, DType> out = ret.get_with_shape<xpu, 1, DType>(mshadow::Shape1(ret.Size()), s);
if (!is_addto) {
// Use memcpy to accelerate the speed
Copy(out, in, s);
} else {
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, kAddTo>, xpu>::Launch(
s, ret.Size(), out.dptr_, in.dptr_);
}
});
return true;
}
// Handle the general transpose case
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
switch (axes.ndim()) {
case 2: {
Tensor<xpu, 2, DType> in = src.get<xpu, 2, DType>(s);
Tensor<xpu, 2, DType> out = ret.get<xpu, 2, DType>(s);
if (ctx.get_ctx().dev_mask() == cpu::kDevMask) {
Transpose2D<DType, is_addto>(in.dptr_, out.dptr_, in.shape_[0], in.shape_[1]);
} else {
LOG(FATAL) << "Not Implemented. We should never reach here because the 2D case "
"in GPU has been covered by transpose_pseudo2D."
" Report an issue in Github.";
}
break;
}
case 3: {
Tensor<xpu, 3, DType> in = src.get<xpu, 3, DType>(s);
Tensor<xpu, 3, DType> out = ret.get<xpu, 3, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<3>());
} else {
out += transpose(in, axes.get<3>());
}
break;
}
case 4: {
Tensor<xpu, 4, DType> in = src.get<xpu, 4, DType>(s);
Tensor<xpu, 4, DType> out = ret.get<xpu, 4, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<4>());
} else {
out += transpose(in, axes.get<4>());
}
break;
}
case 5: {
Tensor<xpu, 5, DType> in = src.get<xpu, 5, DType>(s);
Tensor<xpu, 5, DType> out = ret.get<xpu, 5, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<5>());
} else {
out += transpose(in, axes.get<5>());
}
break;
}
case 6: {
Tensor<xpu, 6, DType> in = src.get<xpu, 6, DType>(s);
Tensor<xpu, 6, DType> out = ret.get<xpu, 6, DType>(s);
if (!is_addto) {
out = transpose(in, axes.get<6>());
} else {
out += transpose(in, axes.get<6>());
}
break;
}
default:
// return false when dimensions > 6
return false;
break;
}
});
return true;
}
template <typename xpu, bool is_addto = false>
void TransposeImpl(RunContext ctx, const TBlob& src, const TBlob& ret, const mxnet::TShape& axes) {
CHECK_LE(axes.ndim(), 6) << "TransposeImpl supports at most 6 dimensions";
CHECK((TransposeCommonImpl<xpu, is_addto>(ctx, src, ret, axes)))
<< "Failed to execute TransposeImpl Operator";
}
template <bool is_addto>
struct TransposeExKernel {
/*!
* \brief
* \param tid global thread id
* \param out_data output data
* \param in_data input data
* \param strides input strides and output strides
* \param ndim the number of dimension
*/
template <typename DType>
MSHADOW_XINLINE static void Map(index_t tid,
DType* out_data,
const DType* in_data,
const dim_t* strides,
const int ndim) {
// tid is the index of input data
const dim_t* const out_strides = strides + ndim;
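// Decompose tid into a multi-dimensional index using the input strides,
// then re-linearize that index with the output strides to obtain out_id.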
index_t k = tid;
index_t out_id = 0;
for (int i = 0; i < ndim; ++i) {
out_id += (k / strides[i]) * out_strides[i];
k %= strides[i];
}
if (is_addto)
out_data[out_id] += in_data[tid];
else
out_data[out_id] = in_data[tid];
}
};
template <typename xpu, bool is_addto = false>
void TransposeExImpl(RunContext ctx,
const TBlob& src,
const TBlob& ret,
const mxnet::TShape& axes,
mshadow::Tensor<xpu, 1, dim_t>& strides_xpu) { // NOLINT(*)
/*
* If ndim <= 6, it is not necessary to allocate any space for `strides_xpu`
* If ndim > 6, `strides_xpu` should be allocated `ndim * 2` elements
*/
using namespace mshadow;
using namespace mshadow::expr;
if (TransposeCommonImpl<xpu, is_addto>(ctx, src, ret, axes))
return;
CHECK_GT(axes.ndim(), 6) << "Failed to execute TransposeExImpl when axes.ndim() <= 6";
Stream<xpu>* s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(ret.type_flag_, DType, {
CHECK_EQ(strides_xpu.MSize(), axes.ndim() * 2)
<< "If ndim > 6, `strides_xpu` should be allocated `ndim * 2` elements";
const mxnet::TShape& in_shape = src.shape_;
// strides: in_strides and out_strides
const int ndim = axes.ndim();
std::vector<dim_t> strides(ndim * 2);
// compute in_strides
strides[ndim - 1] = 1;
for (int i = ndim - 2; i >= 0; --i) {
strides[i] = strides[i + 1] * in_shape[i + 1];
}
// compute out_strides
std::vector<dim_t> tmp_strides(ndim);
tmp_strides[ndim - 1] = 1;
for (int i = ndim - 2; i >= 0; --i) {
tmp_strides[i] = tmp_strides[i + 1] * in_shape[axes[i + 1]];
}
// reorder tmp_strides to out_strides
dim_t* const out_strides = &strides[ndim];
for (int i = 0; i < ndim; ++i) {
out_strides[axes[i]] = tmp_strides[i];
}
Shape<1> strides_shape;
strides_shape[0] = ndim * 2;
Tensor<cpu, 1, dim_t> strides_cpu(strides.data(), strides_shape);
// copy arguments into xpu context
Copy(strides_xpu, strides_cpu, s);
const DType* in = src.dptr<DType>();
DType* out = ret.dptr<DType>();
if (is_addto) {
mxnet_op::Kernel<TransposeExKernel<true>, xpu>::Launch(
s, in_shape.Size(), out, in, strides_xpu.dptr_, ndim);
} else {
mxnet_op::Kernel<TransposeExKernel<false>, xpu>::Launch(
s, in_shape.Size(), out, in, strides_xpu.dptr_, ndim);
}
});
}
template <typename xpu>
mshadow::Tensor<xpu, 1, dim_t> GetTransposeExWorkspace(const OpContext& ctx,
const mxnet::TShape& axes) {
if (axes.ndim() > 6) {
// allocate workspace when axes.ndim() > 6
mshadow::Shape<1> strides_shape;
strides_shape[0] = axes.ndim() * 2;
return ctx.requested[0].get_space_typed<xpu, 1, dim_t>(strides_shape, ctx.get_stream<xpu>());
}
return {};
}
// matrix transpose
template <typename xpu>
void Transpose(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
if (req[0] == kNullOp) {
return;
}
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK(req[0] == kWriteTo || req[0] == kAddTo)
<< "Transpose only supports kNullOp, kWriteTo and kAddTo";
mxnet::TShape axes;
if (param.axes.ndim() == 0) {
axes = mxnet::TShape(inputs[0].ndim(), -1);
for (int i = 0; i < axes.ndim(); ++i) {
axes[i] = axes.ndim() - 1 - i;
}
} else {
axes = common::CanonicalizeAxes(param.axes);
}
mshadow::Tensor<xpu, 1, dim_t> workspace = GetTransposeExWorkspace<xpu>(ctx, axes);
if (req[0] == kAddTo) {
TransposeExImpl<xpu, true>(ctx.run_ctx, inputs[0], outputs[0], axes, workspace);
} else {
TransposeExImpl<xpu, false>(ctx.run_ctx, inputs[0], outputs[0], axes, workspace);
}
}
inline bool TransposeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& shp = (*in_attrs)[0];
mxnet::TShape& out_shp = (*out_attrs)[0];
if (!mxnet::ndim_is_known(shp) && !mxnet::ndim_is_known(out_shp))
return false; // none of the shapes is known
if (out_shp.ndim() >= 0 && shp.ndim() >= 0)
CHECK_EQ(out_shp.ndim(), shp.ndim());
mxnet::TShape get(std::max(shp.ndim(), out_shp.ndim()), -1);
mxnet::TShape ret(std::max(shp.ndim(), out_shp.ndim()), -1);
if (param.axes.ndim() == 0) {
for (int i = 0; i < shp.ndim(); ++i) {
ret[i] = shp[shp.ndim() - 1 - i];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[out_shp.ndim() - 1 - i] = out_shp[i]; // index with out_shp's ndim: shp may still be unknown here
}
} else {
CHECK_EQ(std::max(shp.ndim(), out_shp.ndim()), param.axes.ndim());
for (int i = 0; i < shp.ndim(); ++i) {
CHECK(param.axes[i] < static_cast<int64_t>(shp.ndim()));
ret[i] = shp[param.axes[i]];
}
for (int i = 0; i < out_shp.ndim(); ++i) {
get[param.axes[i]] = out_shp[i];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, 0, get);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
return shape_is_known(ret);
}
struct ExpandDimParam : public dmlc::Parameter<ExpandDimParam> {
int axis;
DMLC_DECLARE_PARAMETER(ExpandDimParam) {
DMLC_DECLARE_FIELD(axis).describe(
"Position where new axis is to be inserted. Suppose that "
"the input `NDArray`'s dimension is `ndim`, the range of "
"the inserted axis is `[-ndim, ndim]`");
}
bool operator==(const ExpandDimParam& other) const {
return this->axis == other.axis;
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s;
axis_s << axis;
(*dict)["axis"] = axis_s.str();
}
};
inline bool ExpandDimShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const ExpandDimParam& param = nnvm::get<ExpandDimParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& oshape = (*out_attrs)[0];
if (!mxnet::ndim_is_known(ishape) && !mxnet::ndim_is_known(oshape)) {
return false;
}
int indim = ishape.ndim();
bool unknown_ishape = false;
if (-1 == indim) {
indim = oshape.ndim() - 1;
unknown_ishape = true;
}
int axis = param.axis;
if (axis < 0) {
axis += indim + 1;
}
CHECK(axis >= 0 && axis <= indim) << "axis must be in the range [" << -indim << ", " << indim
<< "] (" << param.axis << " provided)";
mxnet::TShape ret(indim + 1, -1);
for (int i = 0; i < axis; ++i) {
ret[i] = (unknown_ishape ? -1 : ishape[i]);
}
ret[axis] = 1;
for (int i = axis + 1; i < indim + 1; ++i) {
ret[i] = (unknown_ishape ? -1 : ishape[i - 1]);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
ret = mxnet::TShape(indim, -1);
for (int i = 0; i < axis; ++i)
ret[i] = oshape[i];
for (int i = axis + 1; i < indim + 1; ++i)
ret[i - 1] = oshape[i];
SHAPE_ASSIGN_CHECK(*in_attrs, 0, ret);
return shape_is_known(in_attrs->at(0)) && shape_is_known(out_attrs->at(0));
}
// Currently MKLDNN only supports step = 1 or step has no value
inline bool SupportMKLDNNSlice(const SliceParam& param) {
if (param.step.ndim() == 0U)
return true;
for (int i = 0; i < param.step.ndim(); ++i) {
if (param.step[i].has_value() && param.step[i].value() != 1)
return false;
}
return true;
}
inline bool SliceForwardInferStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), 1);
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
const auto& in_stype = in_attrs->at(0);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const auto dispatch_ex = DispatchMode::kFComputeEx;
// If step = 1, no need to fallback; otherwise fallback to dense
bool trivial_step = false;
if (param.step.ndim() == 0U) {
trivial_step = true;
} else if (param.step.ndim() == 1U &&
(!param.step[0].has_value() || param.step[0].value() == 1)) {
trivial_step = true;
}
if (in_stype == kDefaultStorage) {
#if MXNET_USE_ONEDNN == 1
if (dev_mask == Context::kCPU && MKLDNNEnvSet() && SupportMKLDNNSlice(param)) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, dispatch_ex);
}
#endif
if (!dispatched) {
dispatched =
storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute);
}
}
if (!dispatched && in_stype == kCSRStorage && trivial_step) {
dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
// slice the indptr of a csr
struct SliceCsrIndPtr {
template <typename IType>
MSHADOW_XINLINE static void Map(int i, IType* out, const IType* in, const IType* base) {
KERNEL_ASSIGN(out[i], kWriteTo, in[i] - *base);
}
};
/*
* a wrapper to launch SliceCsrIndPtr kernel.
* slice [src[begin] .. src[end]) and store in dst[0, end - begin)
*/
template <typename xpu, typename IType>
void SliceCsrIndPtrImpl(const int begin,
const int end,
RunContext ctx,
const IType* src,
IType* dst) {
using namespace mshadow;
using namespace mxnet_op;
Stream<xpu>* s = ctx.get_stream<xpu>();
int indptr_len = end - begin + 1;
Kernel<SliceCsrIndPtr, xpu>::Launch(s, indptr_len, dst, src + begin, src + begin);
}
/*
* Slice a CSR NDArray for first dimension
*/
template <typename xpu>
void SliceDimOneCsrImpl(const mxnet::TShape& begin,
const mxnet::TShape& end,
const OpContext& ctx,
const NDArray& in,
const NDArray& out) {
using namespace mshadow;
using namespace mxnet_op;
using namespace csr;
nnvm::dim_t begin_row = begin[0];
nnvm::dim_t end_row = end[0];
nnvm::dim_t indptr_len = end_row - begin_row + 1;
out.CheckAndAllocAuxData(kIndPtr, Shape1(indptr_len));
// assume idx indptr share the same type
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIndPtr), RType, {
MSHADOW_IDX_TYPE_SWITCH(in.aux_type(kIdx), IType, {
MSHADOW_TYPE_SWITCH(in.dtype(), DType, {
RType* in_indptr = in.aux_data(kIndPtr).dptr<RType>();
RType* out_indptr = out.aux_data(kIndPtr).dptr<RType>();
SliceCsrIndPtrImpl<xpu, RType>(begin_row, end_row, ctx.run_ctx, in_indptr, out_indptr);
Stream<xpu>* s = ctx.get_stream<xpu>();
RType nnz = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&nnz, Shape1(1)),
Tensor<xpu, 1, RType>(out_indptr + indptr_len - 1, Shape1(1), s));
// return csr zeros if nnz = 0
if (nnz == 0) {
out.set_aux_shape(kIdx, Shape1(0));
return;
}
// copy indices and values
out.CheckAndAllocAuxData(kIdx, Shape1(nnz));
out.CheckAndAllocData(Shape1(nnz));
IType* in_idx = in.aux_data(kIdx).dptr<IType>();
IType* out_idx = out.aux_data(kIdx).dptr<IType>();
DType* in_data = in.data().dptr<DType>();
DType* out_data = out.data().dptr<DType>();
RType offset = 0;
mshadow::Copy(Tensor<cpu, 1, RType>(&offset, Shape1(1)),
Tensor<xpu, 1, RType>(in_indptr + begin_row, Shape1(1), s));
mshadow::Copy(Tensor<xpu, 1, IType>(out_idx, Shape1(nnz), s),
Tensor<xpu, 1, IType>(in_idx + offset, Shape1(nnz), s),
s);
mshadow::Copy(Tensor<xpu, 1, DType>(out_data, Shape1(nnz), s),
Tensor<xpu, 1, DType>(in_data + offset, Shape1(nnz), s),
s);
});
});
});
}
/*!
* \brief slice a CSRNDArray for two dimensions
*/
struct SliceDimTwoCsrAssign {
/*!
* \brief This function slices a CSRNDArray on axis one between begin_col and end_col
* \param i loop index
* \param out_idx output csr ndarray column indices
* \param out_data output csr ndarray data
* \param out_indptr output csr ndarray row index pointer
* \param in_idx input csr ndarray column indices
* \param in_data input csr ndarray data
* \param in_indptr input csr ndarray row index pointer
   * \param begin_col begin column index
   * \param end_col end column index
*/
template <typename IType, typename RType, typename DType>
MSHADOW_XINLINE static void Map(int i,
IType* out_idx,
DType* out_data,
const RType* out_indptr,
const IType* in_idx,
const DType* in_data,
const RType* in_indptr,
const int begin_col,
const int end_col) {
RType ind = out_indptr[i];
for (RType j = in_indptr[i]; j < in_indptr[i + 1]; j++) {
// indices of CSRNDArray are in ascending order per row
if (in_idx[j] >= end_col) {
break;
} else if (in_idx[j] >= begin_col) {
out_idx[ind] = in_idx[j] - begin_col;
out_data[ind] = in_data[j];
ind++;
}
}
}
};
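// Worked example (hypothetical values): for an input row with column indices
// in_idx = {0, 2, 5, 7} and data {a, b, c, d}, slicing columns [2, 6) keeps
// the entries at columns 2 and 5 and rebases them by begin_col, so the output
// row holds out_idx = {0, 3} and out_data = {b, c}; the loop breaks early at
// column 7 because per-row indices are sorted in ascending order.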
/*
 * Slice a CSR NDArray along two dimensions
 */
template <typename xpu>
void SliceDimTwoCsrImpl(const mxnet::TShape& begin,
const mxnet::TShape& end,
const OpContext& ctx,
const NDArray& in,
const NDArray& out);
template <typename xpu>
void SliceCsrImpl(const SliceParam& param,
const OpContext& ctx,
const NDArray& in,
OpReqType req,
const NDArray& out) {
if (req == kNullOp)
return;
CHECK_NE(req, kAddTo) << "kAddTo for Slice on CSR input is not supported";
CHECK_NE(req, kWriteInplace) << "kWriteInplace for Slice on CSR input is not supported";
const mxnet::TShape ishape = in.shape();
const mxnet::TShape oshape = out.shape();
int N = ishape.ndim();
mxnet::TShape begin(N, -1), end(N, -1);
for (int i = 0; i < N; ++i) {
int s = 0;
if (i < param.begin.ndim() && param.begin[i]) {
s = *param.begin[i];
if (s < 0)
s += ishape[i];
}
begin[i] = s;
end[i] = s + oshape[i];
}
switch (N) {
case 1: {
SliceDimOneCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
case 2: {
SliceDimTwoCsrImpl<xpu>(begin, end, ctx, in, out);
break;
}
default:
LOG(FATAL) << "CSR is only for 2-D shape";
break;
}
}
template <typename xpu>
void SliceEx(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
CHECK_EQ(inputs.size(), 1);
CHECK_EQ(outputs.size(), 1);
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
auto in_stype = inputs[0].storage_type();
if (in_stype == kCSRStorage) {
SliceCsrImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
} else {
LOG(FATAL) << "Slice not implemented for storage type" << in_stype;
}
}
template <int ndim>
inline bool GetIndexRange(const mxnet::TShape& dshape,
const mxnet::Tuple<dmlc::optional<index_t>>& param_begin,
const mxnet::Tuple<dmlc::optional<index_t>>& param_end,
const mxnet::Tuple<dmlc::optional<index_t>>& param_step,
common::StaticArray<index_t, ndim>* begin,
common::StaticArray<index_t, ndim>* end,
common::StaticArray<index_t, ndim>* step) {
  // Returns true if the sliced output is zero-sized (some axis has begin == end), false otherwise.
bool zero_size_shape = false;
CHECK_NE(dshape.ndim(), 0U);
CHECK_LE(param_begin.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions";
CHECK_LE(param_end.ndim(), dshape.ndim()) << "Slicing axis exceeds data dimensions";
CHECK_EQ(param_begin.ndim(), param_end.ndim()) << "begin and end must have the same length";
CHECK_EQ(ndim, dshape.ndim()) << "Static array size=" << ndim
<< " is not equal to data shape ndim=" << dshape.ndim();
if (param_step.ndim() > 0) {
CHECK_EQ(param_step.ndim(), param_begin.ndim()) << "step and begin must have the same length";
}
for (int i = 0; i < param_begin.ndim(); ++i) {
index_t s = param_step.ndim() > 0 && param_step[i].has_value() ? param_step[i].value() : 1;
CHECK_NE(s, 0) << "slice op step[" << i << "] cannot be 0";
index_t b = 0, e = 0;
const index_t len = dshape[i];
if (len > 0) {
b = param_begin[i].has_value() ? param_begin[i].value() : (s < 0 ? len - 1 : 0);
e = param_end[i].has_value() ? param_end[i].value() : (s < 0 ? -1 : len);
if (b < 0) {
b += len;
}
if (e < 0 && param_end[i].has_value()) {
e += len;
}
// move the begin and end to correct position for calculating dim size
b = (b < 0 && s > 0) ? 0 : b;
b = (b > len - 1 && s < 0) ? len - 1 : b;
      // if the start value leads to an empty tensor under step s, use -1 as an indicator
b = (b < 0 || b > len - 1) ? -1 : b;
e = e > -1 ? e : -1;
e = e > len ? len : e;
} else if (len == 0) {
b = 0;
e = 0;
}
(*begin)[i] = b;
(*end)[i] = e;
(*step)[i] = s;
    // begin == end indicates a zero-sized dimension in the output
if (b == e) {
zero_size_shape = true;
}
}
for (int i = param_begin.ndim(); i < dshape.ndim(); ++i) {
(*begin)[i] = 0;
(*end)[i] = dshape[i];
(*step)[i] = 1;
}
return zero_size_shape;
}
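// Worked example (a sketch): for dshape = (5,) with begin = (None,),
// end = (None,), step = (-1,), the loop yields b = 4, e = -1, s = -1,
// i.e. a full reversal; with begin = (-3,), end = (None,), step = (1,)
// it yields b = 2, e = 5, s = 1, selecting the last three elements.
// Unspecified trailing axes are filled with the full range [0, dshape[i])
// and step 1.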
inline void SetSliceOpOutputDimSize(const mxnet::TShape& dshape,
const index_t i,
const index_t b,
const index_t e,
const index_t s,
mxnet::TShape* oshape) {
if (!mxnet::dim_size_is_known(dshape, i)) {
(*oshape)[i] = -1;
return;
}
if (e != b && b >= 0) {
if (s > 0) {
(*oshape)[i] = e > b ? (e - b - 1) / s + 1 : 0;
} else {
(*oshape)[i] = e < b ? (b - e - 1) / (-s) + 1 : 0;
}
} else {
(*oshape)[i] = 0;
}
}
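// Worked example: b = 1, e = 7, s = 2 gives (7 - 1 - 1) / 2 + 1 = 3 elements
// (indices 1, 3, 5); b = 6, e = 0, s = -2 gives (6 - 0 - 1) / 2 + 1 = 3
// elements (indices 6, 4, 2).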
inline bool SliceOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape))
return false;
CHECK_GT(dshape.ndim(), 0) << "slice only works for ndim > 0";
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
mxnet::TShape oshape = dshape;
MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
for (int i = 0; i < param.begin.ndim(); ++i) {
const index_t b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(dshape, i, b, e, s, &oshape);
}
})
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(dshape) && shape_is_known(oshape);
}
template <int ndim, int req, typename xpu>
struct slice_forward;
template <int ndim, int req>
struct slice_forward<ndim, req, gpu> {
// i is the i-th row after flattening out into 2D tensor
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* data,
const mshadow::Shape<ndim> dshape,
const mshadow::Shape<ndim> oshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = dshape[ndim - 1];
const index_t out_last_dim_size = oshape[ndim - 1];
const index_t step_last_dim = step[ndim - 1];
const index_t begin_last_dim = begin[ndim - 1];
const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattened 2D data
index_t stride = 1;
index_t idx = i / out_last_dim_size;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
idx /= oshape[k];
stride *= dshape[k];
}
KERNEL_ASSIGN(
out[i], req, data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
}
};
template <int ndim, int req>
struct slice_forward<ndim, req, cpu> {
// i is the i-th row after flattening out into 2D tensor
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* data,
const mshadow::Shape<ndim> dshape,
const mshadow::Shape<ndim> oshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = dshape[ndim - 1];
const index_t out_last_dim_size = oshape[ndim - 1];
const index_t step_last_dim = step[ndim - 1];
const index_t begin_last_dim = begin[ndim - 1];
index_t out_offset = i * out_last_dim_size;
for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D data
index_t stride = 1;
index_t idx = i;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % oshape[k]) * step[k] + begin[k]);
idx /= oshape[k];
stride *= dshape[k];
}
KERNEL_ASSIGN(out[out_offset++],
req,
data[irow * data_last_dim_size + j * step_last_dim + begin_last_dim]);
}
}
};
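// Worked example (hypothetical shapes): for dshape = (4, 5), oshape = (2, 3),
// begin = (1, 1), step = (2, 1), the cpu kernel invoked with i = 1 computes
// irow = (1 % 2) * 2 + 1 = 3, so it copies data[3 * 5 + j + 1] for
// j = 0, 1, 2 into out[3 .. 5], i.e. output row 1 is input row 3,
// columns 1 through 3.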
template <typename xpu>
void SliceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
if (req[0] == kNullOp)
return;
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
if (out.Size() == 0)
return;
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
size_t num_threads = out.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= out.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(
s,
num_threads,
out.dptr<DType>(),
data.dptr<DType>(),
data.shape_.get<ndim>(),
out.shape_.get<ndim>(),
begin,
step);
})})
})
}
template <int ndim, int req, typename xpu>
struct slice_assign;
template <int ndim, int req>
struct slice_assign<ndim, req, cpu> {
// i is the i-th row after flattening out into 2D tensor
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* val,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim - 1];
const index_t out_last_dim_size = vshape[ndim - 1];
const index_t step_last_dim = step[ndim - 1];
const index_t begin_last_dim = begin[ndim - 1];
index_t offset = i * out_last_dim_size;
for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D out
index_t stride = 1;
index_t idx = i;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(
out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[offset++]);
}
}
};
template <int ndim, int req>
struct slice_assign<ndim, req, gpu> {
// i is the i-th row after flattening out into 2D tensor
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* val,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim - 1];
const index_t out_last_dim_size = vshape[ndim - 1];
const index_t step_last_dim = step[ndim - 1];
const index_t begin_last_dim = begin[ndim - 1];
const index_t j = i % out_last_dim_size;
    index_t irow = 0;  // row id of flattened 2D out
index_t stride = 1;
index_t idx = i / out_last_dim_size;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val[i]);
}
};
template <typename xpu>
void SliceOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
if (req[0] == kNullOp)
return;
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& ograd = inputs[0];
const TBlob& igrad = outputs[0];
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
if (req[0] == kWriteTo) {
Fill(s, igrad, req[0], 0);
} else if (req[0] == kWriteInplace) {
LOG(FATAL) << "_slice_backward does not support kWriteInplace";
}
if (ograd.Size() == 0)
return;
MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(igrad.shape_, param.begin, param.end, param.step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
int num_threads = ograd.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(
s,
num_threads,
igrad.dptr<DType>(),
ograd.dptr<DType>(),
igrad.shape_.get<ndim>(),
ograd.shape_.get<ndim>(),
begin,
step);
})})
})
}
inline bool SliceAssignOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 2U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(dshape))
return false;
mxnet::TShape vshape = dshape; // vshape is the value shape on the right hand side
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(dshape.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(dshape, param.begin, param.end, param.step, &begin, &end, &step);
for (int i = 0; i < param.begin.ndim(); ++i) {
const index_t b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(dshape, i, b, e, s, &vshape);
}
})
SHAPE_ASSIGN_CHECK(*in_attrs, 1, vshape);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
template <typename xpu>
void SliceAssignOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 2U); // data[index] = val, data and val are two inputs
CHECK_EQ(outputs.size(), 1U);
if (req[0] == kNullOp)
return;
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& val = inputs[1];
const TBlob& out = outputs[0];
if (req[0] == kWriteTo) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
Copy(out, in, s);
});
} else if (req[0] != kWriteInplace) {
LOG(FATAL) << "_slice_assign only supports kWriteTo and kWriteInplace";
}
const SliceParam& param = nnvm::get<SliceParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
bool zero_size_shape =
GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
if (zero_size_shape) {
return; // slice_assign of zero-sized subspace needs no operation.
}
MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
index_t num_threads = val.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= val.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(
s,
num_threads,
out.dptr<DType>(),
val.dptr<DType>(),
out.shape_.get<ndim>(),
val.shape_.get<ndim>(),
begin,
step);
})})
})
}
struct SliceAssignScalarParam : public dmlc::Parameter<SliceAssignScalarParam> {
double scalar;
mxnet::Tuple<dmlc::optional<index_t>> begin, end;
mxnet::Tuple<dmlc::optional<index_t>> step;
DMLC_DECLARE_PARAMETER(SliceAssignScalarParam) {
DMLC_DECLARE_FIELD(scalar).set_default(0).describe("The scalar value for assignment.");
DMLC_DECLARE_FIELD(begin).describe(
"starting indices for the slice operation, supports negative indices.");
DMLC_DECLARE_FIELD(end).describe(
"ending indices for the slice operation, supports negative indices.");
DMLC_DECLARE_FIELD(step)
.set_default(mxnet::Tuple<dmlc::optional<index_t>>())
.describe("step for the slice operation, supports negative values.");
}
};
inline bool SliceAssignScalarOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = (*in_attrs)[0];
if (!shape_is_known(dshape))
return false;
SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape);
return true;
}
template <int ndim>
struct slice_assign_scalar {
// i is the i-th row after flattening out into 2D tensor
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType val,
const OpReqType req,
const mshadow::Shape<ndim> oshape,
const mshadow::Shape<ndim> vshape,
const common::StaticArray<index_t, ndim> begin,
const common::StaticArray<index_t, ndim> step) {
const index_t data_last_dim_size = oshape[ndim - 1];
const index_t out_last_dim_size = vshape[ndim - 1];
const index_t step_last_dim = step[ndim - 1];
const index_t begin_last_dim = begin[ndim - 1];
for (index_t j = 0; j < out_last_dim_size; ++j) {
      index_t irow = 0;  // row id of flattened 2D out
index_t stride = 1;
index_t idx = i;
#pragma unroll
for (int k = ndim - 2; k >= 0; --k) {
irow += stride * ((idx % vshape[k]) * step[k] + begin[k]);
idx /= vshape[k];
stride *= oshape[k];
}
KERNEL_ASSIGN(out[irow * data_last_dim_size + j * step_last_dim + begin_last_dim], req, val);
}
}
};
template <typename xpu>
void SliceAssignScalarOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
if (req[0] == kWriteTo) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Tensor<xpu, 1, DType> in = inputs[0].FlatTo1D<xpu, DType>(s);
Tensor<xpu, 1, DType> out = outputs[0].FlatTo1D<xpu, DType>(s);
Copy(out, in, s);
});
} else if (req[0] != kWriteInplace) {
LOG(FATAL) << "_crop_assign_scalar only supports kWriteTo and kWriteInplace";
}
mxnet::TShape vshape = data.shape_;
const SliceAssignScalarParam& param = nnvm::get<SliceAssignScalarParam>(attrs.parsed);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
bool zero_size_shape =
GetIndexRange(data.shape_, param.begin, param.end, param.step, &begin, &end, &step);
if (zero_size_shape) {
      return;  // slice_assign of a zero-sized subspace needs no operation.
}
for (index_t i = 0; i < param.begin.ndim(); ++i) {
const index_t b = begin[i], e = end[i], s = step[i];
SetSliceOpOutputDimSize(data.shape_, i, b, e, s, &vshape);
}
MSHADOW_TYPE_SWITCH_WITH_BOOL(out.type_flag_, DType, {
mxnet_op::Kernel<slice_assign_scalar<ndim>, xpu>::Launch(s,
vshape.FlatTo2D()[0],
out.dptr<DType>(),
static_cast<DType>(param.scalar),
req[0],
out.shape_.get<ndim>(),
vshape.get<ndim>(),
begin,
step);
})
})
}
struct SliceAxisParam : public dmlc::Parameter<SliceAxisParam> {
int axis;
index_t begin;
dmlc::optional<index_t> end;
DMLC_DECLARE_PARAMETER(SliceAxisParam) {
DMLC_DECLARE_FIELD(axis).describe("Axis along which to be sliced, supports negative indexes.");
DMLC_DECLARE_FIELD(begin).describe(
"The beginning index along the axis to be sliced, "
" supports negative indexes.");
DMLC_DECLARE_FIELD(end).describe(
"The ending index along the axis to be sliced, "
" supports negative indexes.");
}
};
inline void GetSliceAxisParams(const SliceAxisParam& param,
const mxnet::TShape& ishape,
int* axis,
index_t* begin,
index_t* end) {
*axis = param.axis;
if (*axis < 0) {
*axis += ishape.ndim();
}
  CHECK(*axis < ishape.ndim() && *axis >= 0)
      << "Transformed axis must be smaller than the source ndim and non-negative! Received "
         "axis="
      << param.axis << ", src_ndim=" << ishape.ndim() << ", transformed axis=" << *axis;
index_t axis_size = static_cast<index_t>(ishape[*axis]);
*begin = param.begin;
*end = -1;
if (*begin < 0) {
*begin += axis_size;
}
if (axis_size > 0) {
if (!static_cast<bool>(param.end)) {
*end = axis_size;
} else {
*end = param.end.value();
if (*end < 0) {
*end += axis_size;
}
}
CHECK(*end <= axis_size) << "Invalid end for end=" << *end << " as axis_size is " << axis_size;
    CHECK((*begin < *end)) << "Invalid begin and end: got begin=" << param.begin
                           << ", end=" << param.end;
} else {
*begin = 0;
*end = 0;
}
  CHECK(*end >= 0) << "Invalid begin and end: got begin=" << param.begin << ", end=" << param.end;
  CHECK(*begin >= 0) << "Invalid begin: got begin=" << param.begin;
}
inline bool SliceAxisShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& ishape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(ishape))
return false;
int axis;
index_t begin, end;
GetSliceAxisParams(param, ishape, &axis, &begin, &end);
if (!mxnet::dim_size_is_known(ishape, axis)) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
return false;
}
mxnet::TShape shape(ishape.ndim(), -1);
for (int i = 0; i < ishape.ndim(); ++i) {
if (i == axis) {
shape[i] = static_cast<index_t>(end - begin);
} else {
shape[i] = ishape[i];
}
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
return shape_is_known(shape);
}
template <typename xpu>
void SliceAxis(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow::expr;
const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
int axis;
index_t begin, end;
GetSliceAxisParams(param, inputs[0].shape_, &axis, &begin, &end);
int ndim = outputs[0].ndim();
if (axis + 1 == ndim) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mshadow::Tensor<xpu, 2, DType> in = inputs[0].FlatTo2D<xpu, DType>(s);
mshadow::Tensor<xpu, 2, DType> out = outputs[0].FlatTo2D<xpu, DType>(s);
ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
});
} else {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mshadow::Tensor<xpu, 3, DType> in = inputs[0].FlatTo3D<xpu, DType>(axis, s);
mshadow::Tensor<xpu, 3, DType> out = outputs[0].FlatTo3D<xpu, DType>(axis, s);
ASSIGN_DISPATCH(out, req[0], slice<1>(in, begin, end));
});
}
}
// Backward pass of slice over the given axis
template <typename xpu>
void SliceAxisGrad_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
if (outputs[0].shape_.Size() == 0) {
return;
}
const SliceAxisParam& param = nnvm::get<SliceAxisParam>(attrs.parsed);
using namespace mshadow::op;
using namespace mshadow::expr;
mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
int axis;
index_t begin, end;
GetSliceAxisParams(param, outputs[0].shape_, &axis, &begin, &end);
int ndim = outputs[0].shape_.ndim();
if (axis + 1 == ndim) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mshadow::Tensor<xpu, 2, DType> ograd = inputs[0].FlatTo2D<xpu, DType>(s);
mshadow::Tensor<xpu, 2, DType> igrad = outputs[0].FlatTo2D<xpu, DType>(s);
if (req[0] == kAddTo) {
slice<1>(igrad, begin, end) += F<identity>(ograd);
} else if (req[0] == kWriteTo) {
igrad = 0.0f;
slice<1>(igrad, begin, end) = F<identity>(ograd);
} else {
CHECK_EQ(req[0], kNullOp);
}
});
} else {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mshadow::Tensor<xpu, 3, DType> ograd = inputs[0].FlatTo3D<xpu, DType>(axis, s);
mshadow::Tensor<xpu, 3, DType> igrad = outputs[0].FlatTo3D<xpu, DType>(axis, s);
if (req[0] == kAddTo) {
slice<1>(igrad, begin, end) += F<identity>(ograd);
} else if (req[0] == kWriteTo) {
igrad = 0.0f;
slice<1>(igrad, begin, end) = F<identity>(ograd);
} else {
CHECK_EQ(req[0], kNullOp);
}
});
}
}
struct SliceLikeParam : public dmlc::Parameter<SliceLikeParam> {
mxnet::Tuple<int> axes;
DMLC_DECLARE_PARAMETER(SliceLikeParam) {
DMLC_DECLARE_FIELD(axes)
.set_default(mxnet::Tuple<int>())
.describe(
"List of axes on which input data will be sliced according to the "
"corresponding size of the second input. By default will slice on "
"all axes. Negative axes are supported.");
}
};
inline bool SliceLikeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 2U);
CHECK_EQ(out_attrs->size(), 1U);
mxnet::TShape& ishape = (*in_attrs)[0];
mxnet::TShape& from_shape = (*in_attrs)[1];
if (!mxnet::ndim_is_known(ishape) || !mxnet::ndim_is_known(from_shape)) {
return false;
}
if (param.axes.ndim() == 0) {
    CHECK_EQ(ishape.ndim(), from_shape.ndim())
        << "By default slice_like performs slice on all axes, but ndim mismatch "
           "for inputs: "
        << ishape.ndim() << " vs. " << from_shape.ndim();
for (int i = 0; i < ishape.ndim(); ++i) {
      CHECK_GE(ishape[i], from_shape[i]) << "Slice axis " << i << " with size " << from_shape[i]
                                         << " exceeds limit of input with size " << ishape[i];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, from_shape);
} else {
mxnet::TShape shape(ishape);
for (int i = 0; i < param.axes.ndim(); ++i) {
int axis = param.axes[i];
if (axis < 0) {
axis += ishape.ndim();
}
CHECK_GE(axis, 0) << "Slice axis: " << param.axes[i] << " too small";
CHECK_GT(ishape.ndim(), axis)
<< "Slice axis: " << axis << " exceeds first input: " << ishape.ndim();
CHECK_GT(from_shape.ndim(), axis)
<< "Slice axis: " << axis << " exceeds second input: " << from_shape.ndim();
shape[axis] = from_shape[axis];
      CHECK_GE(ishape[axis], from_shape[axis])
          << "Slice axis " << axis << " with size " << from_shape[axis]
          << " exceeds limit of input with size " << ishape[axis];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
}
return true;
}
inline void SliceLikeInferRanges(const mxnet::TShape& dshape,
const mxnet::TShape& fshape,
const mxnet::Tuple<int>& axes,
mxnet::Tuple<dmlc::optional<index_t>>* param_begin,
mxnet::Tuple<dmlc::optional<index_t>>* param_end,
mxnet::Tuple<dmlc::optional<index_t>>* param_step) {
std::vector<dmlc::optional<index_t>> pb(dshape.ndim());
std::vector<dmlc::optional<index_t>> pe(dshape.ndim());
std::vector<dmlc::optional<index_t>> ps(dshape.ndim());
if (axes.ndim() == 0) {
for (int i = 0; i < dshape.ndim(); ++i) {
pb[i] = 0;
pe[i] = fshape[i];
ps[i] = 1;
}
} else {
for (int i = 0; i < axes.ndim(); ++i) {
int axis = axes[i];
if (axis < 0) {
axis += dshape.ndim();
}
CHECK_GE(axis, 0) << "Slice axis: " << axes[i] << " too small";
CHECK_LT(axis, dshape.ndim())
<< "Slice axis: " << axis << " exceeds first input: " << dshape.ndim();
      CHECK_LT(axis, fshape.ndim())
          << "Slice axis: " << axis << " exceeds second input: " << fshape.ndim();
pb[axis] = 0;
pe[axis] = fshape[axis];
ps[axis] = 1;
}
}
*param_begin = mxnet::Tuple<dmlc::optional<index_t>>(pb.begin(), pb.end());
*param_end = mxnet::Tuple<dmlc::optional<index_t>>(pe.begin(), pe.end());
*param_step = mxnet::Tuple<dmlc::optional<index_t>>(ps.begin(), ps.end());
}
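// Worked example (a sketch): with dshape = (4, 5), fshape = (2, 3) and
// axes = {1}, only axis 1 is constrained: param_begin = (None, 0),
// param_end = (None, 3), param_step = (None, 1), so GetIndexRange later
// expands axis 0 to the full range and the sliced output has shape (4, 3).
// With empty axes every axis is constrained, giving begin = (0, 0) and
// end = (2, 3).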
template <typename xpu>
void SliceLikeForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
using namespace mshadow::expr;
const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& data = inputs[0];
const TBlob& out = outputs[0];
const mxnet::TShape& ishape = data.shape_;
const mxnet::TShape& from_shape = inputs[1].shape_;
mxnet::Tuple<dmlc::optional<index_t>> param_begin;
mxnet::Tuple<dmlc::optional<index_t>> param_end;
mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
MXNET_NDIM_SWITCH(data.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(data.shape_, param_begin, param_end, param_step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH(out.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
int num_threads = out.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= out.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_forward<ndim, Req, xpu>, xpu>::Launch(
s,
num_threads,
out.dptr<DType>(),
data.dptr<DType>(),
data.shape_.get<ndim>(),
out.shape_.get<ndim>(),
begin,
step);
})})
})
}
template <typename xpu>
void SliceLikeBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 2U);
CHECK_EQ(req.size(), 2U);
using namespace mshadow;
Stream<xpu>* s = ctx.get_stream<xpu>();
if (req[1] != kNullOp && req[1] != kAddTo) {
    Fill(s, outputs[1], req[1], 0);  // Second input is not relevant to gradients.
}
if (req[0] == kNullOp)
return;
const TBlob& ograd = inputs[0];
const TBlob& igrad = outputs[0];
const SliceLikeParam& param = nnvm::get<SliceLikeParam>(attrs.parsed);
if (req[0] == kWriteTo) {
Fill(s, igrad, req[0], 0);
} else if (req[0] == kWriteInplace) {
LOG(FATAL) << "_slice_like_backward does not support kWriteInplace";
}
const mxnet::TShape& ishape = ograd.shape_;
const mxnet::TShape& from_shape = outputs[1].shape_;
mxnet::Tuple<dmlc::optional<index_t>> param_begin;
mxnet::Tuple<dmlc::optional<index_t>> param_end;
mxnet::Tuple<dmlc::optional<index_t>> param_step;
  SliceLikeInferRanges(ishape, from_shape, param.axes, &param_begin, &param_end, &param_step);
MXNET_NDIM_SWITCH(ograd.ndim(), ndim, {
common::StaticArray<index_t, ndim> begin, end, step;
GetIndexRange(ograd.shape_, param_begin, param_end, param_step, &begin, &end, &step);
MSHADOW_TYPE_SWITCH(ograd.type_flag_, DType, {MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
int num_threads = ograd.shape_.FlatTo2D()[0];
if (std::is_same<xpu, gpu>::value) {
num_threads *= ograd.shape_.get<ndim>()[ndim - 1];
}
mxnet_op::Kernel<slice_assign<ndim, Req, xpu>, xpu>::Launch(
s,
num_threads,
igrad.dptr<DType>(),
ograd.dptr<DType>(),
igrad.shape_.get<ndim>(),
ograd.shape_.get<ndim>(),
begin,
step);
})})
})
}
struct ClipParam : public dmlc::Parameter<ClipParam> {
real_t a_min, a_max;
DMLC_DECLARE_PARAMETER(ClipParam) {
DMLC_DECLARE_FIELD(a_min).describe("Minimum value");
DMLC_DECLARE_FIELD(a_max).describe("Maximum value");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream a_min_s, a_max_s;
a_min_s << a_min;
a_max_s << a_max;
(*dict)["a_min"] = a_min_s.str();
(*dict)["a_max"] = a_max_s.str();
}
};
struct clip {
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* datas,
const float a_min,
const float a_max) {
DType data = datas[i];
if (data > a_max) {
out[i] = a_max;
} else if (data < a_min) {
out[i] = a_min;
} else {
out[i] = data;
}
}
};
struct clip_grad {
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out,
const DType* grad,
const DType* datas,
const float a_min,
const float a_max) {
DType data = datas[i];
if (data > a_max) {
out[i] = 0;
} else if (data < a_min) {
out[i] = 0;
} else {
out[i] = grad[i];
}
}
};
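// Note: the comparisons above are strict, so the gradient passes through for
// inputs exactly at the boundaries. E.g. with a_min = 0 and a_max = 1,
// data = -0.5 and data = 1.5 get zero gradient, while data = 0, 0.3, and 1
// all propagate grad[i] unchanged.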
template <typename xpu>
void Clip(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
Stream<xpu>* s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
mxnet_op::Kernel<mxnet::op::clip, xpu>::Launch(s,
outputs[0].Size(),
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(),
param.a_min,
param.a_max);
});
}
template <typename xpu>
void ClipEx(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
CHECK_EQ(inputs[0].dtype(), outputs[0].dtype());
CHECK_EQ(inputs[0].storage_type(), outputs[0].storage_type());
CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Clip<xpu>);
}
template <typename xpu>
void ClipGrad_(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
const ClipParam& param = nnvm::get<ClipParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
Stream<xpu>* s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<clip_grad, xpu>::Launch(s,
outputs[0].Size(),
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(),
inputs[1].dptr<DType>(),
param.a_min,
param.a_max);
});
}
/*!
* \brief The parameters of the repeat operator include
* the number of repeating time and axis (optional).
* The parameters will be later used to deduce the
* output ndarray shape in bool RepeatShape() function.
*/
struct RepeatParam : public dmlc::Parameter<RepeatParam> {
int repeats = 1;
dmlc::optional<int> axis;
DMLC_DECLARE_PARAMETER(RepeatParam) {
DMLC_DECLARE_FIELD(repeats).describe("The number of repetitions for each element.");
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<int>())
.describe(
"The axis along which to repeat values."
" The negative numbers are interpreted counting from the backward."
" By default, use the flattened input array,"
" and return a flat output array.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream repeats_s, axis_s;
repeats_s << repeats;
axis_s << axis;
(*dict)["repeats"] = repeats_s.str();
(*dict)["axis"] = axis_s.str();
}
};
/*!
* \brief Helper function for getting user input params for the operator repeat.
* Sanity check the user input values.
*/
inline void GetRepeatParams(const RepeatParam& param,
const mxnet::TShape& ishape,
int* repeats,
dmlc::optional<int>* axisOpt) {
*repeats = param.repeats;
CHECK_GE(*repeats, 0) << "repeats cannot be a negative number";
*axisOpt = param.axis;
if (static_cast<bool>(*axisOpt)) {
int ndims = ishape.ndim();
int axis = axisOpt->value();
if (axis < 0) {
axis += ndims;
}
CHECK(axis >= 0 && axis < ndims) << "axis = " << axisOpt->value() << " out of bounds";
}
}
inline bool RepeatOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& ishape = (*in_attrs)[0];
if (!mxnet::ndim_is_known(ishape)) {
return false;
}
int repeats = 0;
dmlc::optional<int> axisOpt;
GetRepeatParams(param, ishape, &repeats, &axisOpt);
// If 0 repeats, return an empty 1-dim, 0-size array
if (0 == repeats) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(1, 0));
return true;
}
// If repeats > 0, multiply the size of the corresponding axis by repeats
if (static_cast<bool>(axisOpt)) {
int ndims = ishape.ndim();
int axis = axisOpt.value();
if (axis < 0) {
axis += ndims;
}
mxnet::TShape shape(ishape.ndim(), -1);
for (int i = 0; i < ishape.ndim(); ++i) {
if (i == axis) {
shape[i] = repeats * ishape[i];
} else {
shape[i] = ishape[i];
}
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
} else { // If axis is not input by user, return a flat 1D array of size = in.size*repeats
mxnet::TShape shape(1, ishape.Size() * repeats);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
}
return shape_is_known(out_attrs->at(0));
}
inline bool RepeatOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if ((*in_attrs)[0] != -1) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
} else if ((*out_attrs)[0] != -1) {
TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
}
return true;
}
/*!
* \brief Reshape the input and output tensors for
 * using broadcast_to to achieve the functionality
* of operator repeat.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForRepeatOp(
const mxnet::TShape& ishape,
const dmlc::optional<int>& axisOpt,
const int repeats) {
if (static_cast<bool>(axisOpt)) {
int axis = axisOpt.value();
int ndim = ishape.ndim();
if (axis < 0) {
axis += ndim;
}
CHECK(axis >= 0 && axis < ishape.ndim()) << "Invalid input of axis";
// reshape the input tensor by adding a dim at the (axis+1)-th dim
mxnet::TShape rshape(ishape.ndim() + 1, 1);
// the shape we want to broadcast to
mxnet::TShape bshape(rshape.ndim(), 1);
int i = 0;
while (i <= axis) {
rshape[i] = bshape[i] = ishape[i];
++i;
}
rshape[i] = 1;
bshape[i] = repeats;
while (i < ishape.ndim()) {
rshape[i + 1] = ishape[i];
bshape[i + 1] = ishape[i];
++i;
}
return std::make_pair(rshape, bshape);
} else {
// axis is not input by user
// reshape the tensor into shape (ishape.Size(), 1)
// then add one dim at axis = 1 and broadcast to
// shape (ishape.Size(), repeats)
mxnet::TShape rshape(2, 1);
rshape[0] = ishape.Size();
rshape[1] = 1;
mxnet::TShape bshape(2, 1);
bshape[0] = rshape[0];
bshape[1] = repeats;
return std::make_pair(rshape, bshape);
}
}
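// Worked example (a sketch): ishape = (2, 3), axis = 1, repeats = 2 gives
// rshape = (2, 3, 1) and bshape = (2, 3, 2); broadcasting the reshaped input
// to bshape and viewing the result as (2, 6) repeats every element twice
// along axis 1, e.g. row (a, b, c) becomes (a, a, b, b, c, c).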
template <typename xpu>
void RepeatOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
const TBlob& iTBlob = inputs[0];
const mxnet::TShape& ishape = iTBlob.shape_;
if (!shape_is_known(ishape))
return;
int repeats = 0;
dmlc::optional<int> axisOpt;
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
GetRepeatParams(param, ishape, &repeats, &axisOpt);
if (0 == repeats)
return;
std::pair<mxnet::TShape, mxnet::TShape> rshapes =
ReshapeInputOutputForRepeatOp(ishape, axisOpt, repeats);
// reshaped input tblob
TBlob iblob(inputs[0].dptr_,
rshapes.first,
inputs[0].dev_mask(),
inputs[0].type_flag_,
inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
// reshaped output tblob
TBlob oblob(outputs[0].dptr_,
rshapes.second,
outputs[0].dev_mask(),
outputs[0].type_flag_,
outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
template <typename xpu>
void RepeatOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
const mxnet::TShape& oshape = outputs[0].shape_;
if (!shape_is_known(oshape))
return;
int repeats = 0;
dmlc::optional<int> axisOpt;
const RepeatParam& param = nnvm::get<RepeatParam>(attrs.parsed);
GetRepeatParams(param, oshape, &repeats, &axisOpt);
if (0 == repeats)
return;
std::pair<mxnet::TShape, mxnet::TShape> rshapes =
ReshapeInputOutputForRepeatOp(oshape, axisOpt, repeats);
// reshaped output grad tblob
TBlob oblob(outputs[0].dptr_,
rshapes.first,
outputs[0].dev_mask(),
outputs[0].type_flag_,
outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
// reshaped input grad tblob
TBlob iblob(inputs[0].dptr_,
rshapes.second,
inputs[0].dev_mask(),
inputs[0].type_flag_,
inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
#if !defined(__CUDACC__)
ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
ctx, newInputs, req, newOutputs, rshapes.first);
#else
ReduceAxesRTCComputeImpl(
ctx, newInputs, req, newOutputs, rshapes.first, "red::sum{}", nullptr, false);
#endif
}
struct TileParam : public dmlc::Parameter<TileParam> {
mxnet::Tuple<int> reps;
DMLC_DECLARE_PARAMETER(TileParam) {
DMLC_DECLARE_FIELD(reps).describe(
"The number of times for repeating the tensor a. Each dim size of reps"
" must be a positive integer."
" If reps has length d, the result will have dimension of max(d, a.ndim);"
" If a.ndim < d, a is promoted to be d-dimensional by prepending new axes."
" If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream reps_s;
reps_s << reps;
(*dict)["reps"] = reps_s.str();
}
};
inline bool TileOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
const TileParam& param = nnvm::get<TileParam>(attrs.parsed);
const mxnet::TShape& ishape = (*in_attrs)[0];
if (!shape_is_known(ishape)) {
return false;
}
const mxnet::Tuple<int>& reps = param.reps;
  // If reps is empty, the output shape is identical to the input shape
if (reps.ndim() == 0) {
SHAPE_ASSIGN_CHECK(*out_attrs, 0, ishape);
return true;
}
mxnet::TShape oshape(std::max(ishape.ndim(), reps.ndim()), -1);
int i1 = ishape.ndim() - 1;
int i2 = reps.ndim() - 1;
for (int i = oshape.ndim() - 1; i >= 0; --i) {
if (i1 >= 0 && i2 >= 0) {
oshape[i] = ishape[i1--] * reps[i2--];
} else if (i1 >= 0) {
oshape[i] = ishape[i1--];
} else if (i2 >= 0) {
oshape[i] = reps[i2--];
}
}
// If reps contains 0s, oshape is a zero-size shape.
// Need to distinguish between np_shape mode and legacy mode.
if (!Imperative::Get()->is_np_shape()) {
common::ConvertToNumpyShape(&oshape);
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(oshape);
}
inline bool TileOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
if ((*in_attrs)[0] != -1) {
TYPE_ASSIGN_CHECK(*out_attrs, 0, (*in_attrs)[0]);
} else if ((*out_attrs)[0] != -1) {
TYPE_ASSIGN_CHECK(*in_attrs, 0, (*out_attrs)[0]);
}
return true;
}
/*!
* \brief Reshape the input and output tensors for
* using broadcast_to to achieve the functionality
* of operator tile.
* \return a pair of mxnet::TShape's, first is the reshaped
* input shape, second is the reshaped output shape.
*/
inline std::pair<mxnet::TShape, mxnet::TShape> ReshapeInputOutputForTileOp(
const mxnet::TShape& ishape,
const mxnet::Tuple<int>& reps) {
if (reps.ndim() == 0) {
return std::make_pair(ishape, ishape);
}
// The shape we want to broadcast to
mxnet::TShape bshape(std::max(ishape.ndim(), reps.ndim()) * 2, 1);
// The shape of the input tensor after adding new axes before each dim
mxnet::TShape rshape(bshape.ndim(), 1);
int i1 = ishape.ndim() - 1;
int i2 = reps.ndim() - 1;
for (int i = bshape.ndim() - 1; i >= 0; --i) {
if (0 == (i & 1)) {
bshape[i] = (i2 >= 0 ? reps[i2--] : 1);
rshape[i] = 1;
} else {
rshape[i] = bshape[i] = (i1 >= 0 ? ishape[i1--] : 1);
}
}
return std::make_pair(rshape, bshape);
}
/*!
* \brief Implementation of tiling the input tensor a based
* on the user-input shape, reps.
* If a.ndim < reps.ndim, new axes are pre-pended to a. For example,
* the input tensor has shape (3,), and the reps is (2, 4); the input
* tensor would be reshaped to (1, 3).
 * If a.ndim > reps.ndim, 1's are prepended to reps. For example,
* the input tensor has shape (2, 3, 4, 5), and reps is (2, 2);
* the reps would be changed to (1, 1, 2, 2).
* Suppose we have a.ndim = reps.ndim now. To achieve tiling,
* we utilize the operator broadcast_to. For example, for a tensor
* of shape (2, 3, 4, 5) and reps (2, 8, 9, 3), we first reshape
* the tensor to the shape (1, 2, 1, 3, 1, 4, 1, 5) by adding
* one axis before each dimension. Then, we want to broadcast
* the new tensor to shape (2, 2, 8, 3, 9, 4, 3, 5). The final
* output tensor would have shape (2*2, 8*3, 9*4, 3*5).
*/
template <typename xpu>
void TileOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
if (inputs[0].Size() == 0)
return;
const mxnet::TShape& ishape = inputs[0].shape_;
const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any of the numbers in reps is zero, return immediately
for (int i = 0; i < reps.ndim(); ++i) {
if (0 == reps[i])
return;
}
std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(ishape, reps);
// reshaped input tblob
TBlob iblob(inputs[0].dptr_,
rshapes.first,
inputs[0].dev_mask(),
inputs[0].type_flag_,
inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
// reshaped output tblob
TBlob oblob(outputs[0].dptr_,
rshapes.second,
outputs[0].dev_mask(),
outputs[0].type_flag_,
outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
BroadcastCompute<xpu>(attrs, ctx, newInputs, req, newOutputs);
}
/*!
* \brief Compute the gradient of the loss function
* with respect to the input of the operator.
* Backpropagation is employed to implement the
* chain rule.
* \param inputs the gradient of the loss function
* with respect to the outputs of the operator
* \param outputs the gradient of the loss function
* with respect to the inputs of the operator
*/
template <typename xpu>
void TileOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
if (inputs[0].Size() == 0)
return;
const mxnet::TShape& oshape = outputs[0].shape_;
const mxnet::Tuple<int>& reps = nnvm::get<TileParam>(attrs.parsed).reps;
  // If any of the numbers in reps is zero, return immediately
for (int i = 0; i < reps.ndim(); ++i) {
if (0 == reps[i])
return;
}
std::pair<mxnet::TShape, mxnet::TShape> rshapes = ReshapeInputOutputForTileOp(oshape, reps);
// reshaped output grad tblob
TBlob oblob(outputs[0].dptr_,
rshapes.first,
outputs[0].dev_mask(),
outputs[0].type_flag_,
outputs[0].dev_id());
std::vector<TBlob> newOutputs = {oblob};
// reshaped input grad tblob
TBlob iblob(inputs[0].dptr_,
rshapes.second,
inputs[0].dev_mask(),
inputs[0].type_flag_,
inputs[0].dev_id());
std::vector<TBlob> newInputs = {iblob};
#if !defined(__CUDACC__)
ReduceAxesComputeImpl<xpu, mshadow::red::sum, false, false>(
ctx, newInputs, req, newOutputs, rshapes.first);
#else
ReduceAxesRTCComputeImpl(
ctx, newInputs, req, newOutputs, rshapes.first, "red::sum{}", nullptr, false);
#endif
}
struct ReverseParam : public dmlc::Parameter<ReverseParam> {
mxnet::Tuple<int> axis;
DMLC_DECLARE_PARAMETER(ReverseParam) {
    DMLC_DECLARE_FIELD(axis).describe("The axes along which to reverse elements.");
}
};
#define REVERSE_MAX_DIM 10U
struct reverse {
MSHADOW_XINLINE static index_t ReverseIndex(index_t idx,
index_t nreversedim,
const index_t* stride_,
const index_t* trailing_) {
index_t outputIndex = idx;
for (index_t i = 0; i < nreversedim; ++i) {
const index_t low = outputIndex % trailing_[i];
index_t high = outputIndex / trailing_[i];
const index_t x = high % stride_[i];
high /= stride_[i];
outputIndex = (high * stride_[i] + stride_[i] - 1 - x) * trailing_[i] + low;
}
return outputIndex;
}
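  // Worked example (a sketch): reversing axis 1 of a (2, 3) tensor uses
  // nreversedim = 1, stride_ = {3}, trailing_ = {1}. Flat index 0
  // (element (0, 0)) maps to (0 * 3 + 3 - 1 - 0) * 1 + 0 = 2, i.e. (0, 2),
  // while flat index 4 (element (1, 1)) maps back to itself, being the
  // middle of a reversed length-3 row.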
#ifdef __CUDACC__
template <typename DType>
__device__ static void Map(index_t index,
index_t nreversedim,
const DType* src,
DType* dst,
const index_t* stride_,
const index_t* trailing_) {
__shared__ index_t stride_share[REVERSE_MAX_DIM];
__shared__ index_t trailing_share[REVERSE_MAX_DIM];
if (threadIdx.x < REVERSE_MAX_DIM) {
stride_share[threadIdx.x] = stride_[threadIdx.x];
trailing_share[threadIdx.x] = trailing_[threadIdx.x];
}
__syncthreads();
index_t new_idx = ReverseIndex(index, nreversedim, stride_share, trailing_share);
dst[new_idx] = src[index];
}
#else
template <typename DType>
MSHADOW_XINLINE static void Map(index_t index,
index_t nreversedim,
const DType* src,
DType* dst,
const index_t* stride_,
const index_t* trailing_) {
index_t new_idx = ReverseIndex(index, nreversedim, stride_, trailing_);
dst[new_idx] = src[index];
}
#endif
};
template <typename xpu>
void ReverseOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
const ReverseParam& param = nnvm::get<ReverseParam>(attrs.parsed);
CHECK_EQ(inputs[0].type_flag_, outputs[0].type_flag_);
CHECK_LT(param.axis.ndim(), REVERSE_MAX_DIM);
Stream<xpu>* s = ctx.get_stream<xpu>();
const mxnet::TShape& ishape = inputs[0].shape_;
std::vector<index_t> stride_(param.axis.ndim());
std::vector<index_t> trailing_(param.axis.ndim());
index_t reverse_index = 0;
for (int axis : param.axis) {
CHECK_LT(axis, ishape.ndim());
stride_[reverse_index] = ishape[axis];
trailing_[reverse_index] = 1;
for (int i2 = axis + 1; i2 < ishape.ndim(); ++i2) {
trailing_[reverse_index] *= ishape[i2];
}
reverse_index++;
}
#ifdef __CUDACC__
mshadow::Tensor<xpu, 1, uint8_t> workspace = ctx.requested[0].get_space_typed<xpu, 1, uint8_t>(
mshadow::Shape1(reverse_index * sizeof(index_t) * 2), s);
auto stride_workspace = workspace.dptr_;
auto trailing_workspace = workspace.dptr_ + reverse_index * sizeof(index_t);
cudaMemcpyAsync(stride_workspace,
thrust::raw_pointer_cast(stride_.data()),
stride_.size() * sizeof(index_t),
cudaMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
cudaMemcpyAsync(trailing_workspace,
thrust::raw_pointer_cast(trailing_.data()),
trailing_.size() * sizeof(index_t),
cudaMemcpyHostToDevice,
mshadow::Stream<gpu>::GetStream(s));
#endif
#ifdef __CUDACC__
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<reverse, xpu>::Launch(s,
inputs[0].Size(),
reverse_index,
inputs[0].dptr<DType>(),
outputs[0].dptr<DType>(),
reinterpret_cast<index_t*>(stride_workspace),
reinterpret_cast<index_t*>(trailing_workspace));
});
#else
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
Kernel<reverse, xpu>::Launch(s,
inputs[0].Size(),
reverse_index,
inputs[0].dptr<DType>(),
outputs[0].dptr<DType>(),
stride_.data(),
trailing_.data());
});
#endif
}
struct StackParam : public dmlc::Parameter<StackParam> {
int axis;
int num_args;
DMLC_DECLARE_PARAMETER(StackParam) {
DMLC_DECLARE_FIELD(axis).set_default(0).describe(
"The axis in the result array along which the input arrays are stacked.");
DMLC_DECLARE_FIELD(num_args).set_lower_bound(1).describe("Number of inputs to be stacked.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s, num_args_s;
axis_s << axis;
num_args_s << num_args;
(*dict)["axis"] = axis_s.str();
(*dict)["num_args"] = num_args_s.str();
}
};
inline bool StackOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
mxnet::TShape dshape;
for (const mxnet::TShape& i : (*in_attrs)) {
shape_assign(&dshape, i);
}
if (!shape_is_known(dshape))
return false;
mxnet::TShape oshape(dshape.ndim() + 1, -1);
int axis = CheckAxis(param.axis, oshape.ndim());
for (int i = 0; i < axis; ++i) {
oshape[i] = dshape[i];
}
oshape[axis] = param.num_args;
for (index_t i = axis + 1; i < oshape.ndim(); ++i) {
oshape[i] = dshape[i - 1];
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape);
return shape_is_known(oshape);
}
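// Worked example: stacking num_args = 3 inputs of common shape (2, 4) along
// axis = 1 yields oshape = (2, 3, 4): dims before the axis are copied, the
// axis itself gets num_args, and the remaining input dims shift right by one.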
template <typename xpu>
void StackOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
int axis = CheckAxis(param.axis, outputs[0].ndim());
Stream<xpu>* s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
std::vector<Tensor<xpu, 3, DType>> data(inputs.size());
Tensor<xpu, 3, DType> out;
size_t leading = 1, trailing = 1;
for (int i = 0; i < axis; ++i) {
leading *= outputs[0].shape_[i];
}
for (int i = axis + 1; i < outputs[0].ndim(); ++i) {
trailing *= outputs[0].shape_[i];
}
size_t mid = outputs[0].shape_[axis];
Shape<3> oshape = Shape3(leading, mid, trailing);
out = outputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
for (size_t i = 0; i < inputs.size(); ++i) {
Shape<3> dshape = Shape3(leading, 1, trailing);
data[i] = inputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
}
Concatenate(data, &out, 1, req[0]);
})
}
template <typename xpu>
void StackOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
const StackParam& param = dmlc::get<StackParam>(attrs.parsed);
int axis = CheckAxis(param.axis, inputs[0].ndim());
Stream<xpu>* s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
std::vector<Tensor<xpu, 3, DType>> grad_in(outputs.size());
Tensor<xpu, 3, DType> grad;
size_t leading = 1, trailing = 1;
for (int i = 0; i < axis; ++i) {
leading *= inputs[0].shape_[i];
}
for (int i = axis + 1; i < inputs[0].ndim(); ++i) {
trailing *= inputs[0].shape_[i];
}
size_t mid = inputs[0].shape_[axis];
Shape<3> oshape = Shape3(leading, mid, trailing);
grad = inputs[0].get_with_shape<xpu, 3, DType>(oshape, s);
for (size_t i = 0; i < outputs.size(); ++i) {
Shape<3> dshape = Shape3(leading, 1, trailing);
grad_in[i] = outputs[i].get_with_shape<xpu, 3, DType>(dshape, s);
}
Split(grad, &grad_in, 1, req);
})
}
struct SqueezeParam : public dmlc::Parameter<SqueezeParam> {
dmlc::optional<mxnet::Tuple<int>> axis;
DMLC_DECLARE_PARAMETER(SqueezeParam) {
DMLC_DECLARE_FIELD(axis)
.set_default(dmlc::optional<mxnet::Tuple<int>>())
.describe(
"Selects a subset of the single-dimensional entries in the shape."
" If an axis is selected with shape entry greater than one, an error is raised.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream axis_s;
axis_s << axis;
(*dict)["axis"] = axis_s.str();
}
};
// Given a shape whose squeezed axes are marked with dim size -1,
// move all the -1's to the end of the shape array
// and keep the relative order of the remaining values.
// Returns the number of axes that survive the squeeze.
inline size_t SqueezeShapeHelper(mxnet::TShape* shape) {
CHECK(shape != nullptr);
size_t count = 0;
for (int i = 0; i < shape->ndim(); ++i) {
if ((*shape)[i] == -1) {
++count;
} else {
std::swap((*shape)[i], (*shape)[i - count]);
}
}
return shape->ndim() - count;
}
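// Worked example: SqueezeShapeHelper on shape = (3, -1, 2, -1) compacts it
// in place to (3, 2, -1, -1) and returns 2, the number of surviving axes.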
inline bool SqueezeShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const SqueezeParam& param = nnvm::get<SqueezeParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U) << "Input: [data]";
CHECK_EQ(out_attrs->size(), 1U);
const mxnet::TShape& dshape = in_attrs->at(0);
const int dndim = dshape.ndim();
if (!shape_is_known(dshape))
return false;
mxnet::TShape oshape = dshape;
if (param.axis.has_value()) {
// preprocess axis
mxnet::Tuple<int> axes = param.axis.value();
for (int i = 0; i < axes.ndim(); ++i) {
if (axes[i] < 0) {
axes[i] += dndim;
CHECK_GE(axes[i], 0) << "axis " << axes[i] - dndim
<< " is out of bounds for array of dimension " << dndim;
}
CHECK_LT(axes[i], dndim) << "axis " << axes[i] << " is out of bounds for array of dimension "
<< dndim;
CHECK_EQ(dshape[axes[i]], 1)
<< "cannot select an axis to squeeze out which has size=" << dshape[axes[i]]
<< " not equal to one";
CHECK_NE(oshape[axes[i]], -1) << "duplicate value in axis";
oshape[axes[i]] = -1;
}
} else {
for (int i = 0; i < oshape.ndim(); ++i) {
if (oshape[i] == 1)
oshape[i] = -1;
}
}
size_t oshape_size = SqueezeShapeHelper(&oshape);
if (oshape_size == 0) { // corner case when dshape is (1, 1, 1, 1)
oshape[0] = 1;
oshape_size = 1;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(oshape.data(), oshape.data() + oshape_size));
return true;
}
struct DepthToSpaceParam : public dmlc::Parameter<DepthToSpaceParam> {
int block_size;
DMLC_DECLARE_PARAMETER(DepthToSpaceParam) {
    DMLC_DECLARE_FIELD(block_size).describe("Blocks of [block_size, block_size] are moved");
}
};
inline bool DepthToSpaceOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Depth To Space requires exactly 4D tensor";
mxnet::TShape expected_out(4, -1);
mxnet::TShape& in_shape = in_attrs->at(0);
if (!mxnet::ndim_is_known(in_shape)) {
return false;
}
int block = param.block_size;
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
CHECK_EQ(in_shape[1] % (block * block), 0)
<< "Cannot perform Depth To Space operation on the specified tensor."
" Dimension:1(depth dimension) should be a multiple of 'block^2'";
CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
expected_out[0] = in_shape[0];
expected_out[1] = in_shape[1] / (block * block);
int i = 2;
while (i < expected_out.ndim()) {
expected_out[i] = in_shape[i] * block;
++i;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
return shape_is_known(expected_out);
}
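// Worked shape example (added for clarity): with block_size = 2, an input of
// shape (N, C, H, W) = (1, 8, 2, 3) produces an output of shape
// (1, 8/4, 2*2, 3*2) = (1, 2, 4, 6).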
inline bool DepthToSpaceOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
/*!
* \brief This function updates the value of input index from where the data element
* needs to be fetched and written out to the ith location in output tensor
* \param index_position index within offset array to get offset of given dimension
* \param dim_size size of current dimension
* \param idx output tensor index
* \param inp_index index within input tensor from where value is retrieved
* \param offset_arr array containing the linear offset of input tensor
*/
MSHADOW_XINLINE void update_index(index_t index_position,
index_t dim_size,
index_t* idx,
index_t* inp_index,
const index_t* offset_arr) {
index_t next_idx_val = *idx / dim_size;
*inp_index += (*idx - next_idx_val * dim_size) * offset_arr[index_position];
*idx = next_idx_val;
}
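// Illustrative arithmetic (added): each call peels the least-significant
// mixed-radix "digit" off *idx. E.g. with *idx = 23 and dim_size = 4:
// next_idx_val = 23 / 4 = 5 and the digit is 23 - 5 * 4 = 3, so *inp_index
// advances by 3 * offset_arr[index_position] and *idx becomes 5, ready for
// the next, more significant, dimension.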
/*!
* \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 4, 1, 5, 2) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template <int req>
struct depth_to_space_forward {
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out_data,
const DType* in_data,
const int block,
const index_t* size,
const index_t* offset_arr) {
index_t inp_index = 0, idx = i, dim_size;
dim_size = block;
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[3];
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2];
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1] / (block * block);
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];
update_index(0, dim_size, &idx, &inp_index, offset_arr);
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing depth_to_space operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template <int req>
struct compute_offset_for_depth_to_space {
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* offset_arr,
DType* size,
const int block,
const index_t size0,
const index_t size1,
const index_t size2,
const index_t size3) {
size[0] = size0;
size[1] = size1;
size[2] = size2;
size[3] = size3;
offset_arr[5] = 1;
offset_arr[4] = offset_arr[5] * size[3];
offset_arr[3] = offset_arr[4] * size[2];
offset_arr[2] = offset_arr[3] * size[1] / (block * block);
offset_arr[1] = offset_arr[2] * block;
offset_arr[0] = offset_arr[1] * block;
}
};
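// Worked example (added; values are illustrative): for input shape
// (N, C, H, W) = (1, 8, 2, 3) and block = 2 this kernel stores
//   size       = {1, 8, 2, 3}
//   offset_arr = {48, 24, 12, 6, 3, 1}
// i.e. the input strides when it is viewed as the 6-d tensor
// (N, block, block, C/block^2, H, W), which is exactly the order in which
// depth_to_space_forward consumes them.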
template <typename xpu>
void DepthToSpaceOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<compute_offset_for_depth_to_space<req_type>, xpu>::Launch(s,
1,
offset_arr,
size,
block,
in_data.shape_[0],
in_data.shape_[1],
in_data.shape_[2],
in_data.shape_[3]);
Kernel<depth_to_space_forward<req_type>, xpu>::Launch(s,
out_data.Size(),
out_data.dptr<DType>(),
in_data.dptr<DType>(),
block,
size,
offset_arr);
});
});
}
inline bool SpaceToDepthOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
  CHECK_EQ(in_attrs->at(0).ndim(), 4) << "Operation Space To Depth requires a 4D tensor";
mxnet::TShape expected_out(in_attrs->at(0).ndim(), -1);
mxnet::TShape& in_shape = in_attrs->at(0);
if (!mxnet::ndim_is_known(in_shape)) {
return false;
}
int block = param.block_size;
  CHECK_GT(block, 0) << "block_size must be a positive integer value";
CHECK_NE(in_shape[0], 0) << "Operation requires a 4D tensor. Size of dimension:0 cannot be 0";
CHECK_NE(in_shape[1], 0) << "Depth dimension:1 cannot be 0";
CHECK_NE(in_shape[2], 0) << "Operation requires a 4D tensor. Size of dimension:2 cannot be 0";
  CHECK_EQ(in_shape[2] % block, 0)
      << "Cannot perform Space To Depth operation on the specified tensor."
         " Dimension:2(1st Space dimension) should be a multiple of 'block' ";
  CHECK_NE(in_shape[3], 0) << "Operation requires a 4D tensor. Size of dimension:3 cannot be 0";
  CHECK_EQ(in_shape[3] % block, 0)
      << "Cannot perform Space To Depth operation on the specified tensor."
         " Dimension:3(2nd space dimension) should be a multiple of 'block' ";
expected_out[0] = in_shape[0];
expected_out[1] = in_shape[1] * block * block;
int i = 2;
while (i < expected_out.ndim()) {
expected_out[i] = in_shape[i] / block;
++i;
}
SHAPE_ASSIGN_CHECK(*out_attrs, 0, expected_out);
return shape_is_known(expected_out);
}
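// Worked shape example (added): with block_size = 2, an input of shape
// (N, C, H, W) = (1, 2, 4, 6) produces an output of shape
// (1, 2*4, 4/2, 6/2) = (1, 8, 2, 3) -- the inverse of the
// DepthToSpaceOpShape example above.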
inline bool SpaceToDepthOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
CHECK_EQ(out_attrs->size(), 1U);
TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0));
TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0));
return out_attrs->at(0) != -1;
}
/*!
 * \brief This function performs the tensor transpose (0, 1, 2, 3, 4, 5) ->
* (0, 3, 5, 1, 2, 4) by computing linear index within input tensor to be mapped
* to the ith index of output tensor
* \param i tensor index
* \param out_data output tensor
* \param in_data input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size array containing the size of each dimension of input tensor
* \param offset_arr array containing the linear offset of input tensor
*/
template <int req>
struct space_to_depth_forward {
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* out_data,
const DType* in_data,
const int block,
const index_t* size,
const index_t* offset_arr) {
index_t inp_index = 0, idx = i, dim_size;
dim_size = size[3] / block;
update_index(4, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[2] / block;
update_index(2, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[1];
update_index(1, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(5, dim_size, &idx, &inp_index, offset_arr);
dim_size = block;
update_index(3, dim_size, &idx, &inp_index, offset_arr);
dim_size = size[0];
update_index(0, dim_size, &idx, &inp_index, offset_arr);
KERNEL_ASSIGN(out_data[i], req, in_data[inp_index]);
}
};
/*!
* \brief This function calculates the linear offset for each dimension of
* input tensor and stores them in an array, which is later used in
* performing space_to_depth operation
* \param i global thread id
* \param offset_arr array to be populated with offset values
* \param size array to be populated with size of each dimension of input tensor
* \param block size of chunks to be moved out of depth dimension
* \param size0 size of Dim 0 of input tensor
* \param size1 size of Dim 1 of input tensor
* \param size2 size of Dim 2 of input tensor
* \param size3 size of Dim 3 of input tensor
*/
template <int req>
struct compute_offset_for_space_to_depth {
template <typename DType>
MSHADOW_XINLINE static void Map(index_t i,
DType* offset_arr,
DType* size,
const int block,
const index_t size0,
const index_t size1,
const index_t size2,
const index_t size3) {
size[0] = size0;
size[1] = size1;
size[2] = size2;
size[3] = size3;
offset_arr[5] = 1;
offset_arr[4] = offset_arr[5] * block;
offset_arr[3] = offset_arr[4] * size[3] / block;
offset_arr[2] = offset_arr[3] * block;
offset_arr[1] = offset_arr[2] * size[2] / block;
offset_arr[0] = offset_arr[1] * size[1];
}
};
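// Worked example (added; values are illustrative): for input shape
// (N, C, H, W) = (1, 2, 4, 6) and block = 2 this kernel stores
//   size       = {1, 2, 4, 6}
//   offset_arr = {48, 24, 12, 6, 2, 1}
// i.e. the input strides when it is viewed as the 6-d tensor
// (N, C, H/block, block, W/block, block) consumed by space_to_depth_forward.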
template <typename xpu>
void SpaceToDepthOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
CHECK_EQ(req.size(), 1U);
mshadow::Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& in_data = inputs[0];
const TBlob& out_data = outputs[0];
const DepthToSpaceParam& param = nnvm::get<DepthToSpaceParam>(attrs.parsed);
using namespace mxnet_op;
int block = param.block_size;
mshadow::Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(mshadow::Shape1(sizeof(index_t) * 10), s);
char* workspace_curr_ptr = workspace.dptr_;
index_t* offset_arr = reinterpret_cast<index_t*>(workspace_curr_ptr);
index_t* size = reinterpret_cast<index_t*>(workspace_curr_ptr + sizeof(index_t) * 6);
MSHADOW_TYPE_SWITCH(out_data.type_flag_, DType, {
MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, {
Kernel<compute_offset_for_space_to_depth<req_type>, xpu>::Launch(s,
1,
offset_arr,
size,
block,
in_data.shape_[0],
in_data.shape_[1],
in_data.shape_[2],
in_data.shape_[3]);
Kernel<space_to_depth_forward<req_type>, xpu>::Launch(s,
out_data.Size(),
out_data.dptr<DType>(),
in_data.dptr<DType>(),
block,
size,
offset_arr);
});
});
}
namespace split_enum {
enum SplitOpInputs { kData };
} // namespace split_enum
struct SplitParam : public dmlc::Parameter<SplitParam> {
mxnet::TShape indices;
int axis;
bool squeeze_axis;
int sections;
DMLC_DECLARE_PARAMETER(SplitParam) {
    DMLC_DECLARE_FIELD(indices).describe(
        "Indices of splits. The elements should denote the boundaries at which the split"
        " is performed along the `axis`.");
DMLC_DECLARE_FIELD(axis).set_default(1).describe("Axis along which to split.");
DMLC_DECLARE_FIELD(squeeze_axis)
.set_default(0)
.describe(
"If true, Removes the axis with length 1 from the shapes of the output arrays."
" **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1"
" only along the `axis` which it is split."
" Also `squeeze_axis` can be set to ``true``"
" only if ``input.shape[axis] == num_outputs``.");
    DMLC_DECLARE_FIELD(sections).set_default(0).describe(
        "Number of sections if equally split. Defaults to 0, which means split by indices.");
}
void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
std::ostringstream indices_s, axis_s, squeeze_axis_s, sections_s;
indices_s << indices;
axis_s << axis;
squeeze_axis_s << squeeze_axis;
sections_s << sections;
(*dict)["indices"] = indices_s.str();
(*dict)["axis"] = axis_s.str();
(*dict)["squeeze_axis"] = squeeze_axis_s.str();
(*dict)["sections"] = sections_s.str();
}
}; // struct SplitParam
inline mxnet::TShape GetSplitIndices(const mxnet::TShape& ishape, int axis, int sections) {
mxnet::TShape indices(sections + 1, -1);
indices[0] = 0;
int64_t section_size_b = (int64_t)(ishape[axis] / sections);
int64_t section_size_a = section_size_b + 1;
int section_a = ishape[axis] % sections;
for (int i = 0; i < sections; ++i) {
if (i < section_a) {
indices[i + 1] = section_size_a * (i + 1);
} else {
indices[i + 1] = section_size_b + indices[i];
}
}
return indices;
}
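// Worked example (added): for ishape[axis] = 10 and sections = 3,
// section_size_b = 3, section_size_a = 4 and section_a = 1, so the first
// section absorbs the remainder and the returned boundaries are {0, 4, 7, 10}.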
inline bool SplitOpType(const nnvm::NodeAttrs& attrs,
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
CHECK_EQ(in_attrs->size(), 1U);
int dtype = (*in_attrs)[0];
CHECK_NE(dtype, -1) << "First input must have specified type";
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
out_attrs->clear();
int num_outputs = (param.sections > 0) ? param.sections : param.indices.ndim();
for (int i = 0; i < num_outputs; ++i) {
out_attrs->push_back(dtype);
}
return true;
}
inline bool SplitOpShapeImpl(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs,
const int real_axis) {
using namespace mshadow;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
mxnet::TShape dshape = in_attrs->at(split_enum::kData);
mxnet::TShape ishape = in_attrs->at(split_enum::kData);
const mxnet::TShape indices =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
int num_outputs = (param.sections > 0) ? indices.ndim() - 1 : indices.ndim();
// Pre-compute squeezed output shape for future usage
mxnet::TShape squeezed_dshape = dshape;
for (int d = real_axis; d < squeezed_dshape.ndim() - 1; ++d) {
squeezed_dshape[d] = squeezed_dshape[d + 1];
}
squeezed_dshape =
mxnet::TShape(&squeezed_dshape[0], &squeezed_dshape[squeezed_dshape.ndim() - 1]);
// Assign shape to every output
for (int i = 0; i < num_outputs; ++i) {
index_t start = indices[i];
index_t end = (i < num_outputs - 1) ? indices[i + 1] : ishape[real_axis];
if (ishape[real_axis] == 0U) {
end = start;
} else {
      CHECK(start <= end) << "start " << start << " must not be greater than end " << end
                          << " for subarray " << i;
      CHECK(end <= ishape[real_axis])
          << "end " << end << " must not be greater than the size of the axis "
          << ishape[real_axis];
}
dshape[real_axis] = (end - start);
if (param.squeeze_axis) {
CHECK_EQ(end - start, 1U) << "expected axis size of 1 but got " << end - start;
SHAPE_ASSIGN_CHECK(*out_attrs, i, squeezed_dshape);
} else {
SHAPE_ASSIGN_CHECK(*out_attrs, i, dshape);
}
}
mxnet::TShape back_calculate_dshape = ishape;
back_calculate_dshape[real_axis] = 0;
for (int d = 0; d < real_axis; ++d) {
back_calculate_dshape[d] = (*out_attrs)[0][d];
}
if (param.squeeze_axis) {
back_calculate_dshape[real_axis] = num_outputs;
} else {
for (int i = 0; i < num_outputs; ++i) {
back_calculate_dshape[real_axis] += (*out_attrs)[i][real_axis];
}
}
for (int d = real_axis + 1; d < ishape.ndim(); ++d) {
if (param.squeeze_axis) {
back_calculate_dshape[d] = (*out_attrs)[0][d - 1];
} else {
back_calculate_dshape[d] = (*out_attrs)[0][d];
}
}
SHAPE_ASSIGN_CHECK(*in_attrs, split_enum::kData, back_calculate_dshape);
return true;
}
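// Worked example (added): splitting dshape = (2, 6, 3) along real_axis = 1
// with sections = 3 gives boundaries {0, 2, 4, 6} and three outputs of shape
// (2, 2, 3); with squeeze_axis each section must have size 1, e.g.
// sections = 6 yields six outputs of shape (2, 3).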
inline bool SplitOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
mxnet::ShapeVector* out_attrs) {
using namespace mshadow;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
CHECK_EQ(in_attrs->size(), 1U);
mxnet::TShape dshape = in_attrs->at(split_enum::kData);
if (!mxnet::ndim_is_known(dshape))
return false;
if (param.axis >= 0) {
CHECK_LT(param.axis, dshape.ndim());
} else {
    CHECK_GE(param.axis + dshape.ndim(), 0);
}
int real_axis = param.axis;
if (real_axis < 0) {
real_axis += dshape.ndim();
}
return SplitOpShapeImpl(attrs, in_attrs, out_attrs, real_axis);
}
struct SplitKernel {
/*!
* \brief Map function for forward split_v2 operator
* \param i global thread id
* \param in_data ptr to input buffer
* \param out_data ptr to ptr of outputs buffer
* \param indices ptr to indices buffer
* \param num_sections # of sections after split
   * \param axis_size size of the axis to be split on
   * \param trailing_size step size within the data buffer of the axis to be split on
*/
template <typename DType>
static MSHADOW_XINLINE void Map(size_t i,
const DType* in_data,
DType** out_data,
const size_t* indices,
const size_t num_sections,
const size_t axis_size,
const size_t trailing_size) {
size_t idx = i / trailing_size % axis_size;
size_t target = 0;
for (size_t section = 0; section < num_sections && indices[section] <= idx;
target = section++) {
}
DType* target_data = out_data[target];
const size_t mid_idx = idx - indices[target];
const size_t head_idx = i / (trailing_size * axis_size);
const size_t tail_idx = i % trailing_size;
const size_t section_size = indices[target + 1] - indices[target];
const size_t target_idx =
head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
target_data[target_idx] = in_data[i];
}
};
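// Illustrative decomposition (added): each linear input index factors as
//   i = head_idx * (axis_size * trailing_size) + idx * trailing_size + tail_idx.
// E.g. with axis_size = 5 and trailing_size = 4, i = 37 gives
// idx = (37 / 4) % 5 = 4, head_idx = 37 / 20 = 1, tail_idx = 37 % 4 = 1; the
// loop then picks the last section whose start boundary is <= idx and rebases
// idx into that section via mid_idx.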
struct ConcatenateKernel {
/*!
* \brief Map function for backward split_v2 operator
* \param i global thread id
* \param out_grad ptr to ptr of out grads buffer
* \param in_grad ptr to input grad buffer
* \param indices ptr to indices buffer
* \param num_sections # of sections after split
   * \param axis_size size of the axis to be split on
   * \param trailing_size step size within the data buffer of the axis to be split on
*/
template <typename DType>
static MSHADOW_XINLINE void Map(size_t i,
DType** out_grad,
DType* in_grad,
const size_t* indices,
const size_t num_sections,
const size_t axis_size,
const size_t trailing_size) {
size_t idx = i / trailing_size % axis_size;
size_t src = 0;
for (size_t section = 0; section < num_sections && indices[section] <= idx; src = section++) {
}
DType* src_grad = out_grad[src];
const size_t mid_idx = idx - indices[src];
const size_t head_idx = i / (trailing_size * axis_size);
const size_t tail_idx = i % trailing_size;
const size_t section_size = indices[src + 1] - indices[src];
const size_t src_idx =
head_idx * trailing_size * section_size + mid_idx * trailing_size + tail_idx;
in_grad[i] = src_grad[src_idx];
}
};
template <typename xpu>
inline void SplitOpForwardImpl(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const int real_axis) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
Stream<xpu>* s = ctx.get_stream<xpu>();
const TBlob& input_data = inputs[split_enum::kData];
size_t leading = 1, trailing = 1;
CHECK_LT(real_axis, input_data.ndim());
size_t mid = input_data.shape_[real_axis];
for (int i = 0; i < real_axis; ++i) {
leading *= input_data.shape_[i];
}
for (int i = real_axis + 1; i < input_data.ndim(); ++i) {
trailing *= input_data.shape_[i];
}
size_t workspace_size = 0;
const mxnet::TShape& ishape = input_data.shape_;
const mxnet::TShape split_pts =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
std::vector<size_t> indices;
for (const auto& section : split_pts) {
indices.push_back(section);
}
if (param.sections == 0) {
indices.push_back(ishape[real_axis]);
}
workspace_size += indices.size() * sizeof(size_t);
MSHADOW_TYPE_SWITCH(input_data.type_flag_, DType, {
std::vector<DType*> output_data;
for (const TBlob& data : outputs) {
output_data.push_back(data.dptr<DType>());
}
workspace_size += output_data.size() * sizeof(DType*);
Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
Tensor<xpu, 1, size_t> indices_xpu_tensor(reinterpret_cast<size_t*>(workspace.dptr_),
Shape1(indices.size()));
Tensor<cpu, 1, DType*> ptrs_cpu_tensor(output_data.data(), Shape1(output_data.size()));
Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
Shape1(output_data.size()));
mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
Kernel<SplitKernel, xpu>::Launch(s,
input_data.Size(),
input_data.dptr<DType>(),
ptrs_xpu_tensor.dptr_,
indices_xpu_tensor.dptr_,
indices.size() - 1,
mid,
trailing);
});
}
template <typename xpu>
inline void SplitOpForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim());
const TBlob& input_data = inputs[split_enum::kData];
int real_axis = param.axis;
if (real_axis < 0) {
real_axis += input_data.ndim();
}
SplitOpForwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
template <typename xpu>
inline void SplitOpBackwardImpl(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs,
const int real_axis) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
Stream<xpu>* s = ctx.get_stream<xpu>();
TBlob input_grad = outputs[split_enum::kData];
size_t leading = 1, trailing = 1;
CHECK_LT(real_axis, input_grad.ndim());
size_t mid = input_grad.shape_[real_axis];
for (int i = 0; i < real_axis; ++i) {
leading *= input_grad.shape_[i];
}
for (int i = real_axis + 1; i < input_grad.ndim(); ++i) {
trailing *= input_grad.shape_[i];
}
size_t workspace_size = 0;
const mxnet::TShape& ishape = input_grad.shape_;
const mxnet::TShape split_pts =
(param.sections > 0) ? GetSplitIndices(ishape, real_axis, param.sections) : param.indices;
std::vector<size_t> indices;
for (const auto& section : split_pts) {
indices.push_back(section);
}
if (param.sections == 0) {
indices.push_back(ishape[real_axis]);
}
workspace_size += indices.size() * sizeof(size_t);
MSHADOW_TYPE_SWITCH(input_grad.type_flag_, DType, {
std::vector<DType*> out_grads;
for (const TBlob& output_grad : inputs) {
out_grads.push_back(output_grad.dptr<DType>());
}
workspace_size += out_grads.size() * sizeof(DType*);
Tensor<xpu, 1, char> workspace =
ctx.requested[0].get_space_typed<xpu, 1, char>(Shape1(workspace_size), s);
Tensor<cpu, 1, size_t> indices_cpu_tensor(indices.data(), Shape1(indices.size()));
Tensor<xpu, 1, size_t> indices_xpu_tensor(reinterpret_cast<size_t*>(workspace.dptr_),
Shape1(indices.size()));
Tensor<cpu, 1, DType*> ptrs_cpu_tensor(out_grads.data(), Shape1(inputs.size()));
Tensor<xpu, 1, DType*> ptrs_xpu_tensor(
reinterpret_cast<DType**>(workspace.dptr_ + indices.size() * sizeof(size_t)),
Shape1(inputs.size()));
mshadow::Copy(indices_xpu_tensor, indices_cpu_tensor, s);
mshadow::Copy(ptrs_xpu_tensor, ptrs_cpu_tensor, s);
Kernel<ConcatenateKernel, xpu>::Launch(s,
input_grad.Size(),
ptrs_xpu_tensor.dptr_,
input_grad.dptr<DType>(),
indices_xpu_tensor.dptr_,
indices.size() - 1,
mid,
trailing);
});
}
template <typename xpu>
inline void SplitOpBackward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mxnet_op;
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), (param.sections > 0) ? param.sections : param.indices.ndim())
      << "out grad vector size must match the output size";
CHECK_EQ(outputs.size(), 1U);
int real_axis = param.axis;
if (real_axis < 0) {
real_axis += outputs[split_enum::kData].ndim();
}
SplitOpBackwardImpl<xpu>(attrs, ctx, inputs, req, outputs, real_axis);
}
inline uint32_t SplitNumOutputs(const NodeAttrs& attrs) {
const SplitParam& param = nnvm::get<SplitParam>(attrs.parsed);
return (param.sections > 0) ? param.sections : param.indices.ndim();
}
} // namespace op
} // namespace mxnet
namespace std {
template <>
struct hash<mxnet::op::TransposeParam> {
size_t operator()(const mxnet::op::TransposeParam& val) {
size_t ret = 0;
ret = dmlc::HashCombine(ret, val.axes);
return ret;
}
};
template <>
struct hash<mxnet::op::ReshapeParam> {
size_t operator()(const mxnet::op::ReshapeParam& val) {
size_t ret = 0;
ret = dmlc::HashCombine(ret, val.target_shape);
ret = dmlc::HashCombine(ret, val.keep_highest);
ret = dmlc::HashCombine(ret, val.shape);
ret = dmlc::HashCombine(ret, val.reverse);
return ret;
}
};
template <>
struct hash<mxnet::op::ExpandDimParam> {
size_t operator()(const mxnet::op::ExpandDimParam& val) {
size_t ret = 0;
ret = dmlc::HashCombine(ret, val.axis);
return ret;
}
};
} // namespace std
#endif // MXNET_OPERATOR_TENSOR_MATRIX_OP_INL_H_
|
GB_unop__ainv_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_uint32_uint32)
// op(A') function: GB (_unop_tran__ainv_uint32_uint32)
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = -z ; \
}
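// Note (added): for uint32_t the additive inverse wraps modulo 2^32, so
// z = -x yields (2^32 - x) for x > 0; e.g. -(uint32_t)1 == 4294967295u.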
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_uint32_uint32)
(
uint32_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint32_t z = aij ;
Cx [p] = -z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
uint32_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_uint32_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB014-outofbounds-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outermost loop is parallelized.
But the inner loop has an out-of-bounds access to b[i][j-1] when j equals 0.
This causes a memory access to a previous row's last element.
For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x
outer loop: i=2,
inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.
Data race pair: b[i][j]@75 vs. b[i][j-1]@75.
*/
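/* Illustrative aliasing arithmetic (added note): with row-major storage,
   &b[i][j-1] == &b[0][0] + i*m + (j-1), so for i=2, j=0, m=100 the access
   lands at linear offset 199, which is &b[1][99] -- the last element of the
   previous row. */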
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i,j;
int n=100, m=100;
double b[n][m];
#pragma omp parallel for private(j)
  for (i=1;i<n;i++)
    for (j=0;j<m;j++)
      b[i][j]=b[i][j-1];
printf ("b[50][50]=%f\n",b[50][50]);
return 0;
}
|
GB_unaryop__identity_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_int64
// op(A') function: GB_tran__identity_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp32_int64
(
float *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "opencl.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
void gemm_bin(int M, int N, int K, float ALPHA,
char *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
char A_PART = A[i*lda+k];
if(A_PART){
for(j = 0; j < N; ++j){
C[i*ldc+j] += B[k*ldb+j];
}
} else {
for(j = 0; j < N; ++j){
C[i*ldc+j] -= B[k*ldb+j];
}
}
}
}
}
float *random_matrix(int rows, int cols)
{
int i;
float *m = (float*)calloc(rows*cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<10; ++i){
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
    #pragma omp parallel for private(j, k) /* j and k must be private to avoid a data race */
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[i*lda+k];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_nt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
    #pragma omp parallel for private(j, k)
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
register float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_tn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
    #pragma omp parallel for private(j, k)
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[k*lda+i];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_tt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
    #pragma omp parallel for private(j, k)
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
register float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
if(!TA && !TB)
gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else if(TA && !TB)
gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else if(!TA && TB)
gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else
gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}
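/* Illustrative usage sketch (added; not part of the original source):
 * computes C = 1.0f*A*B + 0.0f*C for a 2x2 case, i.e. C = A*B. */
static void gemm_usage_example(void)
{
    float A[] = {1, 2,
                 3, 4};
    float B[] = {5, 6,
                 7, 8};
    float C[4] = {0};
    /* TA = TB = 0: no transpose; lda/ldb/ldc are the row widths */
    gemm(0, 0, 2, 2, 2, 1.0f, A, 2, B, 2, 0.0f, C, 2);
    /* C now holds {19, 22, 43, 50} */
}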
#ifdef GPU
#ifndef ARM
#include "clBLAS.h"
#endif
void gemm_kernel_init(void)
{
#ifndef ARM
cl_int clErr;
clErr = clblasSetup();
if (clErr != CL_SUCCESS)
{
printf("gemm_kernel_init: Could not setup clBLAS. Errorcode: %d\n", clErr);
}
#endif
}
void gemm_kernel_release(void)
{
#ifndef ARM
clblasTeardown();
#endif
}
cl_mem_ext random_matrix_gpu(int rows, int cols)
{
int i;
float *m = (float*)calloc(rows*cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return opencl_make_array(m, rows*cols);
}
#if !defined(GPU_MULTI) && !defined(ARM)
void gemm_offset_gpu(int TA, int TB, int M, int N, int K,
float ALPHA,
cl_mem_ext A_gpu, int offset_A, int lda,
cl_mem_ext B_gpu, int offset_B, int ldb,
float BETA,
cl_mem_ext C_gpu, int offset_C, int ldc)
{
#ifdef BENCHMARK
clock_t t;
t = clock();
#endif
cl_int clErr;
clErr = clblasSgemm(clblasRowMajor,
(TA ? clblasTrans : clblasNoTrans),
(TB ? clblasTrans : clblasNoTrans),
M, N, K, ALPHA,
A_gpu.mem, offset_A, lda,
B_gpu.mem, offset_B, ldb,
BETA,
C_gpu.mem, offset_C, ldc,
1, &opencl_queues[opencl_device_id_t], 0, NULL, NULL);
#ifdef GPU_SAFE
clFinish(opencl_queues[opencl_device_id_t]);
#endif
#ifdef BENCHMARK
t = clock() - t;
double time_taken = ((double)t);
printf("%s\t%d\n", "clblasSgemm", (int)time_taken);
#endif
if (clErr != CL_SUCCESS)
{
printf("gemm_gpu: clblasSgemm failed. Errorcode: %d\n", clErr);
}
}
#endif
void gemm_gpu(int TA, int TB, int M, int N, int K,
float ALPHA,
cl_mem_ext A_gpu, int lda,
cl_mem_ext B_gpu, int ldb,
float BETA,
cl_mem_ext C_gpu, int ldc)
{
#ifdef BENCHMARK
clock_t t;
t = clock();
#endif
gemm_offset_gpu(TA, TB, M, N, K,
ALPHA,
A_gpu, 0, lda,
B_gpu, 0, ldb,
BETA,
C_gpu, 0, ldc);
#ifdef BENCHMARK
t = clock() - t;
double time_taken = ((double)t);
printf("%s\t%d\n", "gemm_offset_gpu", (int)time_taken);
#endif
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
cl_mem_ext a;
if(!TA) a = random_matrix_gpu(m,k);
else a = random_matrix_gpu(k,m);
int lda = (!TA)?k:m;
cl_mem_ext b;
if(!TB) b = random_matrix_gpu(k,n);
else b = random_matrix_gpu(n,k);
int ldb = (!TB)?n:k;
cl_mem_ext c = random_matrix_gpu(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
opencl_free(a);
opencl_free(b);
opencl_free(c);
}
void time_gpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
cl_mem_ext a_cl = opencl_make_array(a, m*k);
cl_mem_ext b_cl = opencl_make_array(b, k*n);
cl_mem_ext c_cl = opencl_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
clFinish(opencl_queues[opencl_device_id_t]);
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
opencl_free(a_cl);
opencl_free(b_cl);
opencl_free(c_cl);
free(a);
free(b);
free(c);
}
/* TODO: THINK ABOUT IT?!
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
cl_mem_ext a;
if(!TA) a = random_matrix_gpu(m,k);
else a = random_matrix_gpu(k,m);
int lda = (!TA)?k:m;
cl_mem_ext b;
if(!TB) b = random_matrix_gpu(k,n);
else b = random_matrix_gpu(n,k);
int ldb = (!TB)?n:k;
cl_mem_ext c = random_matrix_gpu(m,n);
cl_mem_ext c_gpu = random_matrix_gpu(m,n);
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
opencl_free(a);
opencl_free(b);
opencl_free(c);
opencl_free(c_gpu);
}
*/
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,192,729,1600);
time_gpu(0,0,384,196,1728);
time_gpu(0,0,256,196,3456);
time_gpu(0,0,256,196,2304);
time_gpu(0,0,128,4096,12544);
time_gpu(0,0,128,4096,4096);
*/
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,576,12544);
time_gpu(0,0,256,2304,784);
time_gpu(1,1,2304,256,784);
time_gpu(0,0,512,4608,196);
time_gpu(1,1,4608,512,196);
return 0;
}
#endif
|
threadprivate3.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <assert.h>
static int sum0=0;
#pragma omp threadprivate(sum0)
int main()
{
int sum=0,sum1=0;
int i;
#pragma omp parallel
{
sum0=0;
#pragma omp for
for (i=1;i<=1000;i++)
{
sum0=sum0+i;
} /*end of for*/
#pragma omp critical
{
printf("partial sum0 is:%d\n",sum0);
sum= sum+sum0;
} /*end of critical*/
} /* end of parallel*/
for (i=1;i<=1000;i++)
{
sum1=sum1+i;
} /*end of for*/
printf("sum=%d; sum1=%d\n",sum,sum1);
assert(sum==sum1);
return 0;
}
|
mtimesx_RealTimesReal.c | /*************************************************************************************
*
* MATLAB (R) is a trademark of The Mathworks (R) Corporation
*
* Filename: mtimesx_RealTimesReal.c
* Programmer: James Tursa
* Version: 1.41
* Date: February 23, 2011
* Copyright: (c) 2009, 2010, 2011 by James Tursa, All Rights Reserved
*
* This code uses the BSD License:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the distribution
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* mtimesx_RealTimesReal.c is a support file for the mtimesx.c mex routine.
*
* Change Log:
* 2009/Sep/27 --> 1.00, Initial Release
* 2009/Dec/03 --> 1.01, Fixed scalar * sparse for scalar with inf or NaN
* 2009/Dec/05 --> 1.02, Fixed bug, added line scalarmultiply = 0;
* 2009/Dec/08 --> 1.10, Added singleton expansion capability
* 2009/Dec/10 --> 1.11, Slight code simplification for singleton expansion
* 2010/Feb/23 --> 1.20, Fixed bug for dgemv and sgemv calls
* 2010/Aug/02 --> 1.30, Added (nD scalar) * (nD array) capability
* Replaced buggy mxRealloc with custom code
* 2010/Oct/04 --> 1.40, Added OpenMP support for custom code
* Expanded sparse * single and sparse * nD support
* Fixed (nD complex scalar)C * (nD array) bug.
* 2011/Feb/23 --> 1.41, Fixed typos in _syrk and _syr2k BLAS prototypes.
*
****************************************************************************/
/*---------------------------------------------------------------------------------
* Complex type for function return
*--------------------------------------------------------------------------------- */
struct RealKindComplex {RealKind r; RealKind i;};
/*---------------------------------------------------------------------------------
* Function Prototypes
*--------------------------------------------------------------------------------- */
int AllRealZero(RealKind *x, mwSignedIndex n);
void RealKindEqP1P0TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqP1P0TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqP1P0TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqP1P0TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqP1P1TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqP1P1TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqP1P1TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqP1P1TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqP1M1TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqP1M1TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqP1M1TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqP1M1TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqP1PxTimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqP1PxTimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqP1PxTimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqP1PxTimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqM1P0TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqM1P0TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqM1P0TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqM1P0TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqM1P1TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqM1P1TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqM1P1TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqM1P1TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqM1M1TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqM1M1TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqM1M1TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqM1M1TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqM1PxTimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqM1PxTimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqM1PxTimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqM1PxTimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqPxP1TimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqPxP1TimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqPxP1TimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqPxP1TimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqPxM1TimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqPxM1TimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqPxM1TimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqPxM1TimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqPxP0TimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqPxP0TimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqPxP0TimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqPxP0TimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqPxPxTimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind ai, RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqPxPxTimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ar, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n);
void RealKindEqPxPxTimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ar, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealKindEqPxPxTimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ar, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p);
void RealTimesScalar(RealKind *Cpr, RealKind *Cpi, RealKind *Bpr, RealKind *Bpi, char transb,
mwSize m2, mwSize n2, RealKind ar, RealKind ai, mwSize n, mwSize p,int);
void RealTimesScalarX(RealKind *Cpr, RealKind *Cpi, RealKind *Bpr, RealKind *Bpi, char transb,
mwSize m2, mwSize n2, RealKind ar, RealKind ai, mwSize n, mwSize p, int);
void xFILLPOS(RealKind *Cpr, mwSignedIndex n);
void xFILLNEG(RealKind *Cpr, mwSignedIndex n);
RealKind xDOT(mwSignedIndex *, RealKind *, mwSignedIndex *, RealKind *, mwSignedIndex *);
void xGER(mwSignedIndex *, mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *,
RealKind *, mwSignedIndex *, RealKind *, mwSignedIndex *);
void xSYR(char *, mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *,
RealKind *, mwSignedIndex *);
void xSYR2(char *, mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *,
RealKind *, mwSignedIndex *, RealKind *, mwSignedIndex *);
void xGEMV(char *, mwSignedIndex *, mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *,
RealKind *, mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *);
void xGEMM(char *, char *, mwSignedIndex *, mwSignedIndex *, mwSignedIndex *,
RealKind *, RealKind *, mwSignedIndex *, RealKind *, mwSignedIndex *,
RealKind *, RealKind *, mwSignedIndex *);
void xSYRK(char *, char *, mwSignedIndex *, mwSignedIndex *, RealKind *, RealKind *,
mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *);
void xSYR2K(char *, char *, mwSignedIndex *, mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *,
RealKind *, mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *);
void xAXPY(mwSignedIndex *, RealKind *, RealKind *, mwSignedIndex *, RealKind *, mwSignedIndex *);
struct RealKindComplex RealKindDotProduct(mwSignedIndex, RealKind *, RealKind *, RealKind,
RealKind *, RealKind *, RealKind, int);
struct RealKindComplex RealKindDotProductX(mwSignedIndex, RealKind *, RealKind *, RealKind,
RealKind *, RealKind *, RealKind, int);
void RealKindOuterProduct(mwSignedIndex, mwSignedIndex, RealKind *, RealKind *, char,
RealKind *, RealKind *, char, RealKind *, RealKind *, int);
void RealKindOuterProductX(mwSignedIndex, mwSignedIndex, RealKind *, RealKind *, char,
RealKind *, RealKind *, char, RealKind *, RealKind *, int);
mxArray *RealScalarTimesReal(mxArray *, char, mwSize, mwSize, mxArray *, char, mwSize, mwSize);
/*-------------------------------------------------------------------------------------
* Function for multiplying two MATLAB variables
*------------------------------------------------------------------------------------- */
mxArray *RealTimesReal(mxArray *A, char transa, mxArray *B, char transb)
{
mwSignedIndex inc = 1;
RealKind Zero = zero;
RealKind One = one;
RealKind Minusone = minusone;
char uplo = 'L'; /* Arbitrary choice. Pick the symmetric case as lower triangular */
mwSize m1, n1, m2, n2, Andim, Bndim, Cndim, Ap, Bp, Cp, ip, p, Asize, Bsize, Csize, ndim;
mwSize Ablock, Bblock;
mwSize *Adims, *Bdims, *Cdims, *Adimz, *Bdimz, *Cindx;
register mwSignedIndex i, j;
mwSignedIndex m, n, k, l, lda, ldb, ldc;
mxArray *C, *result, *rhs[4];
RealKind *Apr, *Api, *Bpr, *Bpi, *Cpr, *Cpi, *Apr0, *Api0, *Bpr0, *Bpi0;
RealKind *apr, *api, *bpr, *bpi, *cpr, *cpi;
RealKind ai, bi, sr, si, aibi;
RealKind Apr11, Apr12, Apr13, Apr14,
Apr21, Apr22, Apr23, Apr24,
Apr31, Apr32, Apr33, Apr34,
Apr41, Apr42, Apr43, Apr44;
RealKind Api11, Api12, Api13, Api14,
Api21, Api22, Api23, Api24,
Api31, Api32, Api33, Api34,
Api41, Api42, Api43, Api44;
RealKind Bpr11, Bpr12, Bpr13, Bpr14,
Bpr21, Bpr22, Bpr23, Bpr24,
Bpr31, Bpr32, Bpr33, Bpr34,
Bpr41, Bpr42, Bpr43, Bpr44;
RealKind Bpi11, Bpi12, Bpi13, Bpi14,
Bpi21, Bpi22, Bpi23, Bpi24,
Bpi31, Bpi32, Bpi33, Bpi34,
Bpi41, Bpi42, Bpi43, Bpi44;
char ptransa, ptransb;
char transstring[2] = "_";
struct RealKindComplex z;
int scalarmultiply;
int scalar_method, dot_method, outer_method;
int singleton_expansion;
int destroyA = 0, destroyB = 0;
/*--------------------------------------------------------------------------------
* Get sizes. Note that in the multi-dimensional case, mxGetN returns the product
* of all of the dimension sizes 2 through end.
*-------------------------------------------------------------------------------- */
debug_message = debug;
threads_used = 0;
m1 = mxGetM(A);
n1 = mxGetN(A);
m2 = mxGetM(B);
n2 = mxGetN(B);
/*--------------------------------------------------------------------------------
* Get pointers to the data areas of the operands.
*-------------------------------------------------------------------------------- */
Apr0 = Apr = mxGetData(A);
Api0 = Api = mxGetImagData(A);
Bpr0 = Bpr = mxGetData(B);
Bpi0 = Bpi = mxGetImagData(B);
/*--------------------------------------------------------------------------------
* Simplify transa & transb if appropriate.
*-------------------------------------------------------------------------------- */
if( !Api ) {
if( transa == 'C' ) transa = 'T';
if( transa == 'G' ) transa = 'N';
}
if( !Bpi ) {
if( transb == 'C' ) transb = 'T';
if( transb == 'G' ) transb = 'N';
}
/*--------------------------------------------------------------------------------
* Scalar expansion cases (custom sparse array code for these cases only).
 * If there is an inf or NaN in the scalar and the other variable is sparse,
* then don't do the custom code because the sparse zeros will not remain
* zero. So in that case just fall through and call mtimesx_sparse.
* (1 x 1) * (K x N) or (M x K) * (1 x 1)
*-------------------------------------------------------------------------------- */
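/* Example: scalar = Inf with B sparse. Every implicit zero of B would become
 * Inf * 0 = NaN in the result, so the sparsity pattern is not preserved and
 * the custom scalar-expansion code must be bypassed. */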
scalarmultiply = 0;
if( m1 == 1 && n1 == 1 ) {
if( debug ) {
mexPrintf("MTIMESX: (1 x 1) * (array)\n");
}
scalarmultiply = 1;
if( mxIsSparse(B) ) {
if( mxIsInf(*Apr) || mxIsNaN(*Apr) ) {
scalarmultiply = 0;
} else if( (Api != NULL) && (mxIsInf(*Api) || mxIsNaN(*Api)) ) {
scalarmultiply = 0;
}
}
} else if( m2 == 1 && n2 == 1 ) {
if( debug ) {
mexPrintf("MTIMESX: (array) * (1 x 1)\n");
}
scalarmultiply = 1;
if( mxIsSparse(A) ) {
if( mxIsInf(*Bpr) || mxIsNaN(*Bpr) ) {
scalarmultiply = 0;
} else if( (Bpi != NULL) && (mxIsInf(*Bpi) || mxIsNaN(*Bpi)) ) {
scalarmultiply = 0;
}
}
}
if( scalarmultiply ) {
return RealScalarTimesReal(A, transa, m1, n1, B, transb, m2, n2);
}
/*--------------------------------------------------------------------------------
* Multi-dimensional sparse matrix results are not supported in MATLAB, so we are
* forced to convert the sparse matrix to a full matrix for these cases. Just hope
* the memory isn't blown.
*-------------------------------------------------------------------------------- */
if( mxIsSparse(A) && mxGetNumberOfDimensions(B) > 2 ) {
k = mexCallMATLAB(1, rhs, 1, &A, "full");
A = rhs[0];
m1 = mxGetM(A);
n1 = mxGetN(A);
Apr0 = Apr = mxGetData(A);
Api0 = Api = mxGetImagData(A);
destroyA = 1;
}
if( mxIsSparse(B) && mxGetNumberOfDimensions(A) > 2 ) {
k = mexCallMATLAB(1, rhs, 1, &B, "full");
B = rhs[0];
m2 = mxGetM(B);
n2 = mxGetN(B);
Bpr0 = Bpr = mxGetData(B);
Bpi0 = Bpi = mxGetImagData(B);
destroyB = 1;
}
/*--------------------------------------------------------------------------------
* Generic sparse matrix or vector operations are not directly supported.
* So just call an m-file to do the work. Won't save any time, but at least
* the function will be supported.
*-------------------------------------------------------------------------------- */
if( mxIsSparse(A) || mxIsSparse(B) ) {
if( debug ) {
mexPrintf("MTIMESX: Unsupported sparse operation ... calling MATLAB intrinsic function mtimes\n");
}
rhs[0] = A;
transstring[0] = transa;
rhs[1] = mxCreateString(transstring);
rhs[2] = B;
transstring[0] = transb;
rhs[3] = mxCreateString(transstring);
mexCallMATLAB(1, &result, 4, rhs, "mtimesx_sparse");
mxDestroyArray(rhs[3]);
mxDestroyArray(rhs[1]);
return result;
}
/*-------------------------------------------------------------------------------
* Rename array sizes for convenience. This also ensures that the integer
* arguments to the BLAS routines are of type mwSignedIndex.
*------------------------------------------------------------------------------- */
Andim = mxGetNumberOfDimensions(A);
Adims = (mwSize *) mxGetDimensions(A);
Bndim = mxGetNumberOfDimensions(B);
Bdims = (mwSize *) mxGetDimensions(B);
m1 = Adims[0];
n1 = Adims[1];
m2 = Bdims[0];
n2 = Bdims[1];
if( transa == 'N' || transa == 'G' ) {
m = m1;
k = n1;
} else {
m = n1;
k = m1;
}
if( transb == 'N' || transb == 'G' ) {
l = m2;
n = n2;
} else {
l = n2;
n = m2;
}
lda = m1;
ldb = m2;
ldc = m;
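/* MATLAB stores matrices column-major, so the leading dimension of each
 * operand is simply its stored row count: lda = m1, ldb = m2, ldc = m. */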
/*-------------------------------------------------------------------------------
* Check for conforming sizes. Allow nD scalar multiply.
*------------------------------------------------------------------------------- */
if( (k != l) && (m*k != 1) && (l*n != 1) ) {
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
mexErrMsgTxt("Inner matrix dimensions must agree.");
}
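/* Example: (2 x 3) * (5 x 4) fails here because k = 3 and l = 5, but
 * (1 x 1) * (5 x 4) passes since m*k == 1 triggers nD scalar expansion. */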
ndim = (Andim <= Bndim) ? Andim : Bndim;
for( Cp=2; Cp<ndim; Cp++ ) {
if( Adims[Cp] != Bdims[Cp] && Adims[Cp] != 1 && Bdims[Cp] != 1 ) {
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
mexErrMsgTxt("Dimensions 3:end must agree or be 1 in ND case.");
}
}
/*-------------------------------------------------------------------------------
* Check for nD scalar multiply.
*------------------------------------------------------------------------------- */
if( m*k == 1 ) {
scalarmultiply = 1;
} else if( l*n == 1 ) {
scalarmultiply = 2;
} else {
scalarmultiply = 0;
}
/*-------------------------------------------------------------------------------
* Construct the dimensions of the result. Also use the p variable to keep track
* of the total number of individual matrix multiplies that are involved. The
* first two dimensions are simply the size of a single matrix multiply result,
* accounting for the transa and transb pre-operations. The remaining dimensions
* are copied from A or B, whichever happens to be non-singleton.
*------------------------------------------------------------------------------- */
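/* Worked example: A is 2x3x4x1x5, B is 3x2x1x6x5, transa = transb = 'N'.
 * Then Cdims = [2 2 4 6 5] and p = 4*6*5 = 120 individual 2x2 multiplies. */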
Cndim = (Andim > Bndim) ? Andim : Bndim;
Cindx = mxMalloc( Cndim * sizeof(*Cindx) );
Cdims = mxMalloc( Cndim * sizeof(*Cdims) );
if( scalarmultiply == 1 ) {
Cdims[0] = l;
Cdims[1] = n;
} else if( scalarmultiply == 2 ) {
Cdims[0] = m;
Cdims[1] = k;
} else {
Cdims[0] = m;
Cdims[1] = n;
}
Adimz = mxMalloc( Cndim * sizeof(*Adimz) );
Adimz[0] = Adims[0];
Adimz[1] = Adims[1];
Bdimz = mxMalloc( Cndim * sizeof(*Bdimz) );
Bdimz[0] = Bdims[0];
Bdimz[1] = Bdims[1];
p = 1;
for( Cp=2; Cp<Cndim; Cp++ ) {
Adimz[Cp] = (Cp < Andim) ? Adims[Cp] : 1;
Bdimz[Cp] = (Cp < Bndim) ? Bdims[Cp] : 1;
Cdims[Cp] = (Adimz[Cp] > Bdimz[Cp]) ? Adimz[Cp] : Bdimz[Cp];
p *= Cdims[Cp];
}
for( Cp=0; Cp<Cndim; Cp++ ) {
Cindx[Cp] = 0;
}
/*------------------------------------------------------------------------------
* Create output array
*------------------------------------------------------------------------------ */
if( mxGetNumberOfElements(A) == 0 || mxGetNumberOfElements(B) == 0 ) {
result = mxCreateNumericArray(Cndim, Cdims, MatlabReturnType, mxREAL);
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
if( mxIsComplex(A) || mxIsComplex(B) ) {
result = mxCreateNumericArray(Cndim, Cdims, MatlabReturnType, mxCOMPLEX);
} else {
result = mxCreateNumericArray(Cndim, Cdims, MatlabReturnType, mxREAL);
}
C = result;
Cpr = mxGetData(C);
Cpi = mxGetImagData(C);
/*----------------------------------------------------------------------------
* See if we can do a simple reshape to do the nD multiply all at once
*---------------------------------------------------------------------------- */
if( Andim == 2 && Bndim > 2 && !scalarmultiply && (k == 1 || n == 1 || transb == 'N' || transb == 'G') ) {
if( debug_message ) {
mexPrintf("MTIMESX: Reshaping nD multiply as a single multiply\n");
}
if( transb == 'T' ) transb = 'N';
if( transb == 'C' ) transb = 'G';
m2 = k;
n2 = n * p;
n = n2;
p = 1;
ldb = m2;
}
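/* Example of the reshape above: (M x K) * (K x N x P) with transb == 'N' is
 * the single multiply (M x K) * (K x (N*P)), because the pages of B are
 * stored contiguously in column-major order. */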
if( Bndim == 2 && Andim > 2 && !scalarmultiply && m == 1 ) {
if( debug_message ) {
mexPrintf("MTIMESX: Reshaping and reordering nD multiply as a single multiply\n");
}
apr = Apr; Apr = Bpr; Bpr = apr;
api = Api; Api = Bpi; Bpi = api;
ptransa = transa; transa = transb; transb = ptransa;
if( transa == 'N' ) {
transa = 'T';
} else if( transa == 'T' ) {
transa = 'N';
} else if( transa == 'G' ) {
transa = 'C';
} else {
transa = 'G';
}
if( transb == 'C' ) {
transb = 'G';
} else if( transb == 'T' ) {
transb = 'N';
}
m1 = m2;
n1 = n2;
m2 = k;
n2 = m * p;
if( transa == 'N' || transa == 'G' ) {
m = m1;
} else {
m = n1;
}
l = k;
n = n2;
p = 1;
lda = m1;
ldb = k;
ldc = m;
}
/*------------------------------------------------------------------------------
* Set up conjugate factors
*------------------------------------------------------------------------------ */
ai = ( transa == 'C' || transa == 'G' ) ? -one : one;
bi = ( transb == 'C' || transb == 'G' ) ? -one : one;
aibi = - ai * bi;
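/* ai and bi are +1/-1 sign factors applied to the imaginary parts so that the
 * conjugate cases ('C' and 'G') never form an explicit conjugated copy; e.g.
 * for transa == 'C' each Api[i] is read as -Api[i] by the loop code below. */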
/*----------------------------------------------------------------------------
* Individual matrix block sizes
*---------------------------------------------------------------------------- */
Asize = m1 * n1;
Bsize = m2 * n2;
Csize = Cdims[0] * Cdims[1];
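/* Asize, Bsize and Csize are the element counts of one 2D page of A, B and C;
 * they act as pointer strides between the p successive matrix multiplies. */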
#ifdef _OPENMP
/*----------------------------------------------------------------------------
* Check to see if singleton expansion is present
*---------------------------------------------------------------------------- */
singleton_expansion = 0;
for( i=2; i<Andim; i++ ) {
if( Adims[i] == 1 ) {
singleton_expansion = 1;
break;
}
}
if( !singleton_expansion ) {
for( i=2; i<Bndim; i++ ) {
if( Bdims[i] == 1 ) {
singleton_expansion = 1;
break;
}
}
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (1x1)*(KxN) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
scalarmultiply == 1 && p >= OMP_SPECIAL_SMALL && !singleton_expansion ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Ablock = Asize;
Bblock = Bsize;
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
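/* Stride trick: if A (or B) is only 2D it is reused for every page of the nD
 * operand, so its per-iteration stride Asize (or Bsize) is zeroed here while
 * Ablock and Bblock keep the true per-page element counts. */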
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind sr_, si_;
RealKind *Apr_, *Bpr_, *Cpr_, *Api_, *Bpi_, *Cpi_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
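/* Partition example: p = 10 multiplies on num_threads = 4 gives blocksize = 2;
 * threads 0-2 take 2 multiplies each and the last thread takes p - 6 = 4. */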
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
Api_ = (Api?Api+offset*Asize:NULL);
Bpi_ = (Bpi?Bpi+offset*Bsize:NULL);
Cpi_ = (Cpi?Cpi+offset*Csize:NULL);
for( ip_=0; ip_<p_; ip_++ ) {
sr_ = *Apr_;
si_ = Api_ ? (transa=='N'||transa=='T'?*Api_:-*Api_) : zero;
RealTimesScalar(Cpr_, Cpi_, Bpr_, Bpi_, transb, m2, n2, sr_, si_, Bblock, 1, METHOD_LOOPS);
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
if( Api_ ) {
Api_ += Asize;
}
if( Bpi_ ) {
Bpi_ += Bsize;
}
if( Cpi_ ) {
Cpi_ += Csize;
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (MxK)*(1x1) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
scalarmultiply == 2 && p >= OMP_SPECIAL_SMALL && !singleton_expansion ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Ablock = Asize;
Bblock = Bsize;
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind sr_, si_;
RealKind *Apr_, *Bpr_, *Cpr_, *Api_, *Bpi_, *Cpi_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
Api_ = (Api?Api+offset*Asize:NULL);
Bpi_ = (Bpi?Bpi+offset*Bsize:NULL);
Cpi_ = (Cpi?Cpi+offset*Csize:NULL);
for( ip_=0; ip_<p_; ip_++ ) {
sr_ = *Bpr_;
si_ = Bpi_ ? (transb=='N'||transb=='T'?*Bpi_:-*Bpi_) : zero;
RealTimesScalar(Cpr_, Cpi_, Apr_, Api_, transa, m1, n1, sr_, si_, Ablock, 1, METHOD_LOOPS);
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
if( Api_ ) {
Api_ += Asize;
}
if( Bpi_ ) {
Bpi_ += Bsize;
}
if( Cpi_ ) {
Cpi_ += Csize;
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (2x2)*(2x2) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 2 && k == 2 && n == 2 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
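/* Column-major index map for the unrolled code below: X[i + j*rows] is
 * X(i+1, j+1), so for the 2x2 case X[0] = X(1,1), X[1] = X(2,1),
 * X[2] = X(1,2), X[3] = X(2,2). The same convention applies to the 3x3
 * and 4x4 blocks further down. */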
if( transa == 'T' ) {
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[2];
Cpr_[1] = Apr_[2] * Bpr_[0] + Apr_[3] * Bpr_[2];
Cpr_[2] = Apr_[0] * Bpr_[1] + Apr_[1] * Bpr_[3];
Cpr_[3] = Apr_[2] * Bpr_[1] + Apr_[3] * Bpr_[3];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[1];
Cpr_[1] = Apr_[2] * Bpr_[0] + Apr_[3] * Bpr_[1];
Cpr_[2] = Apr_[0] * Bpr_[2] + Apr_[1] * Bpr_[3];
Cpr_[3] = Apr_[2] * Bpr_[2] + Apr_[3] * Bpr_[3];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
} else {
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[2] * Bpr_[2];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[3] * Bpr_[2];
Cpr_[2] = Apr_[0] * Bpr_[1] + Apr_[2] * Bpr_[3];
Cpr_[3] = Apr_[1] * Bpr_[1] + Apr_[3] * Bpr_[3];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[2] * Bpr_[1];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[3] * Bpr_[1];
Cpr_[2] = Apr_[0] * Bpr_[2] + Apr_[2] * Bpr_[3];
Cpr_[3] = Apr_[1] * Bpr_[2] + Apr_[3] * Bpr_[3];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (2x2)*(2x1) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 2 && k == 2 && l == 2 && n == 1 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
if( transa == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[1];
Cpr_[1] = Apr_[2] * Bpr_[0] + Apr_[3] * Bpr_[1];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[2] * Bpr_[1];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[3] * Bpr_[1];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (1x2)*(2x2) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 1 && k == 2 && n == 2 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[2];
Cpr_[1] = Apr_[0] * Bpr_[1] + Apr_[1] * Bpr_[3];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[1];
Cpr_[1] = Apr_[0] * Bpr_[2] + Apr_[1] * Bpr_[3];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (3x3)*(3x3) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 3 && k == 3 && n == 3 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
if( transa == 'T' ) {
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[3] + Apr_[2] * Bpr_[6];
Cpr_[1] = Apr_[3] * Bpr_[0] + Apr_[4] * Bpr_[3] + Apr_[5] * Bpr_[6];
Cpr_[2] = Apr_[6] * Bpr_[0] + Apr_[7] * Bpr_[3] + Apr_[8] * Bpr_[6];
Cpr_[3] = Apr_[0] * Bpr_[1] + Apr_[1] * Bpr_[4] + Apr_[2] * Bpr_[7];
Cpr_[4] = Apr_[3] * Bpr_[1] + Apr_[4] * Bpr_[4] + Apr_[5] * Bpr_[7];
Cpr_[5] = Apr_[6] * Bpr_[1] + Apr_[7] * Bpr_[4] + Apr_[8] * Bpr_[7];
Cpr_[6] = Apr_[0] * Bpr_[2] + Apr_[1] * Bpr_[5] + Apr_[2] * Bpr_[8];
Cpr_[7] = Apr_[3] * Bpr_[2] + Apr_[4] * Bpr_[5] + Apr_[5] * Bpr_[8];
Cpr_[8] = Apr_[6] * Bpr_[2] + Apr_[7] * Bpr_[5] + Apr_[8] * Bpr_[8];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[1] + Apr_[2] * Bpr_[2];
Cpr_[1] = Apr_[3] * Bpr_[0] + Apr_[4] * Bpr_[1] + Apr_[5] * Bpr_[2];
Cpr_[2] = Apr_[6] * Bpr_[0] + Apr_[7] * Bpr_[1] + Apr_[8] * Bpr_[2];
Cpr_[3] = Apr_[0] * Bpr_[3] + Apr_[1] * Bpr_[4] + Apr_[2] * Bpr_[5];
Cpr_[4] = Apr_[3] * Bpr_[3] + Apr_[4] * Bpr_[4] + Apr_[5] * Bpr_[5];
Cpr_[5] = Apr_[6] * Bpr_[3] + Apr_[7] * Bpr_[4] + Apr_[8] * Bpr_[5];
Cpr_[6] = Apr_[0] * Bpr_[6] + Apr_[1] * Bpr_[7] + Apr_[2] * Bpr_[8];
Cpr_[7] = Apr_[3] * Bpr_[6] + Apr_[4] * Bpr_[7] + Apr_[5] * Bpr_[8];
Cpr_[8] = Apr_[6] * Bpr_[6] + Apr_[7] * Bpr_[7] + Apr_[8] * Bpr_[8];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
} else {
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[3] * Bpr_[3] + Apr_[6] * Bpr_[6];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[4] * Bpr_[3] + Apr_[7] * Bpr_[6];
Cpr_[2] = Apr_[2] * Bpr_[0] + Apr_[5] * Bpr_[3] + Apr_[8] * Bpr_[6];
Cpr_[3] = Apr_[0] * Bpr_[1] + Apr_[3] * Bpr_[4] + Apr_[6] * Bpr_[7];
Cpr_[4] = Apr_[1] * Bpr_[1] + Apr_[4] * Bpr_[4] + Apr_[7] * Bpr_[7];
Cpr_[5] = Apr_[2] * Bpr_[1] + Apr_[5] * Bpr_[4] + Apr_[8] * Bpr_[7];
Cpr_[6] = Apr_[0] * Bpr_[2] + Apr_[3] * Bpr_[5] + Apr_[6] * Bpr_[8];
Cpr_[7] = Apr_[1] * Bpr_[2] + Apr_[4] * Bpr_[5] + Apr_[7] * Bpr_[8];
Cpr_[8] = Apr_[2] * Bpr_[2] + Apr_[5] * Bpr_[5] + Apr_[8] * Bpr_[8];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[3] * Bpr_[1] + Apr_[6] * Bpr_[2];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[4] * Bpr_[1] + Apr_[7] * Bpr_[2];
Cpr_[2] = Apr_[2] * Bpr_[0] + Apr_[5] * Bpr_[1] + Apr_[8] * Bpr_[2];
Cpr_[3] = Apr_[0] * Bpr_[3] + Apr_[3] * Bpr_[4] + Apr_[6] * Bpr_[5];
Cpr_[4] = Apr_[1] * Bpr_[3] + Apr_[4] * Bpr_[4] + Apr_[7] * Bpr_[5];
Cpr_[5] = Apr_[2] * Bpr_[3] + Apr_[5] * Bpr_[4] + Apr_[8] * Bpr_[5];
Cpr_[6] = Apr_[0] * Bpr_[6] + Apr_[3] * Bpr_[7] + Apr_[6] * Bpr_[8];
Cpr_[7] = Apr_[1] * Bpr_[6] + Apr_[4] * Bpr_[7] + Apr_[7] * Bpr_[8];
Cpr_[8] = Apr_[2] * Bpr_[6] + Apr_[5] * Bpr_[7] + Apr_[8] * Bpr_[8];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (3x3)*(3x1) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 3 && k == 3 && l == 3 && n == 1 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
if( transa == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[1] + Apr_[2] * Bpr_[2];
Cpr_[1] = Apr_[3] * Bpr_[0] + Apr_[4] * Bpr_[1] + Apr_[5] * Bpr_[2];
Cpr_[2] = Apr_[6] * Bpr_[0] + Apr_[7] * Bpr_[1] + Apr_[8] * Bpr_[2];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[3] * Bpr_[1] + Apr_[6] * Bpr_[2];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[4] * Bpr_[1] + Apr_[7] * Bpr_[2];
Cpr_[2] = Apr_[2] * Bpr_[0] + Apr_[5] * Bpr_[1] + Apr_[8] * Bpr_[2];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (1x3)*(3x3) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 1 && k == 3 && n == 3 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[3] + Apr_[2] * Bpr_[6];
Cpr_[1] = Apr_[0] * Bpr_[1] + Apr_[1] * Bpr_[4] + Apr_[2] * Bpr_[7];
Cpr_[2] = Apr_[0] * Bpr_[2] + Apr_[1] * Bpr_[5] + Apr_[2] * Bpr_[8];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[1] + Apr_[2] * Bpr_[2];
Cpr_[1] = Apr_[0] * Bpr_[3] + Apr_[1] * Bpr_[4] + Apr_[2] * Bpr_[5];
Cpr_[2] = Apr_[0] * Bpr_[6] + Apr_[1] * Bpr_[7] + Apr_[2] * Bpr_[8];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (4x4)*(4x4) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 4 && k == 4 && n == 4 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
if( transa == 'T' ) {
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[ 0] * Bpr_[0] + Apr_[ 1] * Bpr_[4] + Apr_[ 2] * Bpr_[ 8] + Apr_[ 3] * Bpr_[12];
Cpr_[1] = Apr_[ 4] * Bpr_[0] + Apr_[ 5] * Bpr_[4] + Apr_[ 6] * Bpr_[ 8] + Apr_[ 7] * Bpr_[12];
Cpr_[2] = Apr_[ 8] * Bpr_[0] + Apr_[ 9] * Bpr_[4] + Apr_[10] * Bpr_[ 8] + Apr_[11] * Bpr_[12];
Cpr_[3] = Apr_[12] * Bpr_[0] + Apr_[13] * Bpr_[4] + Apr_[14] * Bpr_[ 8] + Apr_[15] * Bpr_[12];
Cpr_[4] = Apr_[ 0] * Bpr_[1] + Apr_[ 1] * Bpr_[5] + Apr_[ 2] * Bpr_[ 9] + Apr_[ 3] * Bpr_[13];
Cpr_[5] = Apr_[ 4] * Bpr_[1] + Apr_[ 5] * Bpr_[5] + Apr_[ 6] * Bpr_[ 9] + Apr_[ 7] * Bpr_[13];
Cpr_[6] = Apr_[ 8] * Bpr_[1] + Apr_[ 9] * Bpr_[5] + Apr_[10] * Bpr_[ 9] + Apr_[11] * Bpr_[13];
Cpr_[7] = Apr_[12] * Bpr_[1] + Apr_[13] * Bpr_[5] + Apr_[14] * Bpr_[ 9] + Apr_[15] * Bpr_[13];
Cpr_[8] = Apr_[ 0] * Bpr_[2] + Apr_[ 1] * Bpr_[6] + Apr_[ 2] * Bpr_[10] + Apr_[ 3] * Bpr_[14];
Cpr_[9] = Apr_[ 4] * Bpr_[2] + Apr_[ 5] * Bpr_[6] + Apr_[ 6] * Bpr_[10] + Apr_[ 7] * Bpr_[14];
Cpr_[10]= Apr_[ 8] * Bpr_[2] + Apr_[ 9] * Bpr_[6] + Apr_[10] * Bpr_[10] + Apr_[11] * Bpr_[14];
Cpr_[11]= Apr_[12] * Bpr_[2] + Apr_[13] * Bpr_[6] + Apr_[14] * Bpr_[10] + Apr_[15] * Bpr_[14];
Cpr_[12]= Apr_[ 0] * Bpr_[3] + Apr_[ 1] * Bpr_[7] + Apr_[ 2] * Bpr_[11] + Apr_[ 3] * Bpr_[15];
Cpr_[13]= Apr_[ 4] * Bpr_[3] + Apr_[ 5] * Bpr_[7] + Apr_[ 6] * Bpr_[11] + Apr_[ 7] * Bpr_[15];
Cpr_[14]= Apr_[ 8] * Bpr_[3] + Apr_[ 9] * Bpr_[7] + Apr_[10] * Bpr_[11] + Apr_[11] * Bpr_[15];
Cpr_[15]= Apr_[12] * Bpr_[3] + Apr_[13] * Bpr_[7] + Apr_[14] * Bpr_[11] + Apr_[15] * Bpr_[15];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[ 0] * Bpr_[ 0] + Apr_[ 1] * Bpr_[ 1] + Apr_[ 2] * Bpr_[ 2] + Apr_[ 3] * Bpr_[ 3];
Cpr_[1] = Apr_[ 4] * Bpr_[ 0] + Apr_[ 5] * Bpr_[ 1] + Apr_[ 6] * Bpr_[ 2] + Apr_[ 7] * Bpr_[ 3];
Cpr_[2] = Apr_[ 8] * Bpr_[ 0] + Apr_[ 9] * Bpr_[ 1] + Apr_[10] * Bpr_[ 2] + Apr_[11] * Bpr_[ 3];
Cpr_[3] = Apr_[12] * Bpr_[ 0] + Apr_[13] * Bpr_[ 1] + Apr_[14] * Bpr_[ 2] + Apr_[15] * Bpr_[ 3];
Cpr_[4] = Apr_[ 0] * Bpr_[ 4] + Apr_[ 1] * Bpr_[ 5] + Apr_[ 2] * Bpr_[ 6] + Apr_[ 3] * Bpr_[ 7];
Cpr_[5] = Apr_[ 4] * Bpr_[ 4] + Apr_[ 5] * Bpr_[ 5] + Apr_[ 6] * Bpr_[ 6] + Apr_[ 7] * Bpr_[ 7];
Cpr_[6] = Apr_[ 8] * Bpr_[ 4] + Apr_[ 9] * Bpr_[ 5] + Apr_[10] * Bpr_[ 6] + Apr_[11] * Bpr_[ 7];
Cpr_[7] = Apr_[12] * Bpr_[ 4] + Apr_[13] * Bpr_[ 5] + Apr_[14] * Bpr_[ 6] + Apr_[15] * Bpr_[ 7];
Cpr_[8] = Apr_[ 0] * Bpr_[ 8] + Apr_[ 1] * Bpr_[ 9] + Apr_[ 2] * Bpr_[10] + Apr_[ 3] * Bpr_[11];
Cpr_[9] = Apr_[ 4] * Bpr_[ 8] + Apr_[ 5] * Bpr_[ 9] + Apr_[ 6] * Bpr_[10] + Apr_[ 7] * Bpr_[11];
Cpr_[10]= Apr_[ 8] * Bpr_[ 8] + Apr_[ 9] * Bpr_[ 9] + Apr_[10] * Bpr_[10] + Apr_[11] * Bpr_[11];
Cpr_[11]= Apr_[12] * Bpr_[ 8] + Apr_[13] * Bpr_[ 9] + Apr_[14] * Bpr_[10] + Apr_[15] * Bpr_[11];
Cpr_[12]= Apr_[ 0] * Bpr_[12] + Apr_[ 1] * Bpr_[13] + Apr_[ 2] * Bpr_[14] + Apr_[ 3] * Bpr_[15];
Cpr_[13]= Apr_[ 4] * Bpr_[12] + Apr_[ 5] * Bpr_[13] + Apr_[ 6] * Bpr_[14] + Apr_[ 7] * Bpr_[15];
Cpr_[14]= Apr_[ 8] * Bpr_[12] + Apr_[ 9] * Bpr_[13] + Apr_[10] * Bpr_[14] + Apr_[11] * Bpr_[15];
Cpr_[15]= Apr_[12] * Bpr_[12] + Apr_[13] * Bpr_[13] + Apr_[14] * Bpr_[14] + Apr_[15] * Bpr_[15];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
} else {
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[4] * Bpr_[4] + Apr_[8] * Bpr_[8] + Apr_[12]* Bpr_[12];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[5] * Bpr_[4] + Apr_[9] * Bpr_[8] + Apr_[13]* Bpr_[12];
Cpr_[2] = Apr_[2] * Bpr_[0] + Apr_[6] * Bpr_[4] + Apr_[10]* Bpr_[8] + Apr_[14]* Bpr_[12];
Cpr_[3] = Apr_[3] * Bpr_[0] + Apr_[7] * Bpr_[4] + Apr_[11]* Bpr_[8] + Apr_[15]* Bpr_[12];
Cpr_[4] = Apr_[0] * Bpr_[1] + Apr_[4] * Bpr_[5] + Apr_[8] * Bpr_[9] + Apr_[12]* Bpr_[13];
Cpr_[5] = Apr_[1] * Bpr_[1] + Apr_[5] * Bpr_[5] + Apr_[9] * Bpr_[9] + Apr_[13]* Bpr_[13];
Cpr_[6] = Apr_[2] * Bpr_[1] + Apr_[6] * Bpr_[5] + Apr_[10]* Bpr_[9] + Apr_[14]* Bpr_[13];
Cpr_[7] = Apr_[3] * Bpr_[1] + Apr_[7] * Bpr_[5] + Apr_[11]* Bpr_[9] + Apr_[15]* Bpr_[13];
Cpr_[8] = Apr_[0] * Bpr_[2] + Apr_[4] * Bpr_[6] + Apr_[8] * Bpr_[10]+ Apr_[12]* Bpr_[14];
Cpr_[9] = Apr_[1] * Bpr_[2] + Apr_[5] * Bpr_[6] + Apr_[9] * Bpr_[10]+ Apr_[13]* Bpr_[14];
Cpr_[10]= Apr_[2] * Bpr_[2] + Apr_[6] * Bpr_[6] + Apr_[10]* Bpr_[10]+ Apr_[14]* Bpr_[14];
Cpr_[11]= Apr_[3] * Bpr_[2] + Apr_[7] * Bpr_[6] + Apr_[11]* Bpr_[10]+ Apr_[15]* Bpr_[14];
Cpr_[12]= Apr_[0] * Bpr_[3] + Apr_[4] * Bpr_[7] + Apr_[8] * Bpr_[11]+ Apr_[12]* Bpr_[15];
Cpr_[13]= Apr_[1] * Bpr_[3] + Apr_[5] * Bpr_[7] + Apr_[9] * Bpr_[11]+ Apr_[13]* Bpr_[15];
Cpr_[14]= Apr_[2] * Bpr_[3] + Apr_[6] * Bpr_[7] + Apr_[10]* Bpr_[11]+ Apr_[14]* Bpr_[15];
Cpr_[15]= Apr_[3] * Bpr_[3] + Apr_[7] * Bpr_[7] + Apr_[11]* Bpr_[11]+ Apr_[15]* Bpr_[15];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[4] * Bpr_[1] + Apr_[8] * Bpr_[2] + Apr_[12]* Bpr_[3];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[5] * Bpr_[1] + Apr_[9] * Bpr_[2] + Apr_[13]* Bpr_[3];
Cpr_[2] = Apr_[2] * Bpr_[0] + Apr_[6] * Bpr_[1] + Apr_[10]* Bpr_[2] + Apr_[14]* Bpr_[3];
Cpr_[3] = Apr_[3] * Bpr_[0] + Apr_[7] * Bpr_[1] + Apr_[11]* Bpr_[2] + Apr_[15]* Bpr_[3];
Cpr_[4] = Apr_[0] * Bpr_[4] + Apr_[4] * Bpr_[5] + Apr_[8] * Bpr_[6] + Apr_[12]* Bpr_[7];
Cpr_[5] = Apr_[1] * Bpr_[4] + Apr_[5] * Bpr_[5] + Apr_[9] * Bpr_[6] + Apr_[13]* Bpr_[7];
Cpr_[6] = Apr_[2] * Bpr_[4] + Apr_[6] * Bpr_[5] + Apr_[10]* Bpr_[6] + Apr_[14]* Bpr_[7];
Cpr_[7] = Apr_[3] * Bpr_[4] + Apr_[7] * Bpr_[5] + Apr_[11]* Bpr_[6] + Apr_[15]* Bpr_[7];
Cpr_[8] = Apr_[0] * Bpr_[8] + Apr_[4] * Bpr_[9] + Apr_[8] * Bpr_[10]+ Apr_[12]* Bpr_[11];
Cpr_[9] = Apr_[1] * Bpr_[8] + Apr_[5] * Bpr_[9] + Apr_[9] * Bpr_[10]+ Apr_[13]* Bpr_[11];
Cpr_[10]= Apr_[2] * Bpr_[8] + Apr_[6] * Bpr_[9] + Apr_[10]* Bpr_[10]+ Apr_[14]* Bpr_[11];
Cpr_[11]= Apr_[3] * Bpr_[8] + Apr_[7] * Bpr_[9] + Apr_[11]* Bpr_[10]+ Apr_[15]* Bpr_[11];
Cpr_[12]= Apr_[0] * Bpr_[12]+ Apr_[4] * Bpr_[13]+ Apr_[8] * Bpr_[14]+ Apr_[12]* Bpr_[15];
Cpr_[13]= Apr_[1] * Bpr_[12]+ Apr_[5] * Bpr_[13]+ Apr_[9] * Bpr_[14]+ Apr_[13]* Bpr_[15];
Cpr_[14]= Apr_[2] * Bpr_[12]+ Apr_[6] * Bpr_[13]+ Apr_[10]* Bpr_[14]+ Apr_[14]* Bpr_[15];
Cpr_[15]= Apr_[3] * Bpr_[12]+ Apr_[7] * Bpr_[13]+ Apr_[11]* Bpr_[14]+ Apr_[15]* Bpr_[15];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (4x4)*(4x1) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 4 && k == 4 && l == 4 && n == 1 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
if( transa == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[ 0] * Bpr_[0] + Apr_[ 1] * Bpr_[1] + Apr_[ 2] * Bpr_[2] + Apr_[ 3] * Bpr_[3];
Cpr_[1] = Apr_[ 4] * Bpr_[0] + Apr_[ 5] * Bpr_[1] + Apr_[ 6] * Bpr_[2] + Apr_[ 7] * Bpr_[3];
Cpr_[2] = Apr_[ 8] * Bpr_[0] + Apr_[ 9] * Bpr_[1] + Apr_[10] * Bpr_[2] + Apr_[11] * Bpr_[3];
Cpr_[3] = Apr_[12] * Bpr_[0] + Apr_[13] * Bpr_[1] + Apr_[14] * Bpr_[2] + Apr_[15] * Bpr_[3];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[4] * Bpr_[1] + Apr_[ 8] * Bpr_[2] + Apr_[12] * Bpr_[3];
Cpr_[1] = Apr_[1] * Bpr_[0] + Apr_[5] * Bpr_[1] + Apr_[ 9] * Bpr_[2] + Apr_[13] * Bpr_[3];
Cpr_[2] = Apr_[2] * Bpr_[0] + Apr_[6] * Bpr_[1] + Apr_[10] * Bpr_[2] + Apr_[14] * Bpr_[3];
Cpr_[3] = Apr_[3] * Bpr_[0] + Apr_[7] * Bpr_[1] + Apr_[11] * Bpr_[2] + Apr_[15] * Bpr_[3];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*----------------------------------------------------------------------------
* Check to see if we can do a fast OpenMP inline (1x4)*(4x4) nD multiply
*---------------------------------------------------------------------------- */
if( (mtimesx_mode == MTIMESX_LOOPS_OMP || mtimesx_mode == MTIMESX_SPEED_OMP) && max_threads > 1 &&
m == 1 && k == 4 && n == 4 && p >= OMP_SPECIAL_SMALL && !singleton_expansion && !Cpi ) {
if( debug_message ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS (unrolled into inline multiplies)\n");
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
Asize *= (Andim > 2);
Bsize *= (Bndim > 2);
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Bpr_, *Cpr_;
mwSize ip_, p_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
Apr_ = Apr + offset * Asize;
Bpr_ = Bpr + offset * Bsize;
Cpr_ = Cpr + offset * Csize;
if( transb == 'T' ) {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[0] + Apr_[1] * Bpr_[4] + Apr_[2] * Bpr_[ 8] + Apr_[3] * Bpr_[12];
Cpr_[1] = Apr_[0] * Bpr_[1] + Apr_[1] * Bpr_[5] + Apr_[2] * Bpr_[ 9] + Apr_[3] * Bpr_[13];
Cpr_[2] = Apr_[0] * Bpr_[2] + Apr_[1] * Bpr_[6] + Apr_[2] * Bpr_[10] + Apr_[3] * Bpr_[14];
Cpr_[3] = Apr_[0] * Bpr_[3] + Apr_[1] * Bpr_[7] + Apr_[2] * Bpr_[11] + Apr_[3] * Bpr_[15];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
} else {
for( ip_=0; ip_<p_; ip_++ ) {
Cpr_[0] = Apr_[0] * Bpr_[ 0] + Apr_[1] * Bpr_[ 1] + Apr_[2] * Bpr_[ 2] + Apr_[3] * Bpr_[ 3];
Cpr_[1] = Apr_[0] * Bpr_[ 4] + Apr_[1] * Bpr_[ 5] + Apr_[2] * Bpr_[ 6] + Apr_[3] * Bpr_[ 7];
Cpr_[2] = Apr_[0] * Bpr_[ 8] + Apr_[1] * Bpr_[ 9] + Apr_[2] * Bpr_[10] + Apr_[3] * Bpr_[11];
Cpr_[3] = Apr_[0] * Bpr_[12] + Apr_[1] * Bpr_[13] + Apr_[2] * Bpr_[14] + Apr_[3] * Bpr_[15];
Apr_ += Asize;
Bpr_ += Bsize;
Cpr_ += Csize;
}
}
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
*/
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
#endif
/*----------------------------------------------------------------------------
* Get dot product method to use.
*---------------------------------------------------------------------------- */
if( mtimesx_mode == MTIMESX_BLAS || mtimesx_mode == MTIMESX_MATLAB ) {
dot_method = METHOD_BLAS;
} else if( mtimesx_mode == MTIMESX_LOOPS ) {
dot_method = METHOD_LOOPS;
} else if( mtimesx_mode == MTIMESX_LOOPS_OMP ) {
dot_method = METHOD_LOOPS_OMP;
} else {
#if !defined(_MSC_VER) || _MSC_VER < 1500 /* Version 2008, 9.0 */
if( (Apr != Bpr) && !Api && !Bpi ) {
dot_method = METHOD_BLAS;
} else if( mtimesx_mode == MTIMESX_SPEED_OMP && max_threads > 1 ) {
#else
if( mtimesx_mode == MTIMESX_SPEED_OMP && max_threads > 1 ) {
#endif
if( Apr != Bpr ) {
if( Api && Bpi ) {
dot_method = METHOD_LOOPS_OMP;
} else {
dot_method = METHOD_LOOPS;
}
} else {
if( Api && (ai * bi == -one) ) {
dot_method = METHOD_BLAS;
} else {
dot_method = METHOD_LOOPS_OMP;
}
}
} else {
#ifdef __LCC__
if( (Apr == Bpr && (!Api || (ai * bi == -one))) || omp_get_num_procs() > 2 ) {
dot_method = METHOD_BLAS;
} else {
dot_method = METHOD_LOOPS;
}
#else
if( Apr == Bpr && (!Api || (ai * bi == -one)) ) {
dot_method = METHOD_BLAS;
} else {
dot_method = METHOD_LOOPS;
}
#endif
}
}
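/* Note: Apr == Bpr in the tests above means A and B share the same data
 * block (a self dot product such as A'*A); presumably the BLAS path is the
 * faster choice for that case. */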
/*----------------------------------------------------------------------------
* Get outer product method to use.
*---------------------------------------------------------------------------- */
switch( mtimesx_mode )
{
case MTIMESX_BLAS:
outer_method = METHOD_BLAS;
break;
case MTIMESX_LOOPS:
outer_method = METHOD_LOOPS;
break;
case MTIMESX_LOOPS_OMP:
outer_method = METHOD_LOOPS_OMP;
break;
case MTIMESX_SPEED_OMP:
if( max_threads > 1 ) {
outer_method = METHOD_LOOPS_OMP;
break;
}
case MTIMESX_MATLAB:
case MTIMESX_SPEED:
#ifdef __LCC__
if( Api && Bpi && omp_get_num_procs() <= 2 ) {
#else
if( (Apr == Bpr) || (Api && Bpi) ) {
#endif
outer_method = METHOD_LOOPS;
} else {
outer_method = METHOD_BLAS;
}
break;
}
/*----------------------------------------------------------------------------
* Get scalar product method to use.
*---------------------------------------------------------------------------- */
switch( mtimesx_mode )
{
case MTIMESX_BLAS:
scalar_method = METHOD_BLAS;
break;
case MTIMESX_LOOPS:
scalar_method = METHOD_LOOPS;
break;
case MTIMESX_LOOPS_OMP:
scalar_method = METHOD_LOOPS_OMP;
break;
case MTIMESX_SPEED_OMP:
if( max_threads > 1 ) {
scalar_method = METHOD_LOOPS_OMP;
break;
}
case MTIMESX_MATLAB:
case MTIMESX_SPEED:
if( ai != zero && Bpi ) {
scalar_method = METHOD_LOOPS;
} else {
scalar_method = METHOD_BLAS;
}
break;
}
/*----------------------------------------------------------------------------
* Outer Loop to process all of the individual matrix multiplies
*---------------------------------------------------------------------------- */
if( debug ) {
mexPrintf("MTIMESX: Performing %d individual multiplies\n",p);
}
for( ip=0; ip<p; ip++ ) {
ptransa = transa; /* Restore the original transa and transb, because */
ptransb = transb; /* they might have been changed in previous iteration */
if( debug_message ) {
mexPrintf("MTIMESX: (%d x %d) * (%d x %d)\n",m,k,l,n);
}
/*----------------------------------------------------------------------------
* Scalar product (1 x 1) * (K x N)
*---------------------------------------------------------------------------- */
if( scalarmultiply == 1 ) {
sr = *Apr;
si = Api ? (transa=='N'||transa=='T'?*Api:-*Api) : zero;
RealTimesScalar(Cpr, Cpi, Bpr, Bpi, transb, m2, n2, sr, si, Bsize, 1, scalar_method);
/*----------------------------------------------------------------------------
* Scalar product (M x K) * (1 x 1)
*---------------------------------------------------------------------------- */
} else if( scalarmultiply == 2 ) {
sr = *Bpr;
si = Bpi ? (transb=='N'||transb=='T'?*Bpi:-*Bpi) : zero;
RealTimesScalar(Cpr, Cpi, Apr, Api, transa, m1, n1, sr, si, Asize, 1, scalar_method);
/*---------------------------------------------------------------------------------
* Small matrix times small matrix (M x K) * (K x N): use inline code. Only use
* this method when not running in 'BLAS' or 'MATLAB' mode and M, K, N are all <= 4.
*--------------------------------------------------------------------------------- */
} else if( mtimesx_mode != MTIMESX_BLAS && mtimesx_mode != MTIMESX_MATLAB && m <= 4 && k <= 4 && n <= 4 ) {
if( debug_message ) {
mexPrintf("MTIMESX: LOOPS (unrolled into inline switch statements)\n");
}
/* Form B elements, taking size and transb into account */
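/* All k*n elements of B are cached in the scalar registers Bpr11..Bpr44 (and
 * Bpi11..Bpi44 when complex), indexed as Bpr<row><col> of op(B). Example for
 * k = 2, n = 2, transb == 'N': Bpr11 = Bpr[0], Bpr21 = Bpr[1],
 * Bpr12 = Bpr[2], Bpr22 = Bpr[3]. */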
switch( k ) {
case 1: /* (m x 1)*(1 x n) */
switch( n ) {
case 1:
mexErrMsgTxt("Internal Error (m x 1)*(1 x 1), contact author.");
break;
case 2: /* (m x 1)*(1 x 2) */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1];
if( Bpi ) {
if( transb == 'N' || transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1];
} else { /* transb == 'G' || transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1];
}
}
break;
case 3: /* (m x 1)*(1 x 3) */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1]; Bpr13 = Bpr[2];
if( Bpi ) {
if( transb == 'N' || transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1]; Bpi13 = Bpi[2];
} else { /* transb == 'G' || transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1]; Bpi13 =-Bpi[2];
}
}
break;
case 4: /* (m x 1)*(1 x 4) */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1]; Bpr13 = Bpr[2]; Bpr14 = Bpr[3];
if( Bpi ) {
if( transb == 'N' || transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1]; Bpi13 = Bpi[2]; Bpi14 = Bpi[3];
} else { /* transb == 'G' || transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1]; Bpi13 =-Bpi[2]; Bpi14 =-Bpi[3];
}
}
break;
}
break;
case 2: /* (m x 2)*(2 x n) */
switch( n ) {
case 1: /* (m x 2)*(2 x 1) */
Bpr11 = Bpr[0];
Bpr21 = Bpr[1];
if( Bpi ) {
if( transb == 'N' || transb == 'T' ) {
Bpi11 = Bpi[0];
Bpi21 = Bpi[1];
} else { /* transb == 'G' || transb == 'C' */
Bpi11 =-Bpi[0];
Bpi21 =-Bpi[1];
}
}
break;
case 2: /* (m x 2)*(2 x 2) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[2];
Bpr21 = Bpr[1]; Bpr22 = Bpr[3];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1];
Bpr21 = Bpr[2]; Bpr22 = Bpr[3];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[2];
Bpi21 = Bpi[1]; Bpi22 = Bpi[3];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[2];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[3];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1];
Bpi21 = Bpi[2]; Bpi22 = Bpi[3];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1];
Bpi21 =-Bpi[2]; Bpi22 =-Bpi[3];
}
}
break;
case 3: /* (m x 2)*(2 x 3) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[2]; Bpr13 = Bpr[4];
Bpr21 = Bpr[1]; Bpr22 = Bpr[3]; Bpr23 = Bpr[5];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1]; Bpr13 = Bpr[2];
Bpr21 = Bpr[3]; Bpr22 = Bpr[4]; Bpr23 = Bpr[5];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[2]; Bpi13 = Bpi[4];
Bpi21 = Bpi[1]; Bpi22 = Bpi[3]; Bpi23 = Bpi[5];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[2]; Bpi13 =-Bpi[4];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[3]; Bpi23 =-Bpi[5];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1]; Bpi13 = Bpi[2];
Bpi21 = Bpi[3]; Bpi22 = Bpi[4]; Bpi23 = Bpi[5];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1]; Bpi13 =-Bpi[2];
Bpi21 =-Bpi[3]; Bpi22 =-Bpi[4]; Bpi23 =-Bpi[5];
}
}
break;
case 4: /* (m x 2)*(2 x 4) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[2]; Bpr13 = Bpr[4]; Bpr14 = Bpr[6];
Bpr21 = Bpr[1]; Bpr22 = Bpr[3]; Bpr23 = Bpr[5]; Bpr24 = Bpr[7];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1]; Bpr13 = Bpr[2]; Bpr14 = Bpr[3];
Bpr21 = Bpr[4]; Bpr22 = Bpr[5]; Bpr23 = Bpr[6]; Bpr24 = Bpr[7];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[2]; Bpi13 = Bpi[4]; Bpi14 = Bpi[6];
Bpi21 = Bpi[1]; Bpi22 = Bpi[3]; Bpi23 = Bpi[5]; Bpi24 = Bpi[7];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[2]; Bpi13 =-Bpi[4]; Bpi14 =-Bpi[6];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[3]; Bpi23 =-Bpi[5]; Bpi24 =-Bpi[7];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1]; Bpi13 = Bpi[2]; Bpi14 = Bpi[3];
Bpi21 = Bpi[4]; Bpi22 = Bpi[5]; Bpi23 = Bpi[6]; Bpi24 = Bpi[7];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1]; Bpi13 =-Bpi[2]; Bpi14 =-Bpi[3];
Bpi21 =-Bpi[4]; Bpi22 =-Bpi[5]; Bpi23 =-Bpi[6]; Bpi24 =-Bpi[7];
}
}
break;
}
break;
case 3: /* (m x 3)*(3 x n) */
switch( n ) {
case 1: /* (m x 3)*(3 x 1) */
Bpr11 = Bpr[0];
Bpr21 = Bpr[1];
Bpr31 = Bpr[2];
if( Bpi ) {
if( transb == 'N' || transb == 'T' ) {
Bpi11 = Bpi[0];
Bpi21 = Bpi[1];
Bpi31 = Bpi[2];
} else { /* transb == 'G' || transb == 'C' */
Bpi11 =-Bpi[0];
Bpi21 =-Bpi[1];
Bpi31 =-Bpi[2];
}
}
break;
case 2: /* (m x 3)*(3 x 2) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[3];
Bpr21 = Bpr[1]; Bpr22 = Bpr[4];
Bpr31 = Bpr[2]; Bpr32 = Bpr[5];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1];
Bpr21 = Bpr[2]; Bpr22 = Bpr[3];
Bpr31 = Bpr[4]; Bpr32 = Bpr[5];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[3];
Bpi21 = Bpi[1]; Bpi22 = Bpi[4];
Bpi31 = Bpi[2]; Bpi32 = Bpi[5];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[3];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[4];
Bpi31 =-Bpi[2]; Bpi32 =-Bpi[5];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1];
Bpi21 = Bpi[2]; Bpi22 = Bpi[3];
Bpi31 = Bpi[4]; Bpi32 = Bpi[5];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1];
Bpi21 =-Bpi[2]; Bpi22 =-Bpi[3];
Bpi31 =-Bpi[4]; Bpi32 =-Bpi[5];
}
}
break;
case 3: /* (m x 3)*(3 x 3) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[3]; Bpr13 = Bpr[6];
Bpr21 = Bpr[1]; Bpr22 = Bpr[4]; Bpr23 = Bpr[7];
Bpr31 = Bpr[2]; Bpr32 = Bpr[5]; Bpr33 = Bpr[8];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1]; Bpr13 = Bpr[2];
Bpr21 = Bpr[3]; Bpr22 = Bpr[4]; Bpr23 = Bpr[5];
Bpr31 = Bpr[6]; Bpr32 = Bpr[7]; Bpr33 = Bpr[8];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[3]; Bpi13 = Bpi[6];
Bpi21 = Bpi[1]; Bpi22 = Bpi[4]; Bpi23 = Bpi[7];
Bpi31 = Bpi[2]; Bpi32 = Bpi[5]; Bpi33 = Bpi[8];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[3]; Bpi13 =-Bpi[6];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[4]; Bpi23 =-Bpi[7];
Bpi31 =-Bpi[2]; Bpi32 =-Bpi[5]; Bpi33 =-Bpi[8];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1]; Bpi13 = Bpi[2];
Bpi21 = Bpi[3]; Bpi22 = Bpi[4]; Bpi23 = Bpi[5];
Bpi31 = Bpi[6]; Bpi32 = Bpi[7]; Bpi33 = Bpi[8];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1]; Bpi13 =-Bpi[2];
Bpi21 =-Bpi[3]; Bpi22 =-Bpi[4]; Bpi23 =-Bpi[5];
Bpi31 =-Bpi[6]; Bpi32 =-Bpi[7]; Bpi33 =-Bpi[8];
}
}
break;
case 4: /* (m x 3)*(3 x 4) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[3]; Bpr13 = Bpr[6]; Bpr14 = Bpr[9];
Bpr21 = Bpr[1]; Bpr22 = Bpr[4]; Bpr23 = Bpr[7]; Bpr24 = Bpr[10];
Bpr31 = Bpr[2]; Bpr32 = Bpr[5]; Bpr33 = Bpr[8]; Bpr34 = Bpr[11];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1]; Bpr13 = Bpr[2]; Bpr14 = Bpr[3];
Bpr21 = Bpr[4]; Bpr22 = Bpr[5]; Bpr23 = Bpr[6]; Bpr24 = Bpr[7];
Bpr31 = Bpr[8]; Bpr32 = Bpr[9]; Bpr33 = Bpr[10];Bpr34 = Bpr[11];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[3]; Bpi13 = Bpi[6]; Bpi14 = Bpi[9];
Bpi21 = Bpi[1]; Bpi22 = Bpi[4]; Bpi23 = Bpi[7]; Bpi24 = Bpi[10];
Bpi31 = Bpi[2]; Bpi32 = Bpi[5]; Bpi33 = Bpi[8]; Bpi34 = Bpi[11];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[3]; Bpi13 =-Bpi[6]; Bpi14 =-Bpi[9];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[4]; Bpi23 =-Bpi[7]; Bpi24 =-Bpi[10];
Bpi31 =-Bpi[2]; Bpi32 =-Bpi[5]; Bpi33 =-Bpi[8]; Bpi34 =-Bpi[11];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1]; Bpi13 = Bpi[2]; Bpi14 = Bpi[3];
Bpi21 = Bpi[4]; Bpi22 = Bpi[5]; Bpi23 = Bpi[6]; Bpi24 = Bpi[7];
Bpi31 = Bpi[8]; Bpi32 = Bpi[9]; Bpi33 = Bpi[10];Bpi34 = Bpi[11];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1]; Bpi13 =-Bpi[2]; Bpi14 =-Bpi[3];
Bpi21 =-Bpi[4]; Bpi22 =-Bpi[5]; Bpi23 =-Bpi[6]; Bpi24 =-Bpi[7];
Bpi31 =-Bpi[8]; Bpi32 =-Bpi[9]; Bpi33 =-Bpi[10];Bpi34 =-Bpi[11];
}
}
break;
}
break;
case 4: /* (m x 4)*(4 x n) */
switch( n ) {
case 1: /* (m x 4)*(4 x 1) */
Bpr11 = Bpr[0];
Bpr21 = Bpr[1];
Bpr31 = Bpr[2];
Bpr41 = Bpr[3];
if( Bpi ) {
if( transb == 'N' || transb == 'T' ) {
Bpi11 = Bpi[0];
Bpi21 = Bpi[1];
Bpi31 = Bpi[2];
Bpi41 = Bpi[3];
} else { /* transb == 'G' || transb == 'C' */
Bpi11 =-Bpi[0];
Bpi21 =-Bpi[1];
Bpi31 =-Bpi[2];
Bpi41 =-Bpi[3];
}
}
break;
case 2: /* (m x 4)*(4 x 2) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[4];
Bpr21 = Bpr[1]; Bpr22 = Bpr[5];
Bpr31 = Bpr[2]; Bpr32 = Bpr[6];
Bpr41 = Bpr[3]; Bpr42 = Bpr[7];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1];
Bpr21 = Bpr[2]; Bpr22 = Bpr[3];
Bpr31 = Bpr[4]; Bpr32 = Bpr[5];
Bpr41 = Bpr[6]; Bpr42 = Bpr[7];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[4];
Bpi21 = Bpi[1]; Bpi22 = Bpi[5];
Bpi31 = Bpi[2]; Bpi32 = Bpi[6];
Bpi41 = Bpi[3]; Bpi42 = Bpi[7];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[4];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[5];
Bpi31 =-Bpi[2]; Bpi32 =-Bpi[6];
Bpi41 =-Bpi[3]; Bpi42 =-Bpi[7];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1];
Bpi21 = Bpi[2]; Bpi22 = Bpi[3];
Bpi31 = Bpi[4]; Bpi32 = Bpi[5];
Bpi41 = Bpi[6]; Bpi42 = Bpi[7];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1];
Bpi21 =-Bpi[2]; Bpi22 =-Bpi[3];
Bpi31 =-Bpi[4]; Bpi32 =-Bpi[5];
Bpi41 =-Bpi[6]; Bpi42 =-Bpi[7];
}
}
break;
case 3: /* (m x 4)*(4 x 3) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[4]; Bpr13 = Bpr[8];
Bpr21 = Bpr[1]; Bpr22 = Bpr[5]; Bpr23 = Bpr[9];
Bpr31 = Bpr[2]; Bpr32 = Bpr[6]; Bpr33 = Bpr[10];
Bpr41 = Bpr[3]; Bpr42 = Bpr[7]; Bpr43 = Bpr[11];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1]; Bpr13 = Bpr[2];
Bpr21 = Bpr[3]; Bpr22 = Bpr[4]; Bpr23 = Bpr[5];
Bpr31 = Bpr[6]; Bpr32 = Bpr[7]; Bpr33 = Bpr[8];
Bpr41 = Bpr[9]; Bpr42 = Bpr[10];Bpr43 = Bpr[11];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[4]; Bpi13 = Bpi[8];
Bpi21 = Bpi[1]; Bpi22 = Bpi[5]; Bpi23 = Bpi[9];
Bpi31 = Bpi[2]; Bpi32 = Bpi[6]; Bpi33 = Bpi[10];
Bpi41 = Bpi[3]; Bpi42 = Bpi[7]; Bpi43 = Bpi[11];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[4]; Bpi13 =-Bpi[8];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[5]; Bpi23 =-Bpi[9];
Bpi31 =-Bpi[2]; Bpi32 =-Bpi[6]; Bpi33 =-Bpi[10];
Bpi41 =-Bpi[3]; Bpi42 =-Bpi[7]; Bpi43 =-Bpi[11];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1]; Bpi13 = Bpi[2];
Bpi21 = Bpi[3]; Bpi22 = Bpi[4]; Bpi23 = Bpi[5];
Bpi31 = Bpi[6]; Bpi32 = Bpi[7]; Bpi33 = Bpi[8];
Bpi41 = Bpi[9]; Bpi42 = Bpi[10];Bpi43 = Bpi[11];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1]; Bpi13 =-Bpi[2];
Bpi21 =-Bpi[3]; Bpi22 =-Bpi[4]; Bpi23 =-Bpi[5];
Bpi31 =-Bpi[6]; Bpi32 =-Bpi[7]; Bpi33 =-Bpi[8];
Bpi41 =-Bpi[9]; Bpi42 =-Bpi[10];Bpi43 =-Bpi[11];
}
}
break;
case 4: /* (m x 4)*(4 x 4) */
if( transb == 'N' || transb == 'G' ) {
Bpr11 = Bpr[0]; Bpr12 = Bpr[4]; Bpr13 = Bpr[8]; Bpr14 = Bpr[12];
Bpr21 = Bpr[1]; Bpr22 = Bpr[5]; Bpr23 = Bpr[9]; Bpr24 = Bpr[13];
Bpr31 = Bpr[2]; Bpr32 = Bpr[6]; Bpr33 = Bpr[10];Bpr34 = Bpr[14];
Bpr41 = Bpr[3]; Bpr42 = Bpr[7]; Bpr43 = Bpr[11];Bpr44 = Bpr[15];
} else { /* transb == 'T' || transb == 'C' */
Bpr11 = Bpr[0]; Bpr12 = Bpr[1]; Bpr13 = Bpr[2]; Bpr14 = Bpr[3];
Bpr21 = Bpr[4]; Bpr22 = Bpr[5]; Bpr23 = Bpr[6]; Bpr24 = Bpr[7];
Bpr31 = Bpr[8]; Bpr32 = Bpr[9]; Bpr33 = Bpr[10];Bpr34 = Bpr[11];
Bpr41 = Bpr[12];Bpr42 = Bpr[13];Bpr43 = Bpr[14];Bpr44 = Bpr[15];
}
if( Bpi ) {
if( transb == 'N' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[4]; Bpi13 = Bpi[8]; Bpi14 = Bpi[12];
Bpi21 = Bpi[1]; Bpi22 = Bpi[5]; Bpi23 = Bpi[9]; Bpi24 = Bpi[13];
Bpi31 = Bpi[2]; Bpi32 = Bpi[6]; Bpi33 = Bpi[10];Bpi34 = Bpi[14];
Bpi41 = Bpi[3]; Bpi42 = Bpi[7]; Bpi43 = Bpi[11];Bpi44 = Bpi[15];
} else if( transb == 'G' ) {
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[4]; Bpi13 =-Bpi[8]; Bpi14 =-Bpi[12];
Bpi21 =-Bpi[1]; Bpi22 =-Bpi[5]; Bpi23 =-Bpi[9]; Bpi24 =-Bpi[13];
Bpi31 =-Bpi[2]; Bpi32 =-Bpi[6]; Bpi33 =-Bpi[10];Bpi34 =-Bpi[14];
Bpi41 =-Bpi[3]; Bpi42 =-Bpi[7]; Bpi43 =-Bpi[11];Bpi44 =-Bpi[15];
} else if( transb == 'T' ) {
Bpi11 = Bpi[0]; Bpi12 = Bpi[1]; Bpi13 = Bpi[2]; Bpi14 = Bpi[3];
Bpi21 = Bpi[4]; Bpi22 = Bpi[5]; Bpi23 = Bpi[6]; Bpi24 = Bpi[7];
Bpi31 = Bpi[8]; Bpi32 = Bpi[9]; Bpi33 = Bpi[10];Bpi34 = Bpi[11];
Bpi41 = Bpi[12];Bpi42 = Bpi[13];Bpi43 = Bpi[14];Bpi44 = Bpi[15];
} else {/* transb == 'C' */
Bpi11 =-Bpi[0]; Bpi12 =-Bpi[1]; Bpi13 =-Bpi[2]; Bpi14 =-Bpi[3];
Bpi21 =-Bpi[4]; Bpi22 =-Bpi[5]; Bpi23 =-Bpi[6]; Bpi24 =-Bpi[7];
Bpi31 =-Bpi[8]; Bpi32 =-Bpi[9]; Bpi33 =-Bpi[10];Bpi34 =-Bpi[11];
Bpi41 =-Bpi[12];Bpi42 =-Bpi[13];Bpi43 =-Bpi[14];Bpi44 =-Bpi[15];
}
}
break;
}
break;
}
/* Form A elements and do the multiply */
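/* The switch( n ) blocks below fall through intentionally (no break), so
 * case 4 computes columns 4, 3, 2 and 1 in turn. Complex terms follow
 * (ar + i*ai)*(br + i*bi) = (ar*br - ai*bi) + i*(ar*bi + ai*br). */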
switch( m ) {
case 1: /* (1 x k)*(k x n) */
switch( k ) {
case 1: /* (1 x 1)*(1 x n) */
mexErrMsgTxt("Internal Error (1 x 1)*(1 x n), contact author.");
break;
case 2: /* (1 x 2)*(2 x n) */
Apr11 = Apr[0]; Apr12 = Apr[1];
if( Api ) {
if( transa == 'N' || transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1];
} else { /* transa == 'G' || transa == 'C' */
Api11 =-Api[0]; Api12 =-Api[1];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[3] = Apr11 * Bpr14 + Apr12 * Bpr24
- Api11 * Bpi14 - Api12 * Bpi24;
Cpi[3] = Apr11 * Bpi14 + Apr12 * Bpi24
+ Api11 * Bpr14 + Api12 * Bpr24;
case 3:
Cpr[2] = Apr11 * Bpr13 + Apr12 * Bpr23
- Api11 * Bpi13 - Api12 * Bpi23;
Cpi[2] = Apr11 * Bpi13 + Apr12 * Bpi23
+ Api11 * Bpr13 + Api12 * Bpr23;
case 2:
Cpr[1] = Apr11 * Bpr12 + Apr12 * Bpr22
- Api11 * Bpi12 - Api12 * Bpi22;
Cpi[1] = Apr11 * Bpi12 + Apr12 * Bpi22
+ Api11 * Bpr12 + Api12 * Bpr22;
case 1:
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21
- Api11 * Bpi11 - Api12 * Bpi21;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21
+ Api11 * Bpr11 + Api12 * Bpr21;
}
} else {
switch( n ) {
case 4:
Cpr[3] = Apr11 * Bpr14 + Apr12 * Bpr24;
case 3:
Cpr[2] = Apr11 * Bpr13 + Apr12 * Bpr23;
case 2:
Cpr[1] = Apr11 * Bpr12 + Apr12 * Bpr22;
case 1:
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[3] = Api11 * Bpr14 + Api12 * Bpr24;
case 3:
Cpi[2] = Api11 * Bpr13 + Api12 * Bpr23;
case 2:
Cpi[1] = Api11 * Bpr12 + Api12 * Bpr22;
case 1:
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[3] = Apr11 * Bpi14 + Apr12 * Bpi24;
case 3:
Cpi[2] = Apr11 * Bpi13 + Apr12 * Bpi23;
case 2:
Cpi[1] = Apr11 * Bpi12 + Apr12 * Bpi22;
case 1:
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21;
}
}
}
break;
case 3: /* (1 x 3)*(3 x n) */
Apr11 = Apr[0]; Apr12 = Apr[1]; Apr13 = Apr[2];
if( Api ) {
if( transa == 'N' || transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1]; Api13 = Api[2];
} else { /* transa == 'G' || transa == 'C' */
Api11 =-Api[0]; Api12 =-Api[1]; Api13 =-Api[2];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[3] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34
- Api11 * Bpi14 - Api12 * Bpi24 - Api13 * Bpi34;
Cpi[3] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34
+ Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34;
case 3:
Cpr[2] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33
- Api11 * Bpi13 - Api12 * Bpi23 - Api13 * Bpi33;
Cpi[2] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33
+ Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33;
case 2:
Cpr[1] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32
- Api11 * Bpi12 - Api12 * Bpi22 - Api13 * Bpi32;
Cpi[1] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32
+ Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32;
case 1:
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31
- Api11 * Bpi11 - Api12 * Bpi21 - Api13 * Bpi31;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31
+ Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31;
}
} else {
switch( n ) {
case 4:
Cpr[3] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34;
case 3:
Cpr[2] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33;
case 2:
Cpr[1] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32;
case 1:
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[3] = Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34;
case 3:
Cpi[2] = Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33;
case 2:
Cpi[1] = Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32;
case 1:
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[3] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34;
case 3:
Cpi[2] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33;
case 2:
Cpi[1] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32;
case 1:
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31;
}
}
}
break;
case 4: /* (1 x 4)*(4 x n) */
Apr11 = Apr[0]; Apr12 = Apr[1]; Apr13 = Apr[2]; Apr14 = Apr[3];
if( Api ) {
if( transa == 'N' || transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1]; Api13 = Api[2]; Api14 = Api[3];
} else { /* transa == 'G' || transa == 'C' */
Api11 =-Api[0]; Api12 =-Api[1]; Api13 =-Api[2]; Api14 =-Api[3];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[3] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34 + Apr14 * Bpr44
- Api11 * Bpi14 - Api12 * Bpi24 - Api13 * Bpi34 - Api14 * Bpi44;
Cpi[3] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34 + Apr14 * Bpi44
+ Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34 + Api14 * Bpr44;
case 3:
Cpr[2] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33 + Apr14 * Bpr43
- Api11 * Bpi13 - Api12 * Bpi23 - Api13 * Bpi33 - Api14 * Bpi43;
Cpi[2] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33 + Apr14 * Bpi43
+ Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33 + Api14 * Bpr43;
case 2:
Cpr[1] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32 + Apr14 * Bpr42
- Api11 * Bpi12 - Api12 * Bpi22 - Api13 * Bpi32 - Api14 * Bpi42;
Cpi[1] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32 + Apr14 * Bpi42
+ Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32 + Api14 * Bpr42;
case 1:
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31 + Apr14 * Bpr41
- Api11 * Bpi11 - Api12 * Bpi21 - Api13 * Bpi31 - Api14 * Bpi41;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31 + Apr14 * Bpi41
+ Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31 + Api14 * Bpr41;
}
} else {
switch( n ) {
case 4:
Cpr[3] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34 + Apr14 * Bpr44;
case 3:
Cpr[2] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33 + Apr14 * Bpr43;
case 2:
Cpr[1] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32 + Apr14 * Bpr42;
case 1:
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31 + Apr14 * Bpr41;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[3] = Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34 + Api14 * Bpr44;
case 3:
Cpi[2] = Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33 + Api14 * Bpr43;
case 2:
Cpi[1] = Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32 + Api14 * Bpr42;
case 1:
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31 + Api14 * Bpr41;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[3] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34 + Apr14 * Bpi44;
case 3:
Cpi[2] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33 + Apr14 * Bpi43;
case 2:
Cpi[1] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32 + Apr14 * Bpi42;
case 1:
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31 + Apr14 * Bpi41;
}
}
}
break;
}
break;
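        /* For m == 2, column j of C occupies Cpr[2*(j-1)] and Cpr[2*(j-1)+1];
           rows are written bottom-up within each column. */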
case 2: /* (2 x k)*(k x n) */
switch( k ) {
case 1: /* (2 x 1)*(1 x n) */
Apr11 = Apr[0];
Apr21 = Apr[1];
if( Api ) {
if( transa == 'N' || transa == 'T' ) {
Api11 = Api[0];
Api21 = Api[1];
} else { /* transa == 'G' || transa == 'C' */
Api11 =-Api[0];
Api21 =-Api[1];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[7] = Apr21 * Bpr14
- Api21 * Bpi14;
Cpr[6] = Apr11 * Bpr14
- Api11 * Bpi14;
Cpi[7] = Apr21 * Bpi14
+ Api21 * Bpr14;
Cpi[6] = Apr11 * Bpi14
+ Api11 * Bpr14;
case 3:
Cpr[5] = Apr21 * Bpr13
- Api21 * Bpi13;
Cpr[4] = Apr11 * Bpr13
- Api11 * Bpi13;
Cpi[5] = Apr21 * Bpi13
+ Api21 * Bpr13;
Cpi[4] = Apr11 * Bpi13
+ Api11 * Bpr13;
case 2:
Cpr[3] = Apr21 * Bpr12
- Api21 * Bpi12;
Cpr[2] = Apr11 * Bpr12
- Api11 * Bpi12;
Cpi[3] = Apr21 * Bpi12
+ Api21 * Bpr12;
Cpi[2] = Apr11 * Bpi12
+ Api11 * Bpr12;
case 1:
Cpr[1] = Apr21 * Bpr11
- Api21 * Bpi11;
Cpr[0] = Apr11 * Bpr11
- Api11 * Bpi11;
Cpi[1] = Apr21 * Bpi11
+ Api21 * Bpr11;
Cpi[0] = Apr11 * Bpi11
+ Api11 * Bpr11;
}
} else {
switch( n ) {
case 4:
Cpr[7] = Apr21 * Bpr14;
Cpr[6] = Apr11 * Bpr14;
case 3:
Cpr[5] = Apr21 * Bpr13;
Cpr[4] = Apr11 * Bpr13;
case 2:
Cpr[3] = Apr21 * Bpr12;
Cpr[2] = Apr11 * Bpr12;
case 1:
Cpr[1] = Apr21 * Bpr11;
Cpr[0] = Apr11 * Bpr11;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[7] = Api21 * Bpr14;
Cpi[6] = Api11 * Bpr14;
case 3:
Cpi[5] = Api21 * Bpr13;
Cpi[4] = Api11 * Bpr13;
case 2:
Cpi[3] = Api21 * Bpr12;
Cpi[2] = Api11 * Bpr12;
case 1:
Cpi[1] = Api21 * Bpr11;
Cpi[0] = Api11 * Bpr11;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[7] = Apr21 * Bpi14;
Cpi[6] = Apr11 * Bpi14;
case 3:
Cpi[5] = Apr21 * Bpi13;
Cpi[4] = Apr11 * Bpi13;
case 2:
Cpi[3] = Apr21 * Bpi12;
Cpi[2] = Apr11 * Bpi12;
case 1:
Cpi[1] = Apr21 * Bpi11;
Cpi[0] = Apr11 * Bpi11;
}
}
}
break;
case 2: /* (2 x 2)*(2 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[2];
Apr21 = Apr[1]; Apr22 = Apr[3];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1];
Apr21 = Apr[2]; Apr22 = Apr[3];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[2];
Api21 = Api[1]; Api22 = Api[3];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1];
Api21 = Api[2]; Api22 = Api[3];
} else if( transa == 'C' ) {
Api11 = -Api[0]; Api12 = -Api[1];
Api21 = -Api[2]; Api22 = -Api[3];
} else {/* transa == 'G' */
Api11 = -Api[0]; Api12 = -Api[2];
Api21 = -Api[1]; Api22 = -Api[3];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[7] = Apr21 * Bpr14 + Apr22 * Bpr24
- Api21 * Bpi14 - Api22 * Bpi24;
Cpr[6] = Apr11 * Bpr14 + Apr12 * Bpr24
- Api11 * Bpi14 - Api12 * Bpi24;
Cpi[7] = Apr21 * Bpi14 + Apr22 * Bpi24
+ Api21 * Bpr14 + Api22 * Bpr24;
Cpi[6] = Apr11 * Bpi14 + Apr12 * Bpi24
+ Api11 * Bpr14 + Api12 * Bpr24;
case 3:
Cpr[5] = Apr21 * Bpr13 + Apr22 * Bpr23
- Api21 * Bpi13 - Api22 * Bpi23;
Cpr[4] = Apr11 * Bpr13 + Apr12 * Bpr23
- Api11 * Bpi13 - Api12 * Bpi23;
Cpi[5] = Apr21 * Bpi13 + Apr22 * Bpi23
+ Api21 * Bpr13 + Api22 * Bpr23;
Cpi[4] = Apr11 * Bpi13 + Apr12 * Bpi23
+ Api11 * Bpr13 + Api12 * Bpr23;
case 2:
Cpr[3] = Apr21 * Bpr12 + Apr22 * Bpr22
- Api21 * Bpi12 - Api22 * Bpi22;
Cpr[2] = Apr11 * Bpr12 + Apr12 * Bpr22
- Api11 * Bpi12 - Api12 * Bpi22;
Cpi[3] = Apr21 * Bpi12 + Apr22 * Bpi22
+ Api21 * Bpr12 + Api22 * Bpr22;
Cpi[2] = Apr11 * Bpi12 + Apr12 * Bpi22
+ Api11 * Bpr12 + Api12 * Bpr22;
case 1:
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21
- Api21 * Bpi11 - Api22 * Bpi21;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21
- Api11 * Bpi11 - Api12 * Bpi21;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21
+ Api21 * Bpr11 + Api22 * Bpr21;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21
+ Api11 * Bpr11 + Api12 * Bpr21;
}
} else {
switch( n ) {
case 4:
Cpr[7] = Apr21 * Bpr14 + Apr22 * Bpr24;
Cpr[6] = Apr11 * Bpr14 + Apr12 * Bpr24;
case 3:
Cpr[5] = Apr21 * Bpr13 + Apr22 * Bpr23;
Cpr[4] = Apr11 * Bpr13 + Apr12 * Bpr23;
case 2:
Cpr[3] = Apr21 * Bpr12 + Apr22 * Bpr22;
Cpr[2] = Apr11 * Bpr12 + Apr12 * Bpr22;
case 1:
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[7] = Api21 * Bpr14 + Api22 * Bpr24;
Cpi[6] = Api11 * Bpr14 + Api12 * Bpr24;
case 3:
Cpi[5] = Api21 * Bpr13 + Api22 * Bpr23;
Cpi[4] = Api11 * Bpr13 + Api12 * Bpr23;
case 2:
Cpi[3] = Api21 * Bpr12 + Api22 * Bpr22;
Cpi[2] = Api11 * Bpr12 + Api12 * Bpr22;
case 1:
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[7] = Apr21 * Bpi14 + Apr22 * Bpi24;
Cpi[6] = Apr11 * Bpi14 + Apr12 * Bpi24;
case 3:
Cpi[5] = Apr21 * Bpi13 + Apr22 * Bpi23;
Cpi[4] = Apr11 * Bpi13 + Apr12 * Bpi23;
case 2:
Cpi[3] = Apr21 * Bpi12 + Apr22 * Bpi22;
Cpi[2] = Apr11 * Bpi12 + Apr12 * Bpi22;
case 1:
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21;
}
}
}
break;
case 3: /* (2 x 3)*(3 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[2]; Apr13 = Apr[4];
Apr21 = Apr[1]; Apr22 = Apr[3]; Apr23 = Apr[5];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1]; Apr13 = Apr[2];
Apr21 = Apr[3]; Apr22 = Apr[4]; Apr23 = Apr[5];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[2]; Api13 = Api[4];
Api21 = Api[1]; Api22 = Api[3]; Api23 = Api[5];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1]; Api13 = Api[2];
Api21 = Api[3]; Api22 = Api[4]; Api23 = Api[5];
} else if( transa == 'C' ) {
Api11 =-Api[0]; Api12 =-Api[1]; Api13 =-Api[2];
Api21 =-Api[3]; Api22 =-Api[4]; Api23 =-Api[5];
} else {/* transa == 'G' */
Api11 =-Api[0]; Api12 =-Api[2]; Api13 =-Api[4];
Api21 =-Api[1]; Api22 =-Api[3]; Api23 =-Api[5];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[7] = Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34
- Api21 * Bpi14 - Api22 * Bpi24 - Api23 * Bpi34;
Cpr[6] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34
- Api11 * Bpi14 - Api12 * Bpi24 - Api13 * Bpi34;
Cpi[7] = Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34
+ Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34;
Cpi[6] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34
+ Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34;
case 3:
Cpr[5] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33
- Api21 * Bpi13 - Api22 * Bpi23 - Api23 * Bpi33;
Cpr[4] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33
- Api11 * Bpi13 - Api12 * Bpi23 - Api13 * Bpi33;
Cpi[5] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33
+ Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33;
Cpi[4] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33
+ Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33;
case 2:
Cpr[3] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32
- Api21 * Bpi12 - Api22 * Bpi22 - Api23 * Bpi32;
Cpr[2] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32
- Api11 * Bpi12 - Api12 * Bpi22 - Api13 * Bpi32;
Cpi[3] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32
+ Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32;
Cpi[2] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32
+ Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32;
case 1:
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31
- Api21 * Bpi11 - Api22 * Bpi21 - Api23 * Bpi31;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31
- Api11 * Bpi11 - Api12 * Bpi21 - Api13 * Bpi31;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31
+ Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31
+ Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31;
}
} else {
switch( n ) {
case 4:
Cpr[7] = Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34;
Cpr[6] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34;
case 3:
Cpr[5] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33;
Cpr[4] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33;
case 2:
Cpr[3] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32;
Cpr[2] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32;
case 1:
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[7] = Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34;
Cpi[6] = Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34;
case 3:
Cpi[5] = Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33;
Cpi[4] = Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33;
case 2:
Cpi[3] = Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32;
Cpi[2] = Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32;
case 1:
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[7] = Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34;
Cpi[6] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34;
case 3:
Cpi[5] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33;
Cpi[4] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33;
case 2:
Cpi[3] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32;
Cpi[2] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32;
case 1:
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31;
}
}
}
break;
case 4: /* (2 x 4)*(4 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[2]; Apr13 = Apr[4]; Apr14 = Apr[6];
Apr21 = Apr[1]; Apr22 = Apr[3]; Apr23 = Apr[5]; Apr24 = Apr[7];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1]; Apr13 = Apr[2]; Apr14 = Apr[3];
Apr21 = Apr[4]; Apr22 = Apr[5]; Apr23 = Apr[6]; Apr24 = Apr[7];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[2]; Api13 = Api[4]; Api14 = Api[6];
Api21 = Api[1]; Api22 = Api[3]; Api23 = Api[5]; Api24 = Api[7];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1]; Api13 = Api[2]; Api14 = Api[3];
Api21 = Api[4]; Api22 = Api[5]; Api23 = Api[6]; Api24 = Api[7];
} else if( transa == 'G' ) {
Api11 =-Api[0]; Api12 =-Api[2]; Api13 =-Api[4]; Api14 =-Api[6];
Api21 =-Api[1]; Api22 =-Api[3]; Api23 =-Api[5]; Api24 =-Api[7];
} else { /* transa == 'C' */
Api11 =-Api[0]; Api12 =-Api[1]; Api13 =-Api[2]; Api14 =-Api[3];
Api21 =-Api[4]; Api22 =-Api[5]; Api23 =-Api[6]; Api24 =-Api[7];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[7] = Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34 + Apr24 * Bpr44
- Api21 * Bpi14 - Api22 * Bpi24 - Api23 * Bpi34 - Api24 * Bpi44;
Cpr[6] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34 + Apr14 * Bpr44
- Api11 * Bpi14 - Api12 * Bpi24 - Api13 * Bpi34 - Api14 * Bpi44;
Cpi[7] = Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34 + Apr24 * Bpi44
+ Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34 + Api24 * Bpr44;
Cpi[6] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34 + Apr14 * Bpi44
+ Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34 + Api14 * Bpr44;
case 3:
Cpr[5] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33 + Apr24 * Bpr43
- Api21 * Bpi13 - Api22 * Bpi23 - Api23 * Bpi33 - Api24 * Bpi43;
Cpr[4] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33 + Apr14 * Bpr43
- Api11 * Bpi13 - Api12 * Bpi23 - Api13 * Bpi33 - Api14 * Bpi43;
Cpi[5] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33 + Apr24 * Bpi43
+ Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33 + Api24 * Bpr43;
Cpi[4] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33 + Apr14 * Bpi43
+ Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33 + Api14 * Bpr43;
case 2:
Cpr[3] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32 + Apr24 * Bpr42
- Api21 * Bpi12 - Api22 * Bpi22 - Api23 * Bpi32 - Api24 * Bpi42;
Cpr[2] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32 + Apr14 * Bpr42
- Api11 * Bpi12 - Api12 * Bpi22 - Api13 * Bpi32 - Api14 * Bpi42;
Cpi[3] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32 + Apr24 * Bpi42
+ Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32 + Api24 * Bpr42;
Cpi[2] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32 + Apr14 * Bpi42
+ Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32 + Api14 * Bpr42;
case 1:
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31 + Apr24 * Bpr41
- Api21 * Bpi11 - Api22 * Bpi21 - Api23 * Bpi31 - Api24 * Bpi41;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31 + Apr14 * Bpr41
- Api11 * Bpi11 - Api12 * Bpi21 - Api13 * Bpi31 - Api14 * Bpi41;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31 + Apr24 * Bpi41
+ Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31 + Api24 * Bpr41;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31 + Apr14 * Bpi41
+ Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31 + Api14 * Bpr41;
}
} else {
switch( n ) {
case 4:
Cpr[7] = Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34 + Apr24 * Bpr44;
Cpr[6] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34 + Apr14 * Bpr44;
case 3:
Cpr[5] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33 + Apr24 * Bpr43;
Cpr[4] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33 + Apr14 * Bpr43;
case 2:
Cpr[3] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32 + Apr24 * Bpr42;
Cpr[2] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32 + Apr14 * Bpr42;
case 1:
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31 + Apr24 * Bpr41;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31 + Apr14 * Bpr41;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[7] = Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34 + Api24 * Bpr44;
Cpi[6] = Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34 + Api14 * Bpr44;
case 3:
Cpi[5] = Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33 + Api24 * Bpr43;
Cpi[4] = Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33 + Api14 * Bpr43;
case 2:
Cpi[3] = Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32 + Api24 * Bpr42;
Cpi[2] = Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32 + Api14 * Bpr42;
case 1:
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31 + Api24 * Bpr41;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31 + Api14 * Bpr41;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[7] = Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34 + Apr24 * Bpi44;
Cpi[6] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34 + Apr14 * Bpi44;
case 3:
Cpi[5] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33 + Apr24 * Bpi43;
Cpi[4] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33 + Apr14 * Bpi43;
case 2:
Cpi[3] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32 + Apr24 * Bpi42;
Cpi[2] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32 + Apr14 * Bpi42;
case 1:
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31 + Apr24 * Bpi41;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31 + Apr14 * Bpi41;
}
}
}
break;
}
break;
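        /* For m == 3, C(i,j) sits at Cpr[(i-1) + 3*(j-1)], e.g. Cpr[11] is
           C(3,4); as above, rows are written bottom-up within each column. */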
case 3: /* (3 x k)*(k x n) */
switch( k ) {
case 1: /* (3 x 1)*(1 x n) */
Apr11 = Apr[0];
Apr21 = Apr[1];
Apr31 = Apr[2];
if( Api ) {
if( transa == 'N' || transa == 'T' ) {
Api11 = Api[0];
Api21 = Api[1];
Api31 = Api[2];
} else { /* transa == 'G' || transa == 'C' */
Api11 =-Api[0];
Api21 =-Api[1];
Api31 =-Api[2];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[11]= Apr31 * Bpr14
- Api31 * Bpi14;
Cpr[10]= Apr21 * Bpr14
- Api21 * Bpi14;
Cpr[9] = Apr11 * Bpr14
- Api11 * Bpi14;
Cpi[11]= Apr31 * Bpi14
+ Api31 * Bpr14;
Cpi[10]= Apr21 * Bpi14
+ Api21 * Bpr14;
Cpi[9] = Apr11 * Bpi14
+ Api11 * Bpr14;
case 3:
Cpr[8] = Apr31 * Bpr13
- Api31 * Bpi13;
Cpr[7] = Apr21 * Bpr13
- Api21 * Bpi13;
Cpr[6] = Apr11 * Bpr13
- Api11 * Bpi13;
Cpi[8] = Apr31 * Bpi13
+ Api31 * Bpr13;
Cpi[7] = Apr21 * Bpi13
+ Api21 * Bpr13;
Cpi[6] = Apr11 * Bpi13
+ Api11 * Bpr13;
case 2:
Cpr[5] = Apr31 * Bpr12
- Api31 * Bpi12;
Cpr[4] = Apr21 * Bpr12
- Api21 * Bpi12;
Cpr[3] = Apr11 * Bpr12
- Api11 * Bpi12;
Cpi[5] = Apr31 * Bpi12
+ Api31 * Bpr12;
Cpi[4] = Apr21 * Bpi12
+ Api21 * Bpr12;
Cpi[3] = Apr11 * Bpi12
+ Api11 * Bpr12;
case 1:
Cpr[2] = Apr31 * Bpr11
- Api31 * Bpi11;
Cpr[1] = Apr21 * Bpr11
- Api21 * Bpi11;
Cpr[0] = Apr11 * Bpr11
- Api11 * Bpi11;
Cpi[2] = Apr31 * Bpi11
+ Api31 * Bpr11;
Cpi[1] = Apr21 * Bpi11
+ Api21 * Bpr11;
Cpi[0] = Apr11 * Bpi11
+ Api11 * Bpr11;
}
} else {
switch( n ) {
case 4:
Cpr[11]= Apr31 * Bpr14;
Cpr[10]= Apr21 * Bpr14;
Cpr[9] = Apr11 * Bpr14;
case 3:
Cpr[8] = Apr31 * Bpr13;
Cpr[7] = Apr21 * Bpr13;
Cpr[6] = Apr11 * Bpr13;
case 2:
Cpr[5] = Apr31 * Bpr12;
Cpr[4] = Apr21 * Bpr12;
Cpr[3] = Apr11 * Bpr12;
case 1:
Cpr[2] = Apr31 * Bpr11;
Cpr[1] = Apr21 * Bpr11;
Cpr[0] = Apr11 * Bpr11;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[11]= Api31 * Bpr14;
Cpi[10]= Api21 * Bpr14;
Cpi[9] = Api11 * Bpr14;
case 3:
Cpi[8] = Api31 * Bpr13;
Cpi[7] = Api21 * Bpr13;
Cpi[6] = Api11 * Bpr13;
case 2:
Cpi[5] = Api31 * Bpr12;
Cpi[4] = Api21 * Bpr12;
Cpi[3] = Api11 * Bpr12;
case 1:
Cpi[2] = Api31 * Bpr11;
Cpi[1] = Api21 * Bpr11;
Cpi[0] = Api11 * Bpr11;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[11]= Apr31 * Bpi14;
Cpi[10]= Apr21 * Bpi14;
Cpi[9] = Apr11 * Bpi14;
case 3:
Cpi[8] = Apr31 * Bpi13;
Cpi[7] = Apr21 * Bpi13;
Cpi[6] = Apr11 * Bpi13;
case 2:
Cpi[5] = Apr31 * Bpi12;
Cpi[4] = Apr21 * Bpi12;
Cpi[3] = Apr11 * Bpi12;
case 1:
Cpi[2] = Apr31 * Bpi11;
Cpi[1] = Apr21 * Bpi11;
Cpi[0] = Apr11 * Bpi11;
}
}
}
break;
case 2: /* (3 x 2)*(2 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[3];
Apr21 = Apr[1]; Apr22 = Apr[4];
Apr31 = Apr[2]; Apr32 = Apr[5];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1];
Apr21 = Apr[2]; Apr22 = Apr[3];
Apr31 = Apr[4]; Apr32 = Apr[5];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[3];
Api21 = Api[1]; Api22 = Api[4];
Api31 = Api[2]; Api32 = Api[5];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1];
Api21 = Api[2]; Api22 = Api[3];
Api31 = Api[4]; Api32 = Api[5];
} else if( transa == 'C' ) {
Api11 = -Api[0]; Api12 = -Api[1];
Api21 = -Api[2]; Api22 = -Api[3];
Api31 = -Api[4]; Api32 = -Api[5];
} else {/* transa == 'G' */
Api11 = -Api[0]; Api12 = -Api[3];
Api21 = -Api[1]; Api22 = -Api[4];
Api31 = -Api[2]; Api32 = -Api[5];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[11]= Apr31 * Bpr14 + Apr32 * Bpr24
- Api31 * Bpi14 - Api32 * Bpi24;
Cpr[10]= Apr21 * Bpr14 + Apr22 * Bpr24
- Api21 * Bpi14 - Api22 * Bpi24;
Cpr[9] = Apr11 * Bpr14 + Apr12 * Bpr24
- Api11 * Bpi14 - Api12 * Bpi24;
Cpi[11]= Apr31 * Bpi14 + Apr32 * Bpi24
+ Api31 * Bpr14 + Api32 * Bpr24;
Cpi[10]= Apr21 * Bpi14 + Apr22 * Bpi24
+ Api21 * Bpr14 + Api22 * Bpr24;
Cpi[9] = Apr11 * Bpi14 + Apr12 * Bpi24
+ Api11 * Bpr14 + Api12 * Bpr24;
case 3:
Cpr[8] = Apr31 * Bpr13 + Apr32 * Bpr23
- Api31 * Bpi13 - Api32 * Bpi23;
Cpr[7] = Apr21 * Bpr13 + Apr22 * Bpr23
- Api21 * Bpi13 - Api22 * Bpi23;
Cpr[6] = Apr11 * Bpr13 + Apr12 * Bpr23
- Api11 * Bpi13 - Api12 * Bpi23;
Cpi[8] = Apr31 * Bpi13 + Apr32 * Bpi23
+ Api31 * Bpr13 + Api32 * Bpr23;
Cpi[7] = Apr21 * Bpi13 + Apr22 * Bpi23
+ Api21 * Bpr13 + Api22 * Bpr23;
Cpi[6] = Apr11 * Bpi13 + Apr12 * Bpi23
+ Api11 * Bpr13 + Api12 * Bpr23;
case 2:
Cpr[5] = Apr31 * Bpr12 + Apr32 * Bpr22
- Api31 * Bpi12 - Api32 * Bpi22;
Cpr[4] = Apr21 * Bpr12 + Apr22 * Bpr22
- Api21 * Bpi12 - Api22 * Bpi22;
Cpr[3] = Apr11 * Bpr12 + Apr12 * Bpr22
- Api11 * Bpi12 - Api12 * Bpi22;
Cpi[5] = Apr31 * Bpi12 + Apr32 * Bpi22
+ Api31 * Bpr12 + Api32 * Bpr22;
Cpi[4] = Apr21 * Bpi12 + Apr22 * Bpi22
+ Api21 * Bpr12 + Api22 * Bpr22;
Cpi[3] = Apr11 * Bpi12 + Apr12 * Bpi22
+ Api11 * Bpr12 + Api12 * Bpr22;
case 1:
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21
- Api31 * Bpi11 - Api32 * Bpi21;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21
- Api21 * Bpi11 - Api22 * Bpi21;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21
- Api11 * Bpi11 - Api12 * Bpi21;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21
+ Api31 * Bpr11 + Api32 * Bpr21;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21
+ Api21 * Bpr11 + Api22 * Bpr21;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21
+ Api11 * Bpr11 + Api12 * Bpr21;
}
} else {
switch( n ) {
case 4:
Cpr[11]= Apr31 * Bpr14 + Apr32 * Bpr24;
Cpr[10]= Apr21 * Bpr14 + Apr22 * Bpr24;
Cpr[9] = Apr11 * Bpr14 + Apr12 * Bpr24;
case 3:
Cpr[8] = Apr31 * Bpr13 + Apr32 * Bpr23;
Cpr[7] = Apr21 * Bpr13 + Apr22 * Bpr23;
Cpr[6] = Apr11 * Bpr13 + Apr12 * Bpr23;
case 2:
Cpr[5] = Apr31 * Bpr12 + Apr32 * Bpr22;
Cpr[4] = Apr21 * Bpr12 + Apr22 * Bpr22;
Cpr[3] = Apr11 * Bpr12 + Apr12 * Bpr22;
case 1:
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[11]= Api31 * Bpr14 + Api32 * Bpr24;
Cpi[10]= Api21 * Bpr14 + Api22 * Bpr24;
Cpi[9] = Api11 * Bpr14 + Api12 * Bpr24;
case 3:
Cpi[8] = Api31 * Bpr13 + Api32 * Bpr23;
Cpi[7] = Api21 * Bpr13 + Api22 * Bpr23;
Cpi[6] = Api11 * Bpr13 + Api12 * Bpr23;
case 2:
Cpi[5] = Api31 * Bpr12 + Api32 * Bpr22;
Cpi[4] = Api21 * Bpr12 + Api22 * Bpr22;
Cpi[3] = Api11 * Bpr12 + Api12 * Bpr22;
case 1:
Cpi[2] = Api31 * Bpr11 + Api32 * Bpr21;
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[11]= Apr31 * Bpi14 + Apr32 * Bpi24;
Cpi[10]= Apr21 * Bpi14 + Apr22 * Bpi24;
Cpi[9] = Apr11 * Bpi14 + Apr12 * Bpi24;
case 3:
Cpi[8] = Apr31 * Bpi13 + Apr32 * Bpi23;
Cpi[7] = Apr21 * Bpi13 + Apr22 * Bpi23;
Cpi[6] = Apr11 * Bpi13 + Apr12 * Bpi23;
case 2:
Cpi[5] = Apr31 * Bpi12 + Apr32 * Bpi22;
Cpi[4] = Apr21 * Bpi12 + Apr22 * Bpi22;
Cpi[3] = Apr11 * Bpi12 + Apr12 * Bpi22;
case 1:
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21;
}
}
}
break;
case 3: /* (3 x 3)*(3 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[3]; Apr13 = Apr[6];
Apr21 = Apr[1]; Apr22 = Apr[4]; Apr23 = Apr[7];
Apr31 = Apr[2]; Apr32 = Apr[5]; Apr33 = Apr[8];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1]; Apr13 = Apr[2];
Apr21 = Apr[3]; Apr22 = Apr[4]; Apr23 = Apr[5];
Apr31 = Apr[6]; Apr32 = Apr[7]; Apr33 = Apr[8];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[3]; Api13 = Api[6];
Api21 = Api[1]; Api22 = Api[4]; Api23 = Api[7];
Api31 = Api[2]; Api32 = Api[5]; Api33 = Api[8];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1]; Api13 = Api[2];
Api21 = Api[3]; Api22 = Api[4]; Api23 = Api[5];
Api31 = Api[6]; Api32 = Api[7]; Api33 = Api[8];
} else if( transa == 'C' ) {
Api11 =-Api[0]; Api12 =-Api[1]; Api13 =-Api[2];
Api21 =-Api[3]; Api22 =-Api[4]; Api23 =-Api[5];
Api31 =-Api[6]; Api32 =-Api[7]; Api33 =-Api[8];
} else {/* transa == 'G' */
Api11 =-Api[0]; Api12 =-Api[3]; Api13 =-Api[6];
Api21 =-Api[1]; Api22 =-Api[4]; Api23 =-Api[7];
Api31 =-Api[2]; Api32 =-Api[5]; Api33 =-Api[8];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[11]= Apr31 * Bpr14 + Apr32 * Bpr24 + Apr33 * Bpr34
- Api31 * Bpi14 - Api32 * Bpi24 - Api33 * Bpi34;
Cpr[10]= Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34
- Api21 * Bpi14 - Api22 * Bpi24 - Api23 * Bpi34;
Cpr[9] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34
- Api11 * Bpi14 - Api12 * Bpi24 - Api13 * Bpi34;
Cpi[11]= Apr31 * Bpi14 + Apr32 * Bpi24 + Apr33 * Bpi34
+ Api31 * Bpr14 + Api32 * Bpr24 + Api33 * Bpr34;
Cpi[10]= Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34
+ Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34;
Cpi[9] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34
+ Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34;
case 3:
Cpr[8] = Apr31 * Bpr13 + Apr32 * Bpr23 + Apr33 * Bpr33
- Api31 * Bpi13 - Api32 * Bpi23 - Api33 * Bpi33;
Cpr[7] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33
- Api21 * Bpi13 - Api22 * Bpi23 - Api23 * Bpi33;
Cpr[6] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33
- Api11 * Bpi13 - Api12 * Bpi23 - Api13 * Bpi33;
Cpi[8] = Apr31 * Bpi13 + Apr32 * Bpi23 + Apr33 * Bpi33
+ Api31 * Bpr13 + Api32 * Bpr23 + Api33 * Bpr33;
Cpi[7] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33
+ Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33;
Cpi[6] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33
+ Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33;
case 2:
Cpr[5] = Apr31 * Bpr12 + Apr32 * Bpr22 + Apr33 * Bpr32
- Api31 * Bpi12 - Api32 * Bpi22 - Api33 * Bpi32;
Cpr[4] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32
- Api21 * Bpi12 - Api22 * Bpi22 - Api23 * Bpi32;
Cpr[3] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32
- Api11 * Bpi12 - Api12 * Bpi22 - Api13 * Bpi32;
Cpi[5] = Apr31 * Bpi12 + Apr32 * Bpi22 + Apr33 * Bpi32
+ Api31 * Bpr12 + Api32 * Bpr22 + Api33 * Bpr32;
Cpi[4] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32
+ Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32;
Cpi[3] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32
+ Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32;
case 1:
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21 + Apr33 * Bpr31
- Api31 * Bpi11 - Api32 * Bpi21 - Api33 * Bpi31;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31
- Api21 * Bpi11 - Api22 * Bpi21 - Api23 * Bpi31;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31
- Api11 * Bpi11 - Api12 * Bpi21 - Api13 * Bpi31;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21 + Apr33 * Bpi31
+ Api31 * Bpr11 + Api32 * Bpr21 + Api33 * Bpr31;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31
+ Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31
+ Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31;
}
} else {
switch( n ) {
case 4:
Cpr[11]= Apr31 * Bpr14 + Apr32 * Bpr24 + Apr33 * Bpr34;
Cpr[10]= Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34;
Cpr[9] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34;
case 3:
Cpr[8] = Apr31 * Bpr13 + Apr32 * Bpr23 + Apr33 * Bpr33;
Cpr[7] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33;
Cpr[6] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33;
case 2:
Cpr[5] = Apr31 * Bpr12 + Apr32 * Bpr22 + Apr33 * Bpr32;
Cpr[4] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32;
Cpr[3] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32;
case 1:
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21 + Apr33 * Bpr31;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[11]= Api31 * Bpr14 + Api32 * Bpr24 + Api33 * Bpr34;
Cpi[10]= Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34;
Cpi[9] = Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34;
case 3:
Cpi[8] = Api31 * Bpr13 + Api32 * Bpr23 + Api33 * Bpr33;
Cpi[7] = Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33;
Cpi[6] = Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33;
case 2:
Cpi[5] = Api31 * Bpr12 + Api32 * Bpr22 + Api33 * Bpr32;
Cpi[4] = Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32;
Cpi[3] = Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32;
case 1:
Cpi[2] = Api31 * Bpr11 + Api32 * Bpr21 + Api33 * Bpr31;
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[11]= Apr31 * Bpi14 + Apr32 * Bpi24 + Apr33 * Bpi34;
Cpi[10]= Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34;
Cpi[9] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34;
case 3:
Cpi[8] = Apr31 * Bpi13 + Apr32 * Bpi23 + Apr33 * Bpi33;
Cpi[7] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33;
Cpi[6] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33;
case 2:
Cpi[5] = Apr31 * Bpi12 + Apr32 * Bpi22 + Apr33 * Bpi32;
Cpi[4] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32;
Cpi[3] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32;
case 1:
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21 + Apr33 * Bpi31;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31;
}
}
}
break;
case 4: /* (3 x 4)*(4 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[3]; Apr13 = Apr[6]; Apr14 = Apr[9];
Apr21 = Apr[1]; Apr22 = Apr[4]; Apr23 = Apr[7]; Apr24 = Apr[10];
Apr31 = Apr[2]; Apr32 = Apr[5]; Apr33 = Apr[8]; Apr34 = Apr[11];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1]; Apr13 = Apr[2]; Apr14 = Apr[3];
Apr21 = Apr[4]; Apr22 = Apr[5]; Apr23 = Apr[6]; Apr24 = Apr[7];
Apr31 = Apr[8]; Apr32 = Apr[9]; Apr33 = Apr[10];Apr34 = Apr[11];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[3]; Api13 = Api[6]; Api14 = Api[9];
Api21 = Api[1]; Api22 = Api[4]; Api23 = Api[7]; Api24 = Api[10];
Api31 = Api[2]; Api32 = Api[5]; Api33 = Api[8]; Api34 = Api[11];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1]; Api13 = Api[2]; Api14 = Api[3];
Api21 = Api[4]; Api22 = Api[5]; Api23 = Api[6]; Api24 = Api[7];
Api31 = Api[8]; Api32 = Api[9]; Api33 = Api[10];Api34 = Api[11];
} else if( transa == 'G' ) {
Api11 =-Api[0]; Api12 =-Api[3]; Api13 =-Api[6]; Api14 =-Api[9];
Api21 =-Api[1]; Api22 =-Api[4]; Api23 =-Api[7]; Api24 =-Api[10];
Api31 =-Api[2]; Api32 =-Api[5]; Api33 =-Api[8]; Api34 =-Api[11];
} else { /* transa == 'C' */
Api11 =-Api[0]; Api12 =-Api[1]; Api13 =-Api[2]; Api14 =-Api[3];
Api21 =-Api[4]; Api22 =-Api[5]; Api23 =-Api[6]; Api24 =-Api[7];
Api31 =-Api[8]; Api32 =-Api[9]; Api33 =-Api[10];Api34 =-Api[11];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[11]= Apr31 * Bpr14 + Apr32 * Bpr24 + Apr33 * Bpr34 + Apr34 * Bpr44
- Api31 * Bpi14 - Api32 * Bpi24 - Api33 * Bpi34 - Api34 * Bpi44;
Cpr[10]= Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34 + Apr24 * Bpr44
- Api21 * Bpi14 - Api22 * Bpi24 - Api23 * Bpi34 - Api24 * Bpi44;
Cpr[9] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34 + Apr14 * Bpr44
- Api11 * Bpi14 - Api12 * Bpi24 - Api13 * Bpi34 - Api14 * Bpi44;
Cpi[11]= Apr31 * Bpi14 + Apr32 * Bpi24 + Apr33 * Bpi34 + Apr34 * Bpi44
+ Api31 * Bpr14 + Api32 * Bpr24 + Api33 * Bpr34 + Api34 * Bpr44;
Cpi[10]= Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34 + Apr24 * Bpi44
+ Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34 + Api24 * Bpr44;
Cpi[9] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34 + Apr14 * Bpi44
+ Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34 + Api14 * Bpr44;
case 3:
Cpr[8] = Apr31 * Bpr13 + Apr32 * Bpr23 + Apr33 * Bpr33 + Apr34 * Bpr43
- Api31 * Bpi13 - Api32 * Bpi23 - Api33 * Bpi33 - Api34 * Bpi43;
Cpr[7] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33 + Apr24 * Bpr43
- Api21 * Bpi13 - Api22 * Bpi23 - Api23 * Bpi33 - Api24 * Bpi43;
Cpr[6] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33 + Apr14 * Bpr43
- Api11 * Bpi13 - Api12 * Bpi23 - Api13 * Bpi33 - Api14 * Bpi43;
Cpi[8] = Apr31 * Bpi13 + Apr32 * Bpi23 + Apr33 * Bpi33 + Apr34 * Bpi43
+ Api31 * Bpr13 + Api32 * Bpr23 + Api33 * Bpr33 + Api34 * Bpr43;
Cpi[7] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33 + Apr24 * Bpi43
+ Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33 + Api24 * Bpr43;
Cpi[6] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33 + Apr14 * Bpi43
+ Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33 + Api14 * Bpr43;
case 2:
Cpr[5] = Apr31 * Bpr12 + Apr32 * Bpr22 + Apr33 * Bpr32 + Apr34 * Bpr42
- Api31 * Bpi12 - Api32 * Bpi22 - Api33 * Bpi32 - Api34 * Bpi42;
Cpr[4] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32 + Apr24 * Bpr42
- Api21 * Bpi12 - Api22 * Bpi22 - Api23 * Bpi32 - Api24 * Bpi42;
Cpr[3] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32 + Apr14 * Bpr42
- Api11 * Bpi12 - Api12 * Bpi22 - Api13 * Bpi32 - Api14 * Bpi42;
Cpi[5] = Apr31 * Bpi12 + Apr32 * Bpi22 + Apr33 * Bpi32 + Apr34 * Bpi42
+ Api31 * Bpr12 + Api32 * Bpr22 + Api33 * Bpr32 + Api34 * Bpr42;
Cpi[4] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32 + Apr24 * Bpi42
+ Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32 + Api24 * Bpr42;
Cpi[3] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32 + Apr14 * Bpi42
+ Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32 + Api14 * Bpr42;
case 1:
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21 + Apr33 * Bpr31 + Apr34 * Bpr41
- Api31 * Bpi11 - Api32 * Bpi21 - Api33 * Bpi31 - Api34 * Bpi41;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31 + Apr24 * Bpr41
- Api21 * Bpi11 - Api22 * Bpi21 - Api23 * Bpi31 - Api24 * Bpi41;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31 + Apr14 * Bpr41
- Api11 * Bpi11 - Api12 * Bpi21 - Api13 * Bpi31 - Api14 * Bpi41;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21 + Apr33 * Bpi31 + Apr34 * Bpi41
+ Api31 * Bpr11 + Api32 * Bpr21 + Api33 * Bpr31 + Api34 * Bpr41;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31 + Apr24 * Bpi41
+ Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31 + Api24 * Bpr41;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31 + Apr14 * Bpi41
+ Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31 + Api14 * Bpr41;
}
} else {
switch( n ) {
case 4:
Cpr[11]= Apr31 * Bpr14 + Apr32 * Bpr24 + Apr33 * Bpr34 + Apr34 * Bpr44;
Cpr[10]= Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34 + Apr24 * Bpr44;
Cpr[9] = Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34 + Apr14 * Bpr44;
case 3:
Cpr[8] = Apr31 * Bpr13 + Apr32 * Bpr23 + Apr33 * Bpr33 + Apr34 * Bpr43;
Cpr[7] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33 + Apr24 * Bpr43;
Cpr[6] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33 + Apr14 * Bpr43;
case 2:
Cpr[5] = Apr31 * Bpr12 + Apr32 * Bpr22 + Apr33 * Bpr32 + Apr34 * Bpr42;
Cpr[4] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32 + Apr24 * Bpr42;
Cpr[3] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32 + Apr14 * Bpr42;
case 1:
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21 + Apr33 * Bpr31 + Apr34 * Bpr41;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31 + Apr24 * Bpr41;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31 + Apr14 * Bpr41;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[11]= Api31 * Bpr14 + Api32 * Bpr24 + Api33 * Bpr34 + Api34 * Bpr44;
Cpi[10]= Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34 + Api24 * Bpr44;
Cpi[9] = Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34 + Api14 * Bpr44;
case 3:
Cpi[8] = Api31 * Bpr13 + Api32 * Bpr23 + Api33 * Bpr33 + Api34 * Bpr43;
Cpi[7] = Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33 + Api24 * Bpr43;
Cpi[6] = Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33 + Api14 * Bpr43;
case 2:
Cpi[5] = Api31 * Bpr12 + Api32 * Bpr22 + Api33 * Bpr32 + Api34 * Bpr42;
Cpi[4] = Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32 + Api24 * Bpr42;
Cpi[3] = Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32 + Api14 * Bpr42;
case 1:
Cpi[2] = Api31 * Bpr11 + Api32 * Bpr21 + Api33 * Bpr31 + Api34 * Bpr41;
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31 + Api24 * Bpr41;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31 + Api14 * Bpr41;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[11]= Apr31 * Bpi14 + Apr32 * Bpi24 + Apr33 * Bpi34 + Apr34 * Bpi44;
Cpi[10]= Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34 + Apr24 * Bpi44;
Cpi[9] = Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34 + Apr14 * Bpi44;
case 3:
Cpi[8] = Apr31 * Bpi13 + Apr32 * Bpi23 + Apr33 * Bpi33 + Apr34 * Bpi43;
Cpi[7] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33 + Apr24 * Bpi43;
Cpi[6] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33 + Apr14 * Bpi43;
case 2:
Cpi[5] = Apr31 * Bpi12 + Apr32 * Bpi22 + Apr33 * Bpi32 + Apr34 * Bpi42;
Cpi[4] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32 + Apr24 * Bpi42;
Cpi[3] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32 + Apr14 * Bpi42;
case 1:
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21 + Apr33 * Bpi31 + Apr34 * Bpi41;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31 + Apr24 * Bpi41;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31 + Apr14 * Bpi41;
}
}
}
break;
}
break;
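        /* For m == 4, the largest case unrolled here, C(i,j) sits at
           Cpr[(i-1) + 4*(j-1)], so Cpr[15] is C(4,4). */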
case 4: /* (4 x k)*(k x n) */
switch( k ) {
case 1: /* (4 x 1)*(1 x n) */
Apr11 = Apr[0];
Apr21 = Apr[1];
Apr31 = Apr[2];
Apr41 = Apr[3];
if( Api ) {
if( transa == 'N' || transa == 'T' ) {
Api11 = Api[0];
Api21 = Api[1];
Api31 = Api[2];
Api41 = Api[3];
} else { /* transa == 'G' || transa == 'C' */
Api11 =-Api[0];
Api21 =-Api[1];
Api31 =-Api[2];
Api41 =-Api[3];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[15]= Apr41 * Bpr14
- Api41 * Bpi14;
Cpr[14]= Apr31 * Bpr14
- Api31 * Bpi14;
Cpr[13]= Apr21 * Bpr14
- Api21 * Bpi14;
Cpr[12]= Apr11 * Bpr14
- Api11 * Bpi14;
Cpi[15]= Apr41 * Bpi14
+ Api41 * Bpr14;
Cpi[14]= Apr31 * Bpi14
+ Api31 * Bpr14;
Cpi[13]= Apr21 * Bpi14
+ Api21 * Bpr14;
Cpi[12]= Apr11 * Bpi14
+ Api11 * Bpr14;
case 3:
Cpr[11]= Apr41 * Bpr13
- Api41 * Bpi13;
Cpr[10]= Apr31 * Bpr13
- Api31 * Bpi13;
Cpr[9] = Apr21 * Bpr13
- Api21 * Bpi13;
Cpr[8] = Apr11 * Bpr13
- Api11 * Bpi13;
Cpi[11]= Apr41 * Bpi13
+ Api41 * Bpr13;
Cpi[10]= Apr31 * Bpi13
+ Api31 * Bpr13;
Cpi[9] = Apr21 * Bpi13
+ Api21 * Bpr13;
Cpi[8] = Apr11 * Bpi13
+ Api11 * Bpr13;
case 2:
Cpr[7] = Apr41 * Bpr12
- Api41 * Bpi12;
Cpr[6] = Apr31 * Bpr12
- Api31 * Bpi12;
Cpr[5] = Apr21 * Bpr12
- Api21 * Bpi12;
Cpr[4] = Apr11 * Bpr12
- Api11 * Bpi12;
Cpi[7] = Apr41 * Bpi12
+ Api41 * Bpr12;
Cpi[6] = Apr31 * Bpi12
+ Api31 * Bpr12;
Cpi[5] = Apr21 * Bpi12
+ Api21 * Bpr12;
Cpi[4] = Apr11 * Bpi12
+ Api11 * Bpr12;
case 1:
Cpr[3] = Apr41 * Bpr11
- Api41 * Bpi11;
Cpr[2] = Apr31 * Bpr11
- Api31 * Bpi11;
Cpr[1] = Apr21 * Bpr11
- Api21 * Bpi11;
Cpr[0] = Apr11 * Bpr11
- Api11 * Bpi11;
Cpi[3] = Apr41 * Bpi11
+ Api41 * Bpr11;
Cpi[2] = Apr31 * Bpi11
+ Api31 * Bpr11;
Cpi[1] = Apr21 * Bpi11
+ Api21 * Bpr11;
Cpi[0] = Apr11 * Bpi11
+ Api11 * Bpr11;
}
} else {
switch( n ) {
case 4:
Cpr[15]= Apr41 * Bpr14;
Cpr[14]= Apr31 * Bpr14;
Cpr[13]= Apr21 * Bpr14;
Cpr[12]= Apr11 * Bpr14;
case 3:
Cpr[11]= Apr41 * Bpr13;
Cpr[10]= Apr31 * Bpr13;
Cpr[9] = Apr21 * Bpr13;
Cpr[8] = Apr11 * Bpr13;
case 2:
Cpr[7] = Apr41 * Bpr12;
Cpr[6] = Apr31 * Bpr12;
Cpr[5] = Apr21 * Bpr12;
Cpr[4] = Apr11 * Bpr12;
case 1:
Cpr[3] = Apr41 * Bpr11;
Cpr[2] = Apr31 * Bpr11;
Cpr[1] = Apr21 * Bpr11;
Cpr[0] = Apr11 * Bpr11;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[15]= Api41 * Bpr14;
Cpi[14]= Api31 * Bpr14;
Cpi[13]= Api21 * Bpr14;
Cpi[12]= Api11 * Bpr14;
case 3:
Cpi[11]= Api41 * Bpr13;
Cpi[10]= Api31 * Bpr13;
Cpi[9] = Api21 * Bpr13;
Cpi[8] = Api11 * Bpr13;
case 2:
Cpi[7] = Api41 * Bpr12;
Cpi[6] = Api31 * Bpr12;
Cpi[5] = Api21 * Bpr12;
Cpi[4] = Api11 * Bpr12;
case 1:
Cpi[3] = Api41 * Bpr11;
Cpi[2] = Api31 * Bpr11;
Cpi[1] = Api21 * Bpr11;
Cpi[0] = Api11 * Bpr11;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[15]= Apr41 * Bpi14;
Cpi[14]= Apr31 * Bpi14;
Cpi[13]= Apr21 * Bpi14;
Cpi[12]= Apr11 * Bpi14;
case 3:
Cpi[11]= Apr41 * Bpi13;
Cpi[10]= Apr31 * Bpi13;
Cpi[9] = Apr21 * Bpi13;
Cpi[8] = Apr11 * Bpi13;
case 2:
Cpi[7] = Apr41 * Bpi12;
Cpi[6] = Apr31 * Bpi12;
Cpi[5] = Apr21 * Bpi12;
Cpi[4] = Apr11 * Bpi12;
case 1:
Cpi[3] = Apr41 * Bpi11;
Cpi[2] = Apr31 * Bpi11;
Cpi[1] = Apr21 * Bpi11;
Cpi[0] = Apr11 * Bpi11;
}
}
}
break;
case 2: /* (4 x 2)*(2 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[4];
Apr21 = Apr[1]; Apr22 = Apr[5];
Apr31 = Apr[2]; Apr32 = Apr[6];
Apr41 = Apr[3]; Apr42 = Apr[7];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1];
Apr21 = Apr[2]; Apr22 = Apr[3];
Apr31 = Apr[4]; Apr32 = Apr[5];
Apr41 = Apr[6]; Apr42 = Apr[7];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[4];
Api21 = Api[1]; Api22 = Api[5];
Api31 = Api[2]; Api32 = Api[6];
Api41 = Api[3]; Api42 = Api[7];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1];
Api21 = Api[2]; Api22 = Api[3];
Api31 = Api[4]; Api32 = Api[5];
Api41 = Api[6]; Api42 = Api[7];
} else if( transa == 'C' ) {
Api11 = -Api[0]; Api12 = -Api[1];
Api21 = -Api[2]; Api22 = -Api[3];
Api31 = -Api[4]; Api32 = -Api[5];
Api41 = -Api[6]; Api42 = -Api[7];
} else {/* transa == 'G' */
Api11 = -Api[0]; Api12 = -Api[4];
Api21 = -Api[1]; Api22 = -Api[5];
Api31 = -Api[2]; Api32 = -Api[6];
Api41 = -Api[3]; Api42 = -Api[7];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[15]= Apr41 * Bpr14 + Apr42 * Bpr24
- Api41 * Bpi14 - Api42 * Bpi24;
Cpr[14]= Apr31 * Bpr14 + Apr32 * Bpr24
- Api31 * Bpi14 - Api32 * Bpi24;
Cpr[13]= Apr21 * Bpr14 + Apr22 * Bpr24
- Api21 * Bpi14 - Api22 * Bpi24;
Cpr[12]= Apr11 * Bpr14 + Apr12 * Bpr24
- Api11 * Bpi14 - Api12 * Bpi24;
Cpi[15]= Apr41 * Bpi14 + Apr42 * Bpi24
+ Api41 * Bpr14 + Api42 * Bpr24;
Cpi[14]= Apr31 * Bpi14 + Apr32 * Bpi24
+ Api31 * Bpr14 + Api32 * Bpr24;
Cpi[13]= Apr21 * Bpi14 + Apr22 * Bpi24
+ Api21 * Bpr14 + Api22 * Bpr24;
Cpi[12]= Apr11 * Bpi14 + Apr12 * Bpi24
+ Api11 * Bpr14 + Api12 * Bpr24;
case 3:
Cpr[11]= Apr41 * Bpr13 + Apr42 * Bpr23
- Api41 * Bpi13 - Api42 * Bpi23;
Cpr[10]= Apr31 * Bpr13 + Apr32 * Bpr23
- Api31 * Bpi13 - Api32 * Bpi23;
Cpr[9] = Apr21 * Bpr13 + Apr22 * Bpr23
- Api21 * Bpi13 - Api22 * Bpi23;
Cpr[8] = Apr11 * Bpr13 + Apr12 * Bpr23
- Api11 * Bpi13 - Api12 * Bpi23;
Cpi[11]= Apr41 * Bpi13 + Apr42 * Bpi23
+ Api41 * Bpr13 + Api42 * Bpr23;
Cpi[10]= Apr31 * Bpi13 + Apr32 * Bpi23
+ Api31 * Bpr13 + Api32 * Bpr23;
Cpi[9] = Apr21 * Bpi13 + Apr22 * Bpi23
+ Api21 * Bpr13 + Api22 * Bpr23;
Cpi[8] = Apr11 * Bpi13 + Apr12 * Bpi23
+ Api11 * Bpr13 + Api12 * Bpr23;
case 2:
Cpr[7] = Apr41 * Bpr12 + Apr42 * Bpr22
- Api41 * Bpi12 - Api42 * Bpi22;
Cpr[6] = Apr31 * Bpr12 + Apr32 * Bpr22
- Api31 * Bpi12 - Api32 * Bpi22;
Cpr[5] = Apr21 * Bpr12 + Apr22 * Bpr22
- Api21 * Bpi12 - Api22 * Bpi22;
Cpr[4] = Apr11 * Bpr12 + Apr12 * Bpr22
- Api11 * Bpi12 - Api12 * Bpi22;
Cpi[7] = Apr41 * Bpi12 + Apr42 * Bpi22
+ Api41 * Bpr12 + Api42 * Bpr22;
Cpi[6] = Apr31 * Bpi12 + Apr32 * Bpi22
+ Api31 * Bpr12 + Api32 * Bpr22;
Cpi[5] = Apr21 * Bpi12 + Apr22 * Bpi22
+ Api21 * Bpr12 + Api22 * Bpr22;
Cpi[4] = Apr11 * Bpi12 + Apr12 * Bpi22
+ Api11 * Bpr12 + Api12 * Bpr22;
case 1:
Cpr[3] = Apr41 * Bpr11 + Apr42 * Bpr21
- Api41 * Bpi11 - Api42 * Bpi21;
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21
- Api31 * Bpi11 - Api32 * Bpi21;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21
- Api21 * Bpi11 - Api22 * Bpi21;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21
- Api11 * Bpi11 - Api12 * Bpi21;
Cpi[3] = Apr41 * Bpi11 + Apr42 * Bpi21
+ Api41 * Bpr11 + Api42 * Bpr21;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21
+ Api31 * Bpr11 + Api32 * Bpr21;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21
+ Api21 * Bpr11 + Api22 * Bpr21;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21
+ Api11 * Bpr11 + Api12 * Bpr21;
}
} else {
switch( n ) {
case 4:
Cpr[15]= Apr41 * Bpr14 + Apr42 * Bpr24;
Cpr[14]= Apr31 * Bpr14 + Apr32 * Bpr24;
Cpr[13]= Apr21 * Bpr14 + Apr22 * Bpr24;
Cpr[12]= Apr11 * Bpr14 + Apr12 * Bpr24;
case 3:
Cpr[11]= Apr41 * Bpr13 + Apr42 * Bpr23;
Cpr[10]= Apr31 * Bpr13 + Apr32 * Bpr23;
Cpr[9] = Apr21 * Bpr13 + Apr22 * Bpr23;
Cpr[8] = Apr11 * Bpr13 + Apr12 * Bpr23;
case 2:
Cpr[7] = Apr41 * Bpr12 + Apr42 * Bpr22;
Cpr[6] = Apr31 * Bpr12 + Apr32 * Bpr22;
Cpr[5] = Apr21 * Bpr12 + Apr22 * Bpr22;
Cpr[4] = Apr11 * Bpr12 + Apr12 * Bpr22;
case 1:
Cpr[3] = Apr41 * Bpr11 + Apr42 * Bpr21;
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[15]= Api41 * Bpr14 + Api42 * Bpr24;
Cpi[14]= Api31 * Bpr14 + Api32 * Bpr24;
Cpi[13]= Api21 * Bpr14 + Api22 * Bpr24;
Cpi[12]= Api11 * Bpr14 + Api12 * Bpr24;
case 3:
Cpi[11]= Api41 * Bpr13 + Api42 * Bpr23;
Cpi[10]= Api31 * Bpr13 + Api32 * Bpr23;
Cpi[9] = Api21 * Bpr13 + Api22 * Bpr23;
Cpi[8] = Api11 * Bpr13 + Api12 * Bpr23;
case 2:
Cpi[7] = Api41 * Bpr12 + Api42 * Bpr22;
Cpi[6] = Api31 * Bpr12 + Api32 * Bpr22;
Cpi[5] = Api21 * Bpr12 + Api22 * Bpr22;
Cpi[4] = Api11 * Bpr12 + Api12 * Bpr22;
case 1:
Cpi[3] = Api41 * Bpr11 + Api42 * Bpr21;
Cpi[2] = Api31 * Bpr11 + Api32 * Bpr21;
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[15]= Apr41 * Bpi14 + Apr42 * Bpi24;
Cpi[14]= Apr31 * Bpi14 + Apr32 * Bpi24;
Cpi[13]= Apr21 * Bpi14 + Apr22 * Bpi24;
Cpi[12]= Apr11 * Bpi14 + Apr12 * Bpi24;
case 3:
Cpi[11]= Apr41 * Bpi13 + Apr42 * Bpi23;
Cpi[10]= Apr31 * Bpi13 + Apr32 * Bpi23;
Cpi[9] = Apr21 * Bpi13 + Apr22 * Bpi23;
Cpi[8] = Apr11 * Bpi13 + Apr12 * Bpi23;
case 2:
Cpi[7] = Apr41 * Bpi12 + Apr42 * Bpi22;
Cpi[6] = Apr31 * Bpi12 + Apr32 * Bpi22;
Cpi[5] = Apr21 * Bpi12 + Apr22 * Bpi22;
Cpi[4] = Apr11 * Bpi12 + Apr12 * Bpi22;
case 1:
Cpi[3] = Apr41 * Bpi11 + Apr42 * Bpi21;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21;
}
}
}
break;
case 3: /* (4 x 3)*(3 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[4]; Apr13 = Apr[8];
Apr21 = Apr[1]; Apr22 = Apr[5]; Apr23 = Apr[9];
Apr31 = Apr[2]; Apr32 = Apr[6]; Apr33 = Apr[10];
Apr41 = Apr[3]; Apr42 = Apr[7]; Apr43 = Apr[11];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1]; Apr13 = Apr[2];
Apr21 = Apr[3]; Apr22 = Apr[4]; Apr23 = Apr[5];
Apr31 = Apr[6]; Apr32 = Apr[7]; Apr33 = Apr[8];
Apr41 = Apr[9]; Apr42 = Apr[10];Apr43 = Apr[11];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[4]; Api13 = Api[8];
Api21 = Api[1]; Api22 = Api[5]; Api23 = Api[9];
Api31 = Api[2]; Api32 = Api[6]; Api33 = Api[10];
Api41 = Api[3]; Api42 = Api[7]; Api43 = Api[11];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1]; Api13 = Api[2];
Api21 = Api[3]; Api22 = Api[4]; Api23 = Api[5];
Api31 = Api[6]; Api32 = Api[7]; Api33 = Api[8];
Api41 = Api[9]; Api42 = Api[10];Api43 = Api[11];
} else if( transa == 'C' ) {
Api11 =-Api[0]; Api12 =-Api[1]; Api13 =-Api[2];
Api21 =-Api[3]; Api22 =-Api[4]; Api23 =-Api[5];
Api31 =-Api[6]; Api32 =-Api[7]; Api33 =-Api[8];
Api41 =-Api[9]; Api42 =-Api[10];Api43 =-Api[11];
} else {/* transa == 'G' */
Api11 =-Api[0]; Api12 =-Api[4]; Api13 =-Api[8];
Api21 =-Api[1]; Api22 =-Api[5]; Api23 =-Api[9];
Api31 =-Api[2]; Api32 =-Api[6]; Api33 =-Api[10];
Api41 =-Api[3]; Api42 =-Api[7]; Api43 =-Api[11];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[15]= Apr41 * Bpr14 + Apr42 * Bpr24 + Apr43 * Bpr34
- Api41 * Bpi14 - Api42 * Bpi24 - Api43 * Bpi34;
Cpr[14]= Apr31 * Bpr14 + Apr32 * Bpr24 + Apr33 * Bpr34
- Api31 * Bpi14 - Api32 * Bpi24 - Api33 * Bpi34;
Cpr[13]= Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34
- Api21 * Bpi14 - Api22 * Bpi24 - Api23 * Bpi34;
Cpr[12]= Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34
- Api11 * Bpi14 - Api12 * Bpi24 - Api13 * Bpi34;
Cpi[15]= Apr41 * Bpi14 + Apr42 * Bpi24 + Apr43 * Bpi34
+ Api41 * Bpr14 + Api42 * Bpr24 + Api43 * Bpr34;
Cpi[14]= Apr31 * Bpi14 + Apr32 * Bpi24 + Apr33 * Bpi34
+ Api31 * Bpr14 + Api32 * Bpr24 + Api33 * Bpr34;
Cpi[13]= Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34
+ Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34;
Cpi[12]= Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34
+ Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34;
case 3:
Cpr[11]= Apr41 * Bpr13 + Apr42 * Bpr23 + Apr43 * Bpr33
- Api41 * Bpi13 - Api42 * Bpi23 - Api43 * Bpi33;
Cpr[10]= Apr31 * Bpr13 + Apr32 * Bpr23 + Apr33 * Bpr33
- Api31 * Bpi13 - Api32 * Bpi23 - Api33 * Bpi33;
Cpr[9] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33
- Api21 * Bpi13 - Api22 * Bpi23 - Api23 * Bpi33;
Cpr[8] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33
- Api11 * Bpi13 - Api12 * Bpi23 - Api13 * Bpi33;
Cpi[11]= Apr41 * Bpi13 + Apr42 * Bpi23 + Apr43 * Bpi33
+ Api41 * Bpr13 + Api42 * Bpr23 + Api43 * Bpr33;
Cpi[10]= Apr31 * Bpi13 + Apr32 * Bpi23 + Apr33 * Bpi33
+ Api31 * Bpr13 + Api32 * Bpr23 + Api33 * Bpr33;
Cpi[9] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33
+ Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33;
Cpi[8] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33
+ Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33;
case 2:
Cpr[7] = Apr41 * Bpr12 + Apr42 * Bpr22 + Apr43 * Bpr32
- Api41 * Bpi12 - Api42 * Bpi22 - Api43 * Bpi32;
Cpr[6] = Apr31 * Bpr12 + Apr32 * Bpr22 + Apr33 * Bpr32
- Api31 * Bpi12 - Api32 * Bpi22 - Api33 * Bpi32;
Cpr[5] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32
- Api21 * Bpi12 - Api22 * Bpi22 - Api23 * Bpi32;
Cpr[4] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32
- Api11 * Bpi12 - Api12 * Bpi22 - Api13 * Bpi32;
Cpi[7] = Apr41 * Bpi12 + Apr42 * Bpi22 + Apr43 * Bpi32
+ Api41 * Bpr12 + Api42 * Bpr22 + Api43 * Bpr32;
Cpi[6] = Apr31 * Bpi12 + Apr32 * Bpi22 + Apr33 * Bpi32
+ Api31 * Bpr12 + Api32 * Bpr22 + Api33 * Bpr32;
Cpi[5] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32
+ Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32;
Cpi[4] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32
+ Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32;
case 1:
Cpr[3] = Apr41 * Bpr11 + Apr42 * Bpr21 + Apr43 * Bpr31
- Api41 * Bpi11 - Api42 * Bpi21 - Api43 * Bpi31;
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21 + Apr33 * Bpr31
- Api31 * Bpi11 - Api32 * Bpi21 - Api33 * Bpi31;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31
- Api21 * Bpi11 - Api22 * Bpi21 - Api23 * Bpi31;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31
- Api11 * Bpi11 - Api12 * Bpi21 - Api13 * Bpi31;
Cpi[3] = Apr41 * Bpi11 + Apr42 * Bpi21 + Apr43 * Bpi31
+ Api41 * Bpr11 + Api42 * Bpr21 + Api43 * Bpr31;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21 + Apr33 * Bpi31
+ Api31 * Bpr11 + Api32 * Bpr21 + Api33 * Bpr31;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31
+ Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31
+ Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31;
}
} else {
switch( n ) {
case 4:
Cpr[15]= Apr41 * Bpr14 + Apr42 * Bpr24 + Apr43 * Bpr34;
Cpr[14]= Apr31 * Bpr14 + Apr32 * Bpr24 + Apr33 * Bpr34;
Cpr[13]= Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34;
Cpr[12]= Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34;
case 3:
Cpr[11]= Apr41 * Bpr13 + Apr42 * Bpr23 + Apr43 * Bpr33;
Cpr[10]= Apr31 * Bpr13 + Apr32 * Bpr23 + Apr33 * Bpr33;
Cpr[9] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33;
Cpr[8] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33;
case 2:
Cpr[7] = Apr41 * Bpr12 + Apr42 * Bpr22 + Apr43 * Bpr32;
Cpr[6] = Apr31 * Bpr12 + Apr32 * Bpr22 + Apr33 * Bpr32;
Cpr[5] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32;
Cpr[4] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32;
case 1:
Cpr[3] = Apr41 * Bpr11 + Apr42 * Bpr21 + Apr43 * Bpr31;
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21 + Apr33 * Bpr31;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[15]= Api41 * Bpr14 + Api42 * Bpr24 + Api43 * Bpr34;
Cpi[14]= Api31 * Bpr14 + Api32 * Bpr24 + Api33 * Bpr34;
Cpi[13]= Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34;
Cpi[12]= Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34;
case 3:
Cpi[11]= Api41 * Bpr13 + Api42 * Bpr23 + Api43 * Bpr33;
Cpi[10]= Api31 * Bpr13 + Api32 * Bpr23 + Api33 * Bpr33;
Cpi[9] = Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33;
Cpi[8] = Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33;
case 2:
Cpi[7] = Api41 * Bpr12 + Api42 * Bpr22 + Api43 * Bpr32;
Cpi[6] = Api31 * Bpr12 + Api32 * Bpr22 + Api33 * Bpr32;
Cpi[5] = Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32;
Cpi[4] = Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32;
case 1:
Cpi[3] = Api41 * Bpr11 + Api42 * Bpr21 + Api43 * Bpr31;
Cpi[2] = Api31 * Bpr11 + Api32 * Bpr21 + Api33 * Bpr31;
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[15]= Apr41 * Bpi14 + Apr42 * Bpi24 + Apr43 * Bpi34;
Cpi[14]= Apr31 * Bpi14 + Apr32 * Bpi24 + Apr33 * Bpi34;
Cpi[13]= Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34;
Cpi[12]= Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34;
case 3:
Cpi[11]= Apr41 * Bpi13 + Apr42 * Bpi23 + Apr43 * Bpi33;
Cpi[10]= Apr31 * Bpi13 + Apr32 * Bpi23 + Apr33 * Bpi33;
Cpi[9] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33;
Cpi[8] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33;
case 2:
Cpi[7] = Apr41 * Bpi12 + Apr42 * Bpi22 + Apr43 * Bpi32;
Cpi[6] = Apr31 * Bpi12 + Apr32 * Bpi22 + Apr33 * Bpi32;
Cpi[5] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32;
Cpi[4] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32;
case 1:
Cpi[3] = Apr41 * Bpi11 + Apr42 * Bpi21 + Apr43 * Bpi31;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21 + Apr33 * Bpi31;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31;
}
}
}
break;
case 4: /* (4 x 4)*(4 x n) */
if( transa == 'N' || transa == 'G' ) {
Apr11 = Apr[0]; Apr12 = Apr[4]; Apr13 = Apr[8]; Apr14 = Apr[12];
Apr21 = Apr[1]; Apr22 = Apr[5]; Apr23 = Apr[9]; Apr24 = Apr[13];
Apr31 = Apr[2]; Apr32 = Apr[6]; Apr33 = Apr[10];Apr34 = Apr[14];
Apr41 = Apr[3]; Apr42 = Apr[7]; Apr43 = Apr[11];Apr44 = Apr[15];
} else { /* transa == 'T' || transa == 'C' */
Apr11 = Apr[0]; Apr12 = Apr[1]; Apr13 = Apr[2]; Apr14 = Apr[3];
Apr21 = Apr[4]; Apr22 = Apr[5]; Apr23 = Apr[6]; Apr24 = Apr[7];
Apr31 = Apr[8]; Apr32 = Apr[9]; Apr33 = Apr[10];Apr34 = Apr[11];
Apr41 = Apr[12];Apr42 = Apr[13];Apr43 = Apr[14];Apr44 = Apr[15];
}
if( Api ) {
if( transa == 'N' ) {
Api11 = Api[0]; Api12 = Api[4]; Api13 = Api[8]; Api14 = Api[12];
Api21 = Api[1]; Api22 = Api[5]; Api23 = Api[9]; Api24 = Api[13];
Api31 = Api[2]; Api32 = Api[6]; Api33 = Api[10];Api34 = Api[14];
Api41 = Api[3]; Api42 = Api[7]; Api43 = Api[11];Api44 = Api[15];
} else if( transa == 'T' ) {
Api11 = Api[0]; Api12 = Api[1]; Api13 = Api[2]; Api14 = Api[3];
Api21 = Api[4]; Api22 = Api[5]; Api23 = Api[6]; Api24 = Api[7];
Api31 = Api[8]; Api32 = Api[9]; Api33 = Api[10];Api34 = Api[11];
Api41 = Api[12];Api42 = Api[13];Api43 = Api[14];Api44 = Api[15];
} else if( transa == 'G' ) {
Api11 =-Api[0]; Api12 =-Api[4]; Api13 =-Api[8]; Api14 =-Api[12];
Api21 =-Api[1]; Api22 =-Api[5]; Api23 =-Api[9]; Api24 =-Api[13];
Api31 =-Api[2]; Api32 =-Api[6]; Api33 =-Api[10];Api34 =-Api[14];
Api41 =-Api[3]; Api42 =-Api[7]; Api43 =-Api[11];Api44 =-Api[15];
} else { /* transa == 'C' */
Api11 =-Api[0]; Api12 =-Api[1]; Api13 =-Api[2]; Api14 =-Api[3];
Api21 =-Api[4]; Api22 =-Api[5]; Api23 =-Api[6]; Api24 =-Api[7];
Api31 =-Api[8]; Api32 =-Api[9]; Api33 =-Api[10];Api34 =-Api[11];
Api41 =-Api[12];Api42 =-Api[13];Api43 =-Api[14];Api44 =-Api[15];
}
}
if( Api && Bpi ) {
switch( n ) {
case 4:
Cpr[15]= Apr41 * Bpr14 + Apr42 * Bpr24 + Apr43 * Bpr34 + Apr44 * Bpr44
- Api41 * Bpi14 - Api42 * Bpi24 - Api43 * Bpi34 - Api44 * Bpi44;
Cpr[14]= Apr31 * Bpr14 + Apr32 * Bpr24 + Apr33 * Bpr34 + Apr34 * Bpr44
- Api31 * Bpi14 - Api32 * Bpi24 - Api33 * Bpi34 - Api34 * Bpi44;
Cpr[13]= Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34 + Apr24 * Bpr44
- Api21 * Bpi14 - Api22 * Bpi24 - Api23 * Bpi34 - Api24 * Bpi44;
Cpr[12]= Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34 + Apr14 * Bpr44
- Api11 * Bpi14 - Api12 * Bpi24 - Api13 * Bpi34 - Api14 * Bpi44;
Cpi[15]= Apr41 * Bpi14 + Apr42 * Bpi24 + Apr43 * Bpi34 + Apr44 * Bpi44
+ Api41 * Bpr14 + Api42 * Bpr24 + Api43 * Bpr34 + Api44 * Bpr44;
Cpi[14]= Apr31 * Bpi14 + Apr32 * Bpi24 + Apr33 * Bpi34 + Apr34 * Bpi44
+ Api31 * Bpr14 + Api32 * Bpr24 + Api33 * Bpr34 + Api34 * Bpr44;
Cpi[13]= Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34 + Apr24 * Bpi44
+ Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34 + Api24 * Bpr44;
Cpi[12]= Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34 + Apr14 * Bpi44
+ Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34 + Api14 * Bpr44;
case 3:
Cpr[11]= Apr41 * Bpr13 + Apr42 * Bpr23 + Apr43 * Bpr33 + Apr44 * Bpr43
- Api41 * Bpi13 - Api42 * Bpi23 - Api43 * Bpi33 - Api44 * Bpi43;
Cpr[10]= Apr31 * Bpr13 + Apr32 * Bpr23 + Apr33 * Bpr33 + Apr34 * Bpr43
- Api31 * Bpi13 - Api32 * Bpi23 - Api33 * Bpi33 - Api34 * Bpi43;
Cpr[9] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33 + Apr24 * Bpr43
- Api21 * Bpi13 - Api22 * Bpi23 - Api23 * Bpi33 - Api24 * Bpi43;
Cpr[8] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33 + Apr14 * Bpr43
- Api11 * Bpi13 - Api12 * Bpi23 - Api13 * Bpi33 - Api14 * Bpi43;
Cpi[11]= Apr41 * Bpi13 + Apr42 * Bpi23 + Apr43 * Bpi33 + Apr44 * Bpi43
+ Api41 * Bpr13 + Api42 * Bpr23 + Api43 * Bpr33 + Api44 * Bpr43;
Cpi[10]= Apr31 * Bpi13 + Apr32 * Bpi23 + Apr33 * Bpi33 + Apr34 * Bpi43
+ Api31 * Bpr13 + Api32 * Bpr23 + Api33 * Bpr33 + Api34 * Bpr43;
Cpi[9] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33 + Apr24 * Bpi43
+ Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33 + Api24 * Bpr43;
Cpi[8] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33 + Apr14 * Bpi43
+ Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33 + Api14 * Bpr43;
case 2:
Cpr[7] = Apr41 * Bpr12 + Apr42 * Bpr22 + Apr43 * Bpr32 + Apr44 * Bpr42
- Api41 * Bpi12 - Api42 * Bpi22 - Api43 * Bpi32 - Api44 * Bpi42;
Cpr[6] = Apr31 * Bpr12 + Apr32 * Bpr22 + Apr33 * Bpr32 + Apr34 * Bpr42
- Api31 * Bpi12 - Api32 * Bpi22 - Api33 * Bpi32 - Api34 * Bpi42;
Cpr[5] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32 + Apr24 * Bpr42
- Api21 * Bpi12 - Api22 * Bpi22 - Api23 * Bpi32 - Api24 * Bpi42;
Cpr[4] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32 + Apr14 * Bpr42
- Api11 * Bpi12 - Api12 * Bpi22 - Api13 * Bpi32 - Api14 * Bpi42;
Cpi[7] = Apr41 * Bpi12 + Apr42 * Bpi22 + Apr43 * Bpi32 + Apr44 * Bpi42
+ Api41 * Bpr12 + Api42 * Bpr22 + Api43 * Bpr32 + Api44 * Bpr42;
Cpi[6] = Apr31 * Bpi12 + Apr32 * Bpi22 + Apr33 * Bpi32 + Apr34 * Bpi42
+ Api31 * Bpr12 + Api32 * Bpr22 + Api33 * Bpr32 + Api34 * Bpr42;
Cpi[5] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32 + Apr24 * Bpi42
+ Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32 + Api24 * Bpr42;
Cpi[4] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32 + Apr14 * Bpi42
+ Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32 + Api14 * Bpr42;
case 1:
Cpr[3] = Apr41 * Bpr11 + Apr42 * Bpr21 + Apr43 * Bpr31 + Apr44 * Bpr41
- Api41 * Bpi11 - Api42 * Bpi21 - Api43 * Bpi31 - Api44 * Bpi41;
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21 + Apr33 * Bpr31 + Apr34 * Bpr41
- Api31 * Bpi11 - Api32 * Bpi21 - Api33 * Bpi31 - Api34 * Bpi41;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31 + Apr24 * Bpr41
- Api21 * Bpi11 - Api22 * Bpi21 - Api23 * Bpi31 - Api24 * Bpi41;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31 + Apr14 * Bpr41
- Api11 * Bpi11 - Api12 * Bpi21 - Api13 * Bpi31 - Api14 * Bpi41;
Cpi[3] = Apr41 * Bpi11 + Apr42 * Bpi21 + Apr43 * Bpi31 + Apr44 * Bpi41
+ Api41 * Bpr11 + Api42 * Bpr21 + Api43 * Bpr31 + Api44 * Bpr41;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21 + Apr33 * Bpi31 + Apr34 * Bpi41
+ Api31 * Bpr11 + Api32 * Bpr21 + Api33 * Bpr31 + Api34 * Bpr41;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31 + Apr24 * Bpi41
+ Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31 + Api24 * Bpr41;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31 + Apr14 * Bpi41
+ Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31 + Api14 * Bpr41;
}
} else {
switch( n ) {
case 4:
Cpr[15]= Apr41 * Bpr14 + Apr42 * Bpr24 + Apr43 * Bpr34 + Apr44 * Bpr44;
Cpr[14]= Apr31 * Bpr14 + Apr32 * Bpr24 + Apr33 * Bpr34 + Apr34 * Bpr44;
Cpr[13]= Apr21 * Bpr14 + Apr22 * Bpr24 + Apr23 * Bpr34 + Apr24 * Bpr44;
Cpr[12]= Apr11 * Bpr14 + Apr12 * Bpr24 + Apr13 * Bpr34 + Apr14 * Bpr44;
case 3:
Cpr[11]= Apr41 * Bpr13 + Apr42 * Bpr23 + Apr43 * Bpr33 + Apr44 * Bpr43;
Cpr[10]= Apr31 * Bpr13 + Apr32 * Bpr23 + Apr33 * Bpr33 + Apr34 * Bpr43;
Cpr[9] = Apr21 * Bpr13 + Apr22 * Bpr23 + Apr23 * Bpr33 + Apr24 * Bpr43;
Cpr[8] = Apr11 * Bpr13 + Apr12 * Bpr23 + Apr13 * Bpr33 + Apr14 * Bpr43;
case 2:
Cpr[7] = Apr41 * Bpr12 + Apr42 * Bpr22 + Apr43 * Bpr32 + Apr44 * Bpr42;
Cpr[6] = Apr31 * Bpr12 + Apr32 * Bpr22 + Apr33 * Bpr32 + Apr34 * Bpr42;
Cpr[5] = Apr21 * Bpr12 + Apr22 * Bpr22 + Apr23 * Bpr32 + Apr24 * Bpr42;
Cpr[4] = Apr11 * Bpr12 + Apr12 * Bpr22 + Apr13 * Bpr32 + Apr14 * Bpr42;
case 1:
Cpr[3] = Apr41 * Bpr11 + Apr42 * Bpr21 + Apr43 * Bpr31 + Apr44 * Bpr41;
Cpr[2] = Apr31 * Bpr11 + Apr32 * Bpr21 + Apr33 * Bpr31 + Apr34 * Bpr41;
Cpr[1] = Apr21 * Bpr11 + Apr22 * Bpr21 + Apr23 * Bpr31 + Apr24 * Bpr41;
Cpr[0] = Apr11 * Bpr11 + Apr12 * Bpr21 + Apr13 * Bpr31 + Apr14 * Bpr41;
}
if( Api ) {
switch( n ) {
case 4:
Cpi[15]= Api41 * Bpr14 + Api42 * Bpr24 + Api43 * Bpr34 + Api44 * Bpr44;
Cpi[14]= Api31 * Bpr14 + Api32 * Bpr24 + Api33 * Bpr34 + Api34 * Bpr44;
Cpi[13]= Api21 * Bpr14 + Api22 * Bpr24 + Api23 * Bpr34 + Api24 * Bpr44;
Cpi[12]= Api11 * Bpr14 + Api12 * Bpr24 + Api13 * Bpr34 + Api14 * Bpr44;
case 3:
Cpi[11]= Api41 * Bpr13 + Api42 * Bpr23 + Api43 * Bpr33 + Api44 * Bpr43;
Cpi[10]= Api31 * Bpr13 + Api32 * Bpr23 + Api33 * Bpr33 + Api34 * Bpr43;
Cpi[9] = Api21 * Bpr13 + Api22 * Bpr23 + Api23 * Bpr33 + Api24 * Bpr43;
Cpi[8] = Api11 * Bpr13 + Api12 * Bpr23 + Api13 * Bpr33 + Api14 * Bpr43;
case 2:
Cpi[7] = Api41 * Bpr12 + Api42 * Bpr22 + Api43 * Bpr32 + Api44 * Bpr42;
Cpi[6] = Api31 * Bpr12 + Api32 * Bpr22 + Api33 * Bpr32 + Api34 * Bpr42;
Cpi[5] = Api21 * Bpr12 + Api22 * Bpr22 + Api23 * Bpr32 + Api24 * Bpr42;
Cpi[4] = Api11 * Bpr12 + Api12 * Bpr22 + Api13 * Bpr32 + Api14 * Bpr42;
case 1:
Cpi[3] = Api41 * Bpr11 + Api42 * Bpr21 + Api43 * Bpr31 + Api44 * Bpr41;
Cpi[2] = Api31 * Bpr11 + Api32 * Bpr21 + Api33 * Bpr31 + Api34 * Bpr41;
Cpi[1] = Api21 * Bpr11 + Api22 * Bpr21 + Api23 * Bpr31 + Api24 * Bpr41;
Cpi[0] = Api11 * Bpr11 + Api12 * Bpr21 + Api13 * Bpr31 + Api14 * Bpr41;
}
}
if( Bpi ) {
switch( n ) {
case 4:
Cpi[15]= Apr41 * Bpi14 + Apr42 * Bpi24 + Apr43 * Bpi34 + Apr44 * Bpi44;
Cpi[14]= Apr31 * Bpi14 + Apr32 * Bpi24 + Apr33 * Bpi34 + Apr34 * Bpi44;
Cpi[13]= Apr21 * Bpi14 + Apr22 * Bpi24 + Apr23 * Bpi34 + Apr24 * Bpi44;
Cpi[12]= Apr11 * Bpi14 + Apr12 * Bpi24 + Apr13 * Bpi34 + Apr14 * Bpi44;
case 3:
Cpi[11]= Apr41 * Bpi13 + Apr42 * Bpi23 + Apr43 * Bpi33 + Apr44 * Bpi43;
Cpi[10]= Apr31 * Bpi13 + Apr32 * Bpi23 + Apr33 * Bpi33 + Apr34 * Bpi43;
Cpi[9] = Apr21 * Bpi13 + Apr22 * Bpi23 + Apr23 * Bpi33 + Apr24 * Bpi43;
Cpi[8] = Apr11 * Bpi13 + Apr12 * Bpi23 + Apr13 * Bpi33 + Apr14 * Bpi43;
case 2:
Cpi[7] = Apr41 * Bpi12 + Apr42 * Bpi22 + Apr43 * Bpi32 + Apr44 * Bpi42;
Cpi[6] = Apr31 * Bpi12 + Apr32 * Bpi22 + Apr33 * Bpi32 + Apr34 * Bpi42;
Cpi[5] = Apr21 * Bpi12 + Apr22 * Bpi22 + Apr23 * Bpi32 + Apr24 * Bpi42;
Cpi[4] = Apr11 * Bpi12 + Apr12 * Bpi22 + Apr13 * Bpi32 + Apr14 * Bpi42;
case 1:
Cpi[3] = Apr41 * Bpi11 + Apr42 * Bpi21 + Apr43 * Bpi31 + Apr44 * Bpi41;
Cpi[2] = Apr31 * Bpi11 + Apr32 * Bpi21 + Apr33 * Bpi31 + Apr34 * Bpi41;
Cpi[1] = Apr21 * Bpi11 + Apr22 * Bpi21 + Apr23 * Bpi31 + Apr24 * Bpi41;
Cpi[0] = Apr11 * Bpi11 + Apr12 * Bpi21 + Apr13 * Bpi31 + Apr14 * Bpi41;
}
}
}
break;
}
break;
}
/*----------------------------------------------------------------------------
* Vector dot product (1 x K) * (K x 1)
*---------------------------------------------------------------------------- */
} else if( m == 1 && n == 1 ) {
z = RealKindDotProduct(k, Apr, Api, ai, Bpr, Bpi, bi, dot_method);
*Cpr = z.r;
if( Cpi ) {
*Cpi = z.i;
}
/*----------------------------------------------------------------------------
* Vector outer product (M x 1) * (1 x N)
*---------------------------------------------------------------------------- */
} else if( k == 1 ) {
RealKindOuterProduct(m, n, Apr, Api, transa, Bpr, Bpi, transb, Cpr, Cpi, outer_method);
/*----------------------------------------------------------------------------
* Matrix times vector (M x K) * (K x 1)
*---------------------------------------------------------------------------- */
} else if( n == 1 ) {
/*----------------------------------------------------------------------------
* If the first matrix is not transposed, use calls to xGEMV. Also use this
* method if running in the 'MATLAB' or 'BLAS' mode, or the number of processors
* is greater than 2.
*---------------------------------------------------------------------------- */
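/*----------------------------------------------------------------------------
* For reference, the xGEMV calls below implement the complex product
*   C = (Apr + ai*Api*i) * (Bpr + bi*Bpi*i)
*     real: Cpr = Apr*Bpr - ai*bi*Api*Bpi
*     imag: Cpi = bi*Apr*Bpi + ai*Api*Bpr
* This sketch assumes the ONE, BI, AI, and AIBI arguments carry the scale
* factors 1, bi, ai, and -ai*bi respectively, where ai and bi are +-1 signs
* encoding any conjugation folded out of transa / transb.
*---------------------------------------------------------------------------- */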
if( transa == 'N' || transa == 'G' || mtimesx_mode == MTIMESX_BLAS ||
mtimesx_mode == MTIMESX_MATLAB || omp_get_num_procs() > 2 ) {
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xGEMV) "\n");
}
if( transa == 'G' ) ptransa = 'N';
xGEMV(PTRANSA, M1, N1, ONE, Apr, LDA, Bpr, INCX, ZERO, Cpr, INCY);
if( Bpi ) {
xGEMV(PTRANSA, M1, N1, BI, Apr, LDA, Bpi, INCX, ZERO, Cpi, INCY);
if( Api ) { /* (complex matrix) * (complex vector) */
xGEMV(PTRANSA, M1, N1, AIBI, Api, LDA, Bpi, INCX, ONE, Cpr, INCY);
xGEMV(PTRANSA, M1, N1, AI, Api, LDA, Bpr, INCX, ONE, Cpi, INCY);
} else { /* (real matrix) * (complex vector)
* already done */
}
} else {
if( Api ) { /* (complex matrix) * (real vector) */
xGEMV(PTRANSA, M1, N1, AI, Api, LDA, Bpr, INCX, ZERO, Cpi, INCY);
} else { /* (real matrix) * (real vector)
* already done */
}
}
/* Alternate method ... doesn't match MATLAB exactly ... not up to date
*
* if( transa == 'N' || transa == 'G' || matlab ) {
* if( transa == 'G' ) ptransa = 'N';
* xGEMV(PTRANSA, M1, N1, ONE, Apr, LDA, Bpr, INCX, ZERO, Cpr, INCY);
* if( mxIsComplex(A) ) {
* xGEMV(PTRANSA, M1, N1, AI, Api, LDA, Bpr, INCX, ZERO, Cpi, INCY);
* if( mxIsComplex(B) ) { // (complex matrix) * (complex vector)
* xGEMV(PTRANSA, M1, N1, AIBI, Api, LDA, Bpi, INCX, ONE, Cpr, INCY);
* xGEMV(PTRANSA, M1, N1, BI, Apr, LDA, Bpi, INCX, ONE, Cpi, INCY);
* } else { // (complex matrix) * (real vector)
* // already done
* }
* } else {
* if( mxIsComplex(B) ) { // (real matrix) * (complex vector)
* xGEMV(PTRANSA, M1, N1, BI, Apr, LDA, Bpi, INCX, ZERO, Cpi, INCY);
* } else { // (real matrix) * (real vector)
* // already done
* }
* } */
/*-----------------------------------------------------------------------------------------
* Else if the first matrix is transposed, then use dot product calls instead (faster)
* because the matrix can be accessed as a series of contiguous column vectors.
*----------------------------------------------------------------------------------------- */
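/*-----------------------------------------------------------------------------------------
* Access-pattern sketch (column-major storage assumed): with transa == 'T' or 'C' the
* stored A is k x m, so row i of op(A) is the contiguous block Apr[i*k .. i*k+k-1] and
* each output element is a single contiguous dot product, roughly
*   for( i=0; i<m; i++ ) Cpr[i] = dot(k, Apr + i*k, Bpr);
* where dot() stands in, illustratively, for the RealKindDotProduct calls below.
*----------------------------------------------------------------------------------------- */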
} else { /* transa == 'T' || transa == 'C' */
apr = Apr;
api = Api;
if( Api ) {
for( i=0; i<m; i++ ) { /* (complex matrix) * (vector) */
z = RealKindDotProduct(k, apr, api, ai, Bpr, Bpi, bi, dot_method);
Cpr[i] = z.r;
Cpi[i] = z.i;
apr += k;
api += k;
}
} else { /* (real matrix) * (complex vector) */
if( Bpi ) {
for( i=0; i<m; i++ ) {
z = RealKindDotProduct(k, apr, Api, ai, Bpr, Bpi, bi, dot_method);
Cpr[i] = z.r;
Cpi[i] = z.i;
apr += k;
}
} else { /* (real matrix) * (real vector) */
for( i=0; i<m; i++ ) {
z = RealKindDotProduct(k, apr, Api, ai, Bpr, Bpi, bi, dot_method);
Cpr[i] = z.r;
apr += k;
}
}
}
}
/*----------------------------------------------------------------------------------------
* Vector times matrix (1 x K) * (K x N)
*---------------------------------------------------------------------------------------- */
} else if( m == 1 ) {
/*----------------------------------------------------------------------------------------
* If the second matrix is transposed, then use calls to xGEMV with the arguments reversed.
* Also use this method if running in 'MATLAB' or 'BLAS' mode, or the number of processors
* is greater than 2.
*---------------------------------------------------------------------------------------- */
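/*----------------------------------------------------------------------------------------
* Identity behind the reversed arguments (a sketch, not extra code): C = a*op(B) is the
* transpose of op(B)'*a', so the 1 x N result can be produced by one xGEMV on the stored B
* with the transpose flag inverted (ptransb = 'N' when B arrives transposed, 'T' when it
* does not). The result vector is contiguous either way, so C never needs a transpose.
*---------------------------------------------------------------------------------------- */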
if( transb == 'C' || transb == 'T' || mtimesx_mode == MTIMESX_BLAS ||
mtimesx_mode == MTIMESX_MATLAB || omp_get_num_procs() > 2 ) {
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xGEMV) "\n");
}
if( transb == 'C' || transb == 'T' ) {
ptransb = 'N';
} else {
ptransb = 'T';
}
xGEMV(PTRANSB, M2, N2, ONE, Bpr, LDB, Apr, INCX, ZERO, Cpr, INCY);
if( Api ) {
xGEMV(PTRANSB, M2, N2, AI, Bpr, LDB, Api, INCX, ZERO, Cpi, INCY);
if( Bpi ) { /* (complex vector) * (complex matrix) */
xGEMV(PTRANSB, M2, N2, AIBI, Bpi, LDB, Api, INCX, ONE, Cpr, INCY);
xGEMV(PTRANSB, M2, N2, BI, Bpi, LDB, Apr, INCX, ONE, Cpi, INCY);
} else { /* (complex vector) * (real matrix)
* already done */
}
} else {
if( Bpi ) { /* (real vector) * (complex matrix) */
xGEMV(PTRANSB, M2, N2, BI, Bpi, LDB, Apr, INCX, ZERO, Cpi, INCY);
} else { /* (real vector) * (real matrix)
* already done */
}
}
/* Alternate method ... doesn't match MATLAB exactly ... not up to date
*
* if( transb == 'C' || transb == 'T' || matlab ) {
* if( transb == 'C' || transb == 'T' ) {
* ptransb = 'N';
* } else {
* ptransb = 'T';
* }
* xGEMV(PTRANSB, M2, N2, ONE, Bpr, LDB, Apr, INCX, ZERO, Cpr, INCY);
* if( mxIsComplex(B) ) {
* xGEMV(PTRANSB, M2, N2, BI, Bpi, LDB, Apr, INCX, ZERO, Cpi, INCY);
* if( mxIsComplex(A) ) { // (complex matrix) * (complex vector)
* xGEMV(PTRANSB, M2, N2, AIBI, Bpi, LDB, Api, INCX, ONE, Cpr, INCY);
* xGEMV(PTRANSB, M2, N2, AI, Bpr, LDB, Api, INCX, ONE, Cpi, INCY);
* } else { // (real matrix) * (complex vector)
* // already done
* }
* } else {
* if( mxIsComplex(A) ) { // (complex matrix) * (real vector)
* xGEMV(PTRANSB, M2, N2, AI, Bpr, LDB, Api, INCX, ZERO, Cpi, INCY);
* } else { // (real matrix) * (real vector)
* // already done
* }
* } */
/*-----------------------------------------------------------------------------------------
* Else if the second matrix is not transposed, then use calls to dot product instead
* (faster) because the matrix can be accessed as a series of contiguous column vectors.
*----------------------------------------------------------------------------------------- */
} else {
bpr = Bpr;
bpi = Bpi;
if( Bpi ) {
for( i=0; i<n; i++ ) {
z = RealKindDotProduct(k, Apr, Api, ai, bpr, bpi, bi, dot_method);
Cpr[i] = z.r;
Cpi[i] = z.i;
bpr += k;
bpi += k;
}
} else { /* (complex vector) * (real matrix) */
if( Api ) {
for( i=0; i<n; i++ ) {
z = RealKindDotProduct(k, Apr, Api, ai, bpr, Bpi, bi, dot_method);
Cpr[i] = z.r;
Cpi[i] = z.i;
bpr += k;
}
} else { /* (real vector) * (real matrix) */
for( i=0; i<n; i++ ) {
z = RealKindDotProduct(k, Apr, Api, ai, bpr, Bpi, bi, dot_method);
Cpr[i] = z.r;
bpr += k;
}
}
}
}
/*---------------------------------------------------------------------------------
* Matrix times matrix (M x K) * (K x N) with N small and the first matrix transposed.
* Use dot products (faster) because the first matrix can be accessed as a series of
* contiguous column vectors. Once the column size reaches about 8, the memory access
* efficiency of the BLAS routines takes over and this custom method is no longer
* faster. The number 8 is likely machine / implementation dependent. Only use this
* method when running in one of the 'SPEED' or 'LOOPS' type modes.
*--------------------------------------------------------------------------------- */
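/*---------------------------------------------------------------------------------
* Equivalent triple loop being implemented below (real case shown, illustrative;
* dot() stands in for the RealKindDotProduct calls):
*   for( j=0; j<n; j++ )          -- columns of B, contiguous length-k blocks
*       for( i=0; i<m; i++ )      -- stored columns of A = rows of op(A)
*           Cpr[i + j*m] = dot(k, Apr + i*k, Bpr + j*k);
*--------------------------------------------------------------------------------- */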
} else if( !(mtimesx_mode == MTIMESX_BLAS || mtimesx_mode == MTIMESX_MATLAB) &&
n < 7 && (transa == 'T' || transa == 'C') && (transb == 'N' || transb == 'G') ) {
bpr = Bpr;
bpi = Bpi;
cpr = Cpr;
cpi = Cpi;
for( j=0; j<n; j++ ) {
apr = Apr;
api = Api;
for( i=0; i<m; i++ ) {
z = RealKindDotProduct(k, apr, api, ai, bpr, bpi, bi, dot_method);
*cpr++ = z.r;
if( cpi ) *cpi++ = z.i;
apr += k;
if( api ) api += k;
}
bpr += k;
if( bpi ) bpi += k;
}
/*---------------------------------------------------------------------------------------------
* Matrix times matrix (M x K) *(K x N)
*--------------------------------------------------------------------------------------------- */
} else {
/*---------------------------------------------------------------------------------------------
* If both operands of the matrix product are actually the same matrix, use calls to the
* symmetric routines xSYRK and xSYR2K where possible. These only work on the lower or upper
* triangle part of the matrix, so we will have to fill out the other half manually, but even
* so this will be faster. Some of these will not match MATLAB exactly, so only run them in
* cases where matching is not required.
*--------------------------------------------------------------------------------------------- */
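/*---------------------------------------------------------------------------------------------
* Worked form of the first symmetric case below, C = A*A' with A = Apr + Api*i:
*   real: Cpr = Apr*Apr' - Api*Api'   (two xSYRK calls, alphas ONE and MINUSONE)
*   imag: Cpi = Api*Apr' + Apr*Api'   (one xSYR2K call)
* Both routines write only one triangle, which is why xFILLPOS is called afterwards
* to mirror the result into the other triangle.
*--------------------------------------------------------------------------------------------- */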
if( Apr == Bpr && ((transa == 'N' && transb == 'T') ||
(transa == 'T' && transb == 'N')) ) {
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYRK) " and " TOKENSTRING(xSYR2K) "\n");
}
xSYRK(UPLO, TRANSA, N, K, ONE, Apr, LDA, ZERO, Cpr, LDC);
if( Api ) {
xSYRK(UPLO, TRANSA, N, K, MINUSONE, Api, LDA, ONE, Cpr, LDC);
xSYR2K(UPLO,TRANSA, N, K, ONE, Api, LDA, Apr, LDA, ZERO, Cpi, LDC);
xFILLPOS(Cpi, n);
}
xFILLPOS(Cpr, n);
} else if( Apr == Bpr && (!(mtimesx_mode == MTIMESX_BLAS || mtimesx_mode == MTIMESX_MATLAB) ||
(!Api && !Bpi)) &&
((transa == 'G' && transb == 'C') ||
(transa == 'C' && transb == 'G')) ) {
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYRK) " and " TOKENSTRING(xSYR2K) "\n");
}
if( transa == 'G') ptransa = 'N';
xSYRK(UPLO, PTRANSA, N, K, ONE, Apr, LDA, ZERO, Cpr, LDC);
if( Api ) {
xSYRK(UPLO, PTRANSA, N, K, MINUSONE, Api, LDA, ONE, Cpr, LDC);
xSYR2K(UPLO,PTRANSA, N, K, MINUSONE, Apr, LDA, Api, LDA, ZERO, Cpi, LDC);
xFILLPOS(Cpi, n);
}
xFILLPOS(Cpr, n);
} else if( Apr == Bpr && ((transa == 'N' && transb == 'C') ||
(transa == 'T' && transb == 'G' &&
(!(mtimesx_mode == MTIMESX_BLAS || mtimesx_mode == MTIMESX_MATLAB) ||
(!Api && !Bpi)))) ) {
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYRK) " and " TOKENSTRING(xGEMM) "\n");
}
if( transb == 'G' ) ptransb = 'N';
xSYRK(UPLO, TRANSA, N, K, ONE, Apr, LDA, ZERO, Cpr, LDC);
if( Api ) {
xSYRK(UPLO, TRANSA, N, K, ONE, Api, LDA, ONE, Cpr, LDC);
xGEMM(TRANSA, PTRANSB, M, N, K, ONE, Api, LDA, Apr, LDA, ZERO, Cpi, LDC);
xFILLNEG(Cpi, n);
}
xFILLPOS(Cpr, n);
} else if( Apr == Bpr && ((transa == 'C' && transb == 'N') ||
(transa == 'G' && transb == 'T' &&
(!(mtimesx_mode == MTIMESX_BLAS || mtimesx_mode == MTIMESX_MATLAB) ||
(!Api && !Bpi)))) ) {
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYRK) " and " TOKENSTRING(xGEMM) "\n");
}
if( transa == 'G' ) ptransa = 'N';
xSYRK(UPLO, PTRANSA, N, K, ONE, Apr, LDA, ZERO, Cpr, LDC);
if( Api ) {
xSYRK(UPLO, PTRANSA, N, K, ONE, Api, LDA, ONE, Cpr, LDC);
xGEMM(PTRANSA, TRANSB, M, N, K, ONE, Apr, LDA, Api, LDA, ZERO, Cpi, LDC);
xFILLNEG(Cpi, n);
}
xFILLPOS(Cpr, n);
/*-------------------------------------------------------------------------------------------
* Else this is not a symmetric case, so just call the general matrix multiply routine xGEMM.
*------------------------------------------------------------------------------------------- */
} else {
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xGEMM) "\n");
}
if( transa == 'G' ) ptransa = 'N';
if( transb == 'G' ) ptransb = 'N';
xGEMM(PTRANSA, PTRANSB, M, N, K, ONE, Apr, LDA, Bpr, LDB, ZERO, Cpr, LDC);
if( Bpi ) {
xGEMM(PTRANSA, PTRANSB, M, N, K, BI, Apr, LDA, Bpi, LDB, ZERO, Cpi, LDC);
if( Api ) { /* (complex matrix) * (complex matrix) */
xGEMM(PTRANSA, PTRANSB, M, N, K, AIBI, Api, LDA, Bpi, LDB, ONE, Cpr, LDC);
xGEMM(PTRANSA, PTRANSB, M, N, K, AI, Api, LDA, Bpr, LDB, ONE, Cpi, LDC);
} else { /* (real matrix) * (complex matrix)
* already done */
}
} else {
if( Api ) { /* (complex matrix) * (real matrix) */
xGEMM(PTRANSA, PTRANSB, M, N, K, AI, Api, LDA, Bpr, LDB, ZERO, Cpi, LDC);
} else { /* (real matrix) * (real matrix)
* already done */
}
}
/* Alternate method ... doesn't match MATLAB exactly ... not up to date
*
* } else {
* if( transa == 'G' ) ptransa = 'N';
* if( transb == 'G' ) ptransb = 'N';
* xGEMM(PTRANSA, PTRANSB, M, N, K, ONE, Apr, LDA, Bpr, LDB, ZERO, Cpr, LDC);
* if( mxIsComplex(A) ) {
* xGEMM(PTRANSA, PTRANSB, M, N, K, AI, Api, LDA, Bpr, LDB, ZERO, Cpi, LDC);
* if( mxIsComplex(B) ) { // (complex matrix) * (complex matrix)
* xGEMM(PTRANSA, PTRANSB, M, N, K, AIBI, Api, LDA, Bpi, LDB, ONE, Cpr, LDC);
* xGEMM(PTRANSA, PTRANSB, M, N, K, BI, Apr, LDA, Bpi, LDB, ONE, Cpi, LDC);
* } else { // (complex matrix) * (real matrix)
* // already done
* }
* } else {
* if( mxIsComplex(B) ) { // (real matrix) * (complex matrix)
* xGEMM(PTRANSA, PTRANSB, M, N, K, BI, Apr, LDA, Bpi, LDB, ZERO, Cpi, LDC);
* } else { // (real matrix) * (real matrix)
* // already done
* }
* } */
}
}
/*----------------------------------------------------------------------------
* End of the outer loop that processes all of the individual matrix multiplies.
* Increment the matrix pointers to point to the next pair of matrices to be
* multiplied.
*---------------------------------------------------------------------------- */
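/*----------------------------------------------------------------------------
* The index update below works like an odometer: dimension 2 of Cindx is
* incremented first, carrying into higher dimensions on overflow. E.g. with
* trailing dimensions {2,3} the (Cindx[2],Cindx[3]) sequence is
*   (0,0) (1,0) (0,1) (1,1) (0,2) (1,2)
* The A and B offsets are then rebuilt from scratch, holding any dimension
* where the operand is singleton (Cindx[j] >= Adimz[j]) at zero, so that
* operand is implicitly expanded along it.
*---------------------------------------------------------------------------- */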
if( ip < p-1 ) {
j = 2;
while( ++Cindx[j] == Cdims[j] ) Cindx[j++] = 0;
Ap = Bp = 0;
Ablock = Asize;
Bblock = Bsize;
for( j=2; j<Cndim; j++ ) {
if( Cindx[j] < Adimz[j] ) Ap += Cindx[j] * Ablock;
Ablock *= Adimz[j];
if( Cindx[j] < Bdimz[j] ) Bp += Cindx[j] * Bblock;
Bblock *= Bdimz[j];
}
Apr = Apr0 + Ap;
if( Api ) Api = Api0 + Ap;
Bpr = Bpr0 + Bp;
if( Bpi ) Bpi = Bpi0 + Bp;
Cpr += Csize;
if( Cpi ) Cpi += Csize;
}
debug_message = 0;
}
mxFree(Cindx);
mxFree(Cdims);
mxFree(Adimz);
mxFree(Bdimz);
/*---------------------------------------------------------------------------------
* If the imaginary part is all zero, then free it and set the pointer to NULL.
*--------------------------------------------------------------------------------- */
Cpi = mxGetImagData(C);
if( AllRealZero(Cpi, m*n*p) ) {
mxFree(Cpi);
mxSetImagData(C, NULL);
}
/*---------------------------------------------------------------------------------
* Done.
*--------------------------------------------------------------------------------- */
if( destroyA ) mxDestroyArray(A);
if( destroyB ) mxDestroyArray(B);
return result;
}
/*--------------------------------------------------------------------------------------
*--------------------------------------------------------------------------------------
*--------------------------------------------------------------------------------------
*-------------------------------------------------------------------------------------- */
/*--------------------------------------------------------------------------------------
* Returns 1 (true) if all of the elements are zero. Returns 0 (false) if at least one
* of the elements is non-zero, or if the pointer to the data is NULL.
*-------------------------------------------------------------------------------------- */
int AllRealZero(RealKind *x, mwSignedIndex n)
{
register mwSignedIndex i;
if( x == NULL ) return 0;
for(i=0; i<n; i++) {
if( x[i] != zero ) return 0;
}
return 1;
}
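/*--------------------------------------------------------------------------------------
* Naming convention for the scalar-times-array helpers that follow: the infix encodes
* the scalar multiplier (P1 = +1, M1 = -1, P0 = 0, Px = arbitrary ar or ai) and the
* suffix encodes how B is applied (N = as-is, G = conjugate, T = transpose, C =
* conjugate transpose), mirroring the transa / transb codes used above.
*-------------------------------------------------------------------------------------- */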
/*--------------------------------------------------------------------------------------
* C = (1 + 0*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqP1P0TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
Cpi[i] = Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + 0*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqP1P0TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
Cpi[i] = -Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + 0*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqP1P0TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
*Cpi++ = *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + 0*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqP1P0TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
*Cpi++ = -(*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + 1*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqP1P1TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
Cpi[i] = Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i] - Bpi[i];
Cpi[i] = Bpr[i] + Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + 1*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqP1P1TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
Cpi[i] = Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i] + Bpi[i];
Cpi[i] = Bpr[i] - Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + 1*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqP1P1TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
*Cpi++ = *bpr;
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr - *bpi;
*Cpi++ = *bpr + *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + 1*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqP1P1TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
*Cpi++ = *bpr;
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr + *bpi;
*Cpi++ = *bpr - *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 - 1*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqP1M1TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
Cpi[i] = -Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i] + Bpi[i];
Cpi[i] = Bpi[i] - Bpr[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 - 1*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqP1M1TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
Cpi[i] = -Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i] - Bpi[i];
Cpi[i] = -Bpi[i] - Bpr[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 - 1*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqP1M1TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
*Cpi++ = -(*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr + *bpi;
*Cpi++ = *bpi - *bpr;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 - 1*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqP1M1TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
*Cpi++ = -(*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr - *bpi;
*Cpi++ = - *bpr - *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + ai*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqP1PxTimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
Cpi[i] = ai * Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i] - ai * Bpi[i];
Cpi[i] = ai * Bpr[i] + Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + ai*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqP1PxTimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i];
Cpi[i] = ai * Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = Bpr[i] + ai * Bpi[i];
Cpi[i] = ai * Bpr[i] - Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + ai*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqP1PxTimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
*Cpi++ = ai * (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr - ai * (*bpi);
*Cpi++ = ai * (*bpr) + *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (1 + ai*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqP1PxTimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr;
*Cpi++ = ai * (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = *bpr + ai * (*bpi);
*Cpi++ = ai * (*bpr) - *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + 1*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqM1P1TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i];
Cpi[i] = Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i] - Bpi[i];
Cpi[i] = Bpr[i] - Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + 1*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqM1P1TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i];
Cpi[i] = Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i] + Bpi[i];
Cpi[i] = Bpr[i] + Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + 1*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqM1P1TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr);
*Cpi++ = *bpr;
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr) - (*bpi);
*Cpi++ = *bpr - (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + 1*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqM1P1TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr);
*Cpi++ = *bpr;
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr) + (*bpi);
*Cpi++ = *bpr + (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 - 1*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqM1M1TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i];
Cpi[i] = -Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i] + Bpi[i];
Cpi[i] = -Bpi[i] - Bpr[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 - 1*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqM1M1TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i];
Cpi[i] = -Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i] - Bpi[i];
Cpi[i] = Bpi[i] - Bpr[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 - 1*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqM1M1TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr);
*Cpi++ = -(*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr) + (*bpi);
*Cpi++ = -(*bpr) - (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 - 1*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqM1M1TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr);
*Cpi++ = -(*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr) - (*bpi);
*Cpi++ = -(*bpr) + (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + 0*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqM1P0TimesRealKindN(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i];
Cpi[i] = -Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + 0*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqM1P0TimesRealKindG(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i];
Cpi[i] = Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + 0*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqM1P0TimesRealKindT(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr);
*Cpi++ = -(*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + 0*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqM1P0TimesRealKindC(RealKind *Cpr, RealKind *Cpi,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr);
*Cpi++ = *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + ai*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqM1PxTimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = - Bpr[i];
Cpi[i] = ai * Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i] - ai * Bpi[i];
Cpi[i] = ai * Bpr[i] - Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + ai*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqM1PxTimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = - Bpr[i];
Cpi[i] = ai * Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = -Bpr[i] + ai * Bpi[i];
Cpi[i] = ai * Bpr[i] + Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + ai*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqM1PxTimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = - (*bpr);
*Cpi++ = ai * (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr) - ai * (*bpi);
*Cpi++ = ai * (*bpr) - *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (-1 + ai*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqM1PxTimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = - (*bpr);
*Cpi++ = ai * (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = -(*bpr) + ai * (*bpi);
*Cpi++ = ai * (*bpr) + *bpi;
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + 1*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqPxP1TimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i] - Bpi[i];
Cpi[i] = Bpr[i] + ar * Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + 1*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqPxP1TimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i] + Bpi[i];
Cpi[i] = Bpr[i] - ar * Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + 1*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqPxP1TimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
*Cpi++ = *bpr;
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr) - (*bpi);
*Cpi++ = (*bpr) + ar * (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + 1*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqPxP1TimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
*Cpi++ = *bpr;
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr) + (*bpi);
*Cpi++ = (*bpr) - ar * (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar - 1*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqPxM1TimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = - Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i] + Bpi[i];
Cpi[i] = -Bpr[i] + ar * Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar - 1*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqPxM1TimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = - Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i] - Bpi[i];
Cpi[i] = -Bpr[i] - ar * Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar - 1*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqPxM1TimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
*Cpi++ = - (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr) + (*bpi);
*Cpi++ = -(*bpr) + ar * (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar - 1*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqPxM1TimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
*Cpi++ = - (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr) - (*bpi);
*Cpi++ = -(*bpr) - ar * (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + 0*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqPxP0TimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = ar * Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + 0*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqPxP0TimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = -ar * Bpi[i];
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + 0*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqPxP0TimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
*Cpi++ = ar * (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + 0*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqPxP0TimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ar,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
*Cpi++ = -ar * (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + ai*i) * (Bpr + Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqPxPxTimesRealKindN(RealKind *Cpr, RealKind *Cpi, RealKind ar, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
if( ai == zero ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = ai * Bpr[i];
}
}
} else {
if( ai == zero ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = ar * Bpi[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i] - ai * Bpi[i];
Cpi[i] = ai * Bpr[i] + ar * Bpi[i];
}
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + ai*i) * (Bpr - Bpi * i)
*-------------------------------------------------------------------------------------- */
void RealKindEqPxPxTimesRealKindG(RealKind *Cpr, RealKind *Cpi, RealKind ar, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSignedIndex n)
{
register mwSignedIndex i;
if( Bpi == NULL ) {
if( ai == zero ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = ai * Bpr[i];
}
}
} else {
if( ai == zero ) {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i];
Cpi[i] = -ar * Bpi[i];
}
} else {
for(i=0; i<n; i++) {
Cpr[i] = ar * Bpr[i] + ai * Bpi[i];
Cpi[i] = ai * Bpr[i] - ar * Bpi[i];
}
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + ai*i) * (Bpr + Bpi * i)T
*-------------------------------------------------------------------------------------- */
void RealKindEqPxPxTimesRealKindT(RealKind *Cpr, RealKind *Cpi, RealKind ar, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
*Cpi++ = ai * (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr) - ai * (*bpi);
*Cpi++ = ai * (*bpr) + ar * (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* C = (ar + ai*i) * (Bpr + Bpi * i)C
*-------------------------------------------------------------------------------------- */
void RealKindEqPxPxTimesRealKindC(RealKind *Cpr, RealKind *Cpi, RealKind ar, RealKind ai,
RealKind *Bpr, RealKind *Bpi, mwSize m2, mwSize n2, mwSignedIndex p)
{
register mwSize i, j;
RealKind *bpr, *Br = Bpr;
RealKind *bpi, *Bi = Bpi;
register mwSignedIndex ip;
if( Bpi == NULL ) {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr);
*Cpi++ = ai * (*bpr);
bpr += m2;
}
Bpr++;
}
Br += m2 * n2;
}
} else {
for( ip=0; ip<p; ip++ ) {
Bpr = Br;
Bpi = Bi;
for( i=0; i<m2; i++ ) {
bpr = Bpr;
bpi = Bpi;
for( j=0; j<n2; j++ ) {
*Cpr++ = ar * (*bpr) + ai * (*bpi);
*Cpi++ = ai * (*bpr) - ar * (*bpi);
bpr += m2;
bpi += m2;
}
Bpr++;
Bpi++;
}
Br += m2 * n2;
Bi += m2 * n2;
}
}
}
/*--------------------------------------------------------------------------------------
* Fill the upper triangle with contents of the lower triangle
*-------------------------------------------------------------------------------------- */
void xFILLPOS(RealKind *Cpr, mwSignedIndex n)
{
RealKind *source, *target;
register mwSignedIndex i, j;
source = Cpr + 1;
target = Cpr + n;
for( i=1; i<n; i++ ) {
for( j=i; j<n; j++ ) {
*target = *source;
target += n;
source++;
}
source += i + 1;
target = source + n - 1;
}
}
/*--------------------------------------------------------------------------------------
* Add the -Cpr transpose to the current array
*-------------------------------------------------------------------------------------- */
void xFILLNEG(RealKind *Cpr, mwSignedIndex n)
{
RealKind *source, *target;
register mwSignedIndex i, j;
source = Cpr;
target = Cpr;
for( i=0; i<n; i++ ) {
for( j=i; j<n; j++ ) {
*target -= *source;
if( i != j ) {
*source = -(*target);
}
target += n;
source++;
}
source += i + 1;
target = source;
}
}
/*------------------------------------------------------------------------------------------
* Dot Product type calculation. For conjugate cases, where ai == -1 or bi == -1, use custom
* loops instead of doing the actual multiply by ai or bi. Also, use loop unrolling to speed
* up the calculations and improve the accuracy. For PC WinXP, the balance between speed and
* accuracy seemed to be optimal at a blocksize of 10. If the MATLAB mode is set, then just
* duplicate the BLAS calls that MATLAB uses (slower since it accesses the variables twice).
*------------------------------------------------------------------------------------------ */
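/*------------------------------------------------------------------------------------------
* Illustrative sketch (not part of mtimesx): the same remainder-first, 10-way unrolled
* pattern used by the loops below, shown for a plain real dot product. The k%10 leftover
* elements are summed first so the main loop can step by 10.
*------------------------------------------------------------------------------------------ */
static double example_unrolled_dot(const double *a, const double *b, mwSignedIndex k)
{
register double s = 0.0;
register mwSignedIndex i;
mwSignedIndex k10 = k % 10;
for( i=0; i<k10; i++ ) {
s += a[i] * b[i];
}
for( i=k10; i<k; i+=10 ) {
s += a[i ] * b[i ] + a[i+1] * b[i+1] + a[i+2] * b[i+2] + a[i+3] * b[i+3]
+ a[i+4] * b[i+4] + a[i+5] * b[i+5] + a[i+6] * b[i+6] + a[i+7] * b[i+7]
+ a[i+8] * b[i+8] + a[i+9] * b[i+9];
}
return s;
}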
#ifdef _OPENMP
/*------------------------------------------------------------------------------------------
* Interface for OpenMP enabled compiling.
*------------------------------------------------------------------------------------------ */
struct RealKindComplex RealKindDotProduct(mwSignedIndex k,
RealKind *Apr, RealKind *Api, RealKind ai,
RealKind *Bpr, RealKind *Bpi, RealKind bi,
int dot_method)
{
struct RealKindComplex RealKindZZ[OMP_DOT_ARRAY_SIZE];
struct RealKindComplex z;
struct RealKindComplex *zz;
int i, j, n;
if( dot_method != METHOD_LOOPS_OMP ||
(mtimesx_mode == MTIMESX_SPEED_OMP && k < OMP_DOT_SMALL) ) {
if( debug_message ) {
if( dot_method == METHOD_BLAS ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xDOT) "\n");
} else {
mexPrintf("MTIMESX: LOOPS dot product\n");
}
debug_message = 0;
}
z = RealKindDotProductX(k, Apr, Api, ai, Bpr, Bpi, bi, dot_method);
} else {
if( debug_message ) {
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS dot product\n");
debug_message = 0;
}
if( max_threads <= OMP_DOT_ARRAY_SIZE ) {
zz = RealKindZZ;
} else {
zz = mxMalloc(max_threads * sizeof(*zz));
}
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Apr_, *Api_, *Bpr_, *Bpi_;
mwSize k_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = k / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
k_ = k - offset;
} else {
k_ = blocksize;
}
Apr_ = Apr + offset;
if( Api ) {
Api_ = Api + offset;
} else {
Api_ = NULL;
}
Bpr_ = Bpr + offset;
if( Bpi ) {
Bpi_ = Bpi + offset;
} else {
Bpi_ = NULL;
}
zz[thread_num] = RealKindDotProductX(k_, Apr_, Api_, ai, Bpr_, Bpi_, bi, dot_method);
}
/* Combine thread results in a pre-determined order, so result with same inputs will be the */
/* same regardless of the order in which the threads execute and complete. Use a binary */
/* reduction scheme to maintain accuracy. */
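/* Example trace (illustrative): with threads_used = 5 the partial sums z0..z4 combine */
/* as ((z0+z1) + (z2+(z3+z4))) -- a fixed pairwise tree rather than a left-to-right */
/* running sum, so the rounding pattern does not depend on thread completion order. */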
n = threads_used - 1;
while( n ) {
if( !(n & 1) ) {
zz[n-1].r += zz[n].r;
zz[n-1].i += zz[n].i;
n--;
}
for( i=0, j=0; i<n; i+=2, j++ ) {
zz[j].r = zz[i].r + zz[i+1].r;
zz[j].i = zz[i].i + zz[i+1].i;
}
n >>= 1;
}
z = zz[0];
/*
z.r = zero;
z.i = zero;
for( i=0; i<threads_used; i++ ) {
z.r += zz[i].r;
z.i += zz[i].i;
}
*/
if( zz != RealKindZZ ) {
mxFree(zz);
}
}
return z;
}
struct RealKindComplex RealKindDotProductX(mwSignedIndex k,
RealKind *Apr, RealKind *Api, RealKind ai,
RealKind *Bpr, RealKind *Bpi, RealKind bi,
int dot_method)
#else
/*------------------------------------------------------------------------------------------
* Interface for non-OpenMP enabled compiling.
*------------------------------------------------------------------------------------------ */
struct RealKindComplex RealKindDotProduct(mwSignedIndex k,
RealKind *Apr, RealKind *Api, RealKind ai,
RealKind *Bpr, RealKind *Bpi, RealKind bi,
int dot_method)
#endif
{
register double sr = 0.0, si = 0.0;
register mwSignedIndex i;
struct RealKindComplex z;
mwSignedIndex k10, inc = 1;
if( dot_method == METHOD_BLAS ) {
#ifndef _OPENMP
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xDOT) "\n");
debug_message = 0;
}
#endif
sr = xDOT( &k, Apr, &inc, Bpr, &inc );
if( Api != NULL ) {
if( Api != Bpi || ai == bi ) {
si = xDOT( &k, Api, &inc, Bpr, &inc ) * ai;
}
if( Bpi != NULL ) {
sr -= xDOT( &k, Api, &inc, Bpi, &inc ) * ai * bi;
if( Api != Bpi ) {
si += xDOT( &k, Apr, &inc, Bpi, &inc ) * bi;
} else if( ai == bi ){
si += si;
}
}
} else if( Bpi != NULL ) {
si = xDOT( &k, Apr, &inc, Bpi, &inc ) * bi;
}
z.r = (RealKind) sr;
z.i = (RealKind) si;
return z;
}
#ifndef _OPENMP
if( debug_message ) {
mexPrintf("MTIMESX: LOOPS dot product\n");
debug_message = 0;
}
#endif
k10 = k % 10;
if( Api != NULL ) {
if( Bpi != NULL ) { /* (complex vector) dot (complex vector) */
if( ai == one ) {
if( bi == one ) {
if( Apr == Bpr ) {
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Apr[i] - Api[i] * Api[i];
si += Api[i] * Apr[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Apr[i ] - Api[i ] * Api[i ]
+ Apr[i+1] * Apr[i+1] - Api[i+1] * Api[i+1]
+ Apr[i+2] * Apr[i+2] - Api[i+2] * Api[i+2]
+ Apr[i+3] * Apr[i+3] - Api[i+3] * Api[i+3]
+ Apr[i+4] * Apr[i+4] - Api[i+4] * Api[i+4]
+ Apr[i+5] * Apr[i+5] - Api[i+5] * Api[i+5]
+ Apr[i+6] * Apr[i+6] - Api[i+6] * Api[i+6]
+ Apr[i+7] * Apr[i+7] - Api[i+7] * Api[i+7]
+ Apr[i+8] * Apr[i+8] - Api[i+8] * Api[i+8]
+ Apr[i+9] * Apr[i+9] - Api[i+9] * Api[i+9];
si += Api[i ] * Apr[i ]
+ Api[i+1] * Apr[i+1]
+ Api[i+2] * Apr[i+2]
+ Api[i+3] * Apr[i+3]
+ Api[i+4] * Apr[i+4]
+ Api[i+5] * Apr[i+5]
+ Api[i+6] * Apr[i+6]
+ Api[i+7] * Apr[i+7]
+ Api[i+8] * Apr[i+8]
+ Api[i+9] * Apr[i+9];
}
si += si;
} else {
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Bpr[i] - Api[i] * Bpi[i];
si += Api[i] * Bpr[i] + Apr[i] * Bpi[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Bpr[i ] - Api[i ] * Bpi[i ]
+ Apr[i+1] * Bpr[i+1] - Api[i+1] * Bpi[i+1]
+ Apr[i+2] * Bpr[i+2] - Api[i+2] * Bpi[i+2]
+ Apr[i+3] * Bpr[i+3] - Api[i+3] * Bpi[i+3]
+ Apr[i+4] * Bpr[i+4] - Api[i+4] * Bpi[i+4]
+ Apr[i+5] * Bpr[i+5] - Api[i+5] * Bpi[i+5]
+ Apr[i+6] * Bpr[i+6] - Api[i+6] * Bpi[i+6]
+ Apr[i+7] * Bpr[i+7] - Api[i+7] * Bpi[i+7]
+ Apr[i+8] * Bpr[i+8] - Api[i+8] * Bpi[i+8]
+ Apr[i+9] * Bpr[i+9] - Api[i+9] * Bpi[i+9];
si += Api[i ] * Bpr[i ] + Apr[i ] * Bpi[i ]
+ Api[i+1] * Bpr[i+1] + Apr[i+1] * Bpi[i+1]
+ Api[i+2] * Bpr[i+2] + Apr[i+2] * Bpi[i+2]
+ Api[i+3] * Bpr[i+3] + Apr[i+3] * Bpi[i+3]
+ Api[i+4] * Bpr[i+4] + Apr[i+4] * Bpi[i+4]
+ Api[i+5] * Bpr[i+5] + Apr[i+5] * Bpi[i+5]
+ Api[i+6] * Bpr[i+6] + Apr[i+6] * Bpi[i+6]
+ Api[i+7] * Bpr[i+7] + Apr[i+7] * Bpi[i+7]
+ Api[i+8] * Bpr[i+8] + Apr[i+8] * Bpi[i+8]
+ Api[i+9] * Bpr[i+9] + Apr[i+9] * Bpi[i+9];
}
}
} else {
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Bpr[i] + Api[i] * Bpi[i];
si += Api[i] * Bpr[i] - Apr[i] * Bpi[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Bpr[i ] + Api[i ] * Bpi[i ]
+ Apr[i+1] * Bpr[i+1] + Api[i+1] * Bpi[i+1]
+ Apr[i+2] * Bpr[i+2] + Api[i+2] * Bpi[i+2]
+ Apr[i+3] * Bpr[i+3] + Api[i+3] * Bpi[i+3]
+ Apr[i+4] * Bpr[i+4] + Api[i+4] * Bpi[i+4]
+ Apr[i+5] * Bpr[i+5] + Api[i+5] * Bpi[i+5]
+ Apr[i+6] * Bpr[i+6] + Api[i+6] * Bpi[i+6]
+ Apr[i+7] * Bpr[i+7] + Api[i+7] * Bpi[i+7]
+ Apr[i+8] * Bpr[i+8] + Api[i+8] * Bpi[i+8]
+ Apr[i+9] * Bpr[i+9] + Api[i+9] * Bpi[i+9];
si += Api[i ] * Bpr[i ] - Apr[i ] * Bpi[i ]
+ Api[i+1] * Bpr[i+1] - Apr[i+1] * Bpi[i+1]
+ Api[i+2] * Bpr[i+2] - Apr[i+2] * Bpi[i+2]
+ Api[i+3] * Bpr[i+3] - Apr[i+3] * Bpi[i+3]
+ Api[i+4] * Bpr[i+4] - Apr[i+4] * Bpi[i+4]
+ Api[i+5] * Bpr[i+5] - Apr[i+5] * Bpi[i+5]
+ Api[i+6] * Bpr[i+6] - Apr[i+6] * Bpi[i+6]
+ Api[i+7] * Bpr[i+7] - Apr[i+7] * Bpi[i+7]
+ Api[i+8] * Bpr[i+8] - Apr[i+8] * Bpi[i+8]
+ Api[i+9] * Bpr[i+9] - Apr[i+9] * Bpi[i+9];
}
}
} else {
if( bi == one ) {
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Bpr[i] + Api[i] * Bpi[i];
si += Apr[i] * Bpi[i] - Api[i] * Bpr[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Bpr[i ] + Api[i ] * Bpi[i ]
+ Apr[i+1] * Bpr[i+1] + Api[i+1] * Bpi[i+1]
+ Apr[i+2] * Bpr[i+2] + Api[i+2] * Bpi[i+2]
+ Apr[i+3] * Bpr[i+3] + Api[i+3] * Bpi[i+3]
+ Apr[i+4] * Bpr[i+4] + Api[i+4] * Bpi[i+4]
+ Apr[i+5] * Bpr[i+5] + Api[i+5] * Bpi[i+5]
+ Apr[i+6] * Bpr[i+6] + Api[i+6] * Bpi[i+6]
+ Apr[i+7] * Bpr[i+7] + Api[i+7] * Bpi[i+7]
+ Apr[i+8] * Bpr[i+8] + Api[i+8] * Bpi[i+8]
+ Apr[i+9] * Bpr[i+9] + Api[i+9] * Bpi[i+9];
si += Apr[i ] * Bpi[i ] - Api[i ] * Bpr[i ]
+ Apr[i+1] * Bpi[i+1] - Api[i+1] * Bpr[i+1]
+ Apr[i+2] * Bpi[i+2] - Api[i+2] * Bpr[i+2]
+ Apr[i+3] * Bpi[i+3] - Api[i+3] * Bpr[i+3]
+ Apr[i+4] * Bpi[i+4] - Api[i+4] * Bpr[i+4]
+ Apr[i+5] * Bpi[i+5] - Api[i+5] * Bpr[i+5]
+ Apr[i+6] * Bpi[i+6] - Api[i+6] * Bpr[i+6]
+ Apr[i+7] * Bpi[i+7] - Api[i+7] * Bpr[i+7]
+ Apr[i+8] * Bpi[i+8] - Api[i+8] * Bpr[i+8]
+ Apr[i+9] * Bpi[i+9] - Api[i+9] * Bpr[i+9];
}
} else {
if( Apr == Bpr ) {
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Apr[i] - Api[i] * Api[i];
si -= Api[i] * Apr[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Apr[i ] - Api[i ] * Api[i ]
+ Apr[i+1] * Apr[i+1] - Api[i+1] * Api[i+1]
+ Apr[i+2] * Apr[i+2] - Api[i+2] * Api[i+2]
+ Apr[i+3] * Apr[i+3] - Api[i+3] * Api[i+3]
+ Apr[i+4] * Apr[i+4] - Api[i+4] * Api[i+4]
+ Apr[i+5] * Apr[i+5] - Api[i+5] * Api[i+5]
+ Apr[i+6] * Apr[i+6] - Api[i+6] * Api[i+6]
+ Apr[i+7] * Apr[i+7] - Api[i+7] * Api[i+7]
+ Apr[i+8] * Apr[i+8] - Api[i+8] * Api[i+8]
+ Apr[i+9] * Apr[i+9] - Api[i+9] * Api[i+9];
si -= Api[i ] * Apr[i ]
+ Api[i+1] * Apr[i+1]
+ Api[i+2] * Apr[i+2]
+ Api[i+3] * Apr[i+3]
+ Api[i+4] * Apr[i+4]
+ Api[i+5] * Apr[i+5]
+ Api[i+6] * Apr[i+6]
+ Api[i+7] * Apr[i+7]
+ Api[i+8] * Apr[i+8]
+ Api[i+9] * Apr[i+9];
}
si += si;
} else {
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Bpr[i] - Api[i] * Bpi[i];
si -= Api[i] * Bpr[i] + Apr[i] * Bpi[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Bpr[i ] - Api[i ] * Bpi[i ]
+ Apr[i+1] * Bpr[i+1] - Api[i+1] * Bpi[i+1]
+ Apr[i+2] * Bpr[i+2] - Api[i+2] * Bpi[i+2]
+ Apr[i+3] * Bpr[i+3] - Api[i+3] * Bpi[i+3]
+ Apr[i+4] * Bpr[i+4] - Api[i+4] * Bpi[i+4]
+ Apr[i+5] * Bpr[i+5] - Api[i+5] * Bpi[i+5]
+ Apr[i+6] * Bpr[i+6] - Api[i+6] * Bpi[i+6]
+ Apr[i+7] * Bpr[i+7] - Api[i+7] * Bpi[i+7]
+ Apr[i+8] * Bpr[i+8] - Api[i+8] * Bpi[i+8]
+ Apr[i+9] * Bpr[i+9] - Api[i+9] * Bpi[i+9];
si -= Api[i ] * Bpr[i ] + Apr[i ] * Bpi[i ]
+ Api[i+1] * Bpr[i+1] + Apr[i+1] * Bpi[i+1]
+ Api[i+2] * Bpr[i+2] + Apr[i+2] * Bpi[i+2]
+ Api[i+3] * Bpr[i+3] + Apr[i+3] * Bpi[i+3]
+ Api[i+4] * Bpr[i+4] + Apr[i+4] * Bpi[i+4]
+ Api[i+5] * Bpr[i+5] + Apr[i+5] * Bpi[i+5]
+ Api[i+6] * Bpr[i+6] + Apr[i+6] * Bpi[i+6]
+ Api[i+7] * Bpr[i+7] + Apr[i+7] * Bpi[i+7]
+ Api[i+8] * Bpr[i+8] + Apr[i+8] * Bpi[i+8]
+ Api[i+9] * Bpr[i+9] + Apr[i+9] * Bpi[i+9];
}
}
}
}
z.i = (RealKind) si;
} else { /* (complex vector) dot (real vector) */
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Bpr[i];
si += Api[i] * Bpr[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Bpr[i ]
+ Apr[i+1] * Bpr[i+1]
+ Apr[i+2] * Bpr[i+2]
+ Apr[i+3] * Bpr[i+3]
+ Apr[i+4] * Bpr[i+4]
+ Apr[i+5] * Bpr[i+5]
+ Apr[i+6] * Bpr[i+6]
+ Apr[i+7] * Bpr[i+7]
+ Apr[i+8] * Bpr[i+8]
+ Apr[i+9] * Bpr[i+9];
si += Api[i ] * Bpr[i ]
+ Api[i+1] * Bpr[i+1]
+ Api[i+2] * Bpr[i+2]
+ Api[i+3] * Bpr[i+3]
+ Api[i+4] * Bpr[i+4]
+ Api[i+5] * Bpr[i+5]
+ Api[i+6] * Bpr[i+6]
+ Api[i+7] * Bpr[i+7]
+ Api[i+8] * Bpr[i+8]
+ Api[i+9] * Bpr[i+9];
}
z.i = (RealKind) (si * ai);
}
} else {
if( Bpi != NULL ) { /* (real vector) dot (complex vector) */
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Bpr[i];
si += Apr[i] * Bpi[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Bpr[i ]
+ Apr[i+1] * Bpr[i+1]
+ Apr[i+2] * Bpr[i+2]
+ Apr[i+3] * Bpr[i+3]
+ Apr[i+4] * Bpr[i+4]
+ Apr[i+5] * Bpr[i+5]
+ Apr[i+6] * Bpr[i+6]
+ Apr[i+7] * Bpr[i+7]
+ Apr[i+8] * Bpr[i+8]
+ Apr[i+9] * Bpr[i+9];
si += Apr[i ] * Bpi[i ]
+ Apr[i+1] * Bpi[i+1]
+ Apr[i+2] * Bpi[i+2]
+ Apr[i+3] * Bpi[i+3]
+ Apr[i+4] * Bpi[i+4]
+ Apr[i+5] * Bpi[i+5]
+ Apr[i+6] * Bpi[i+6]
+ Apr[i+7] * Bpi[i+7]
+ Apr[i+8] * Bpi[i+8]
+ Apr[i+9] * Bpi[i+9];
}
z.i = (RealKind) (si * bi);
} else { /* (real vector) dot (real vector) */
for( i=0; i<k10; i++ ) {
sr += Apr[i] * Bpr[i];
}
for( i=k10; i<k; i+=10 ) {
sr += Apr[i ] * Bpr[i ]
+ Apr[i+1] * Bpr[i+1]
+ Apr[i+2] * Bpr[i+2]
+ Apr[i+3] * Bpr[i+3]
+ Apr[i+4] * Bpr[i+4]
+ Apr[i+5] * Bpr[i+5]
+ Apr[i+6] * Bpr[i+6]
+ Apr[i+7] * Bpr[i+7]
+ Apr[i+8] * Bpr[i+8]
+ Apr[i+9] * Bpr[i+9];
}
}
}
z.r = (RealKind) sr;
return z;
}
/*----------------------------------------------------------------------------------------
* Outer Product calculation. Use custom loops for all of the special cases involving
* conjugates and transposes to minimize the total number of operations involved.
*---------------------------------------------------------------------------------------- */
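/*------------------------------------------------------------------------------------------
* Illustrative identity (not from the source): for a conjugated A the loops below compute
* C(i,j) = conj(a_i) * b_j = (ar - ai*i)*(br + bi*i) = (ar*br + ai*bi) + (ar*bi - ai*br)*i,
* which is why the 'C'/'G' branches flip the sign of the Api terms rather than multiplying
* by -1 explicitly.
*------------------------------------------------------------------------------------------ */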
#ifdef _OPENMP
/*------------------------------------------------------------------------------------------
* Interface for OpenMP enabled compiling.
*------------------------------------------------------------------------------------------ */
void RealKindOuterProduct(mwSignedIndex m, mwSignedIndex n,
RealKind *Apr, RealKind *Api, char transa,
RealKind *Bpr, RealKind *Bpi, char transb,
RealKind *Cpr, RealKind *Cpi, int outer_method)
{
int t;
RealKind ai, bi;
t = max_threads;
if( n < t ) t = n;
if( outer_method != METHOD_LOOPS_OMP ||
(mtimesx_mode == MTIMESX_SPEED_OMP && m*n < OMP_OUTER_SMALL) ) {
if( debug_message ) {
if( outer_method == METHOD_BLAS ) {
if( Apr != Bpr || 1 ) { /* Force calls to generic xGER instead of xSYR */
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xGER) "\n");
} else {
if( Api ) {
ai = ( transa == 'G' || transa == 'C' ) ? -one : one;
bi = ( transb == 'G' || transb == 'C' ) ? -one : one;
if( ai == bi ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYR) " and " TOKENSTRING(xSYR2) "\n");
} else {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYR) " and " TOKENSTRING(xGER) "\n");
}
} else {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYR) "\n");
}
}
} else {
mexPrintf("MTIMESX: LOOPS outer product\n");
}
debug_message = 0;
}
RealKindOuterProductX(m, n, Apr, Api, transa, Bpr, Bpi, transb, Cpr, Cpi, outer_method);
} else {
if( debug_message ) {
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS outer product\n");
debug_message = 0;
}
omp_set_dynamic(1);
#pragma omp parallel num_threads(t)
{
RealKind *Bpr_, *Bpi_, *Cpr_, *Cpi_;
mwSize n_, blocksize, offset;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
blocksize = n / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
n_ = n - offset;
} else {
n_ = blocksize;
}
Bpr_ = Bpr + offset;
if( Bpi ) {
Bpi_ = Bpi + offset;
} else {
Bpi_ = NULL;
}
Cpr_ = Cpr + offset * m;
if( Cpi ) {
Cpi_ = Cpi + offset * m;
} else {
Cpi_ = NULL;
}
RealKindOuterProductX(m, n_, Apr, Api, transa, Bpr_, Bpi_, transb, Cpr_, Cpi_, outer_method);
}
}
}
void RealKindOuterProductX(mwSignedIndex m, mwSignedIndex n,
RealKind *Apr, RealKind *Api, char transa,
RealKind *Bpr, RealKind *Bpi, char transb,
RealKind *Cpr, RealKind *Cpi, int outer_method)
#else
/*------------------------------------------------------------------------------------------
* Interface for non-OpenMP enabled compiling.
*------------------------------------------------------------------------------------------ */
void RealKindOuterProduct(mwSignedIndex m, mwSignedIndex n,
RealKind *Apr, RealKind *Api, char transa,
RealKind *Bpr, RealKind *Bpi, char transb,
RealKind *Cpr, RealKind *Cpi, int outer_method)
#endif
{
register mwSignedIndex i, j;
mwSignedIndex kk;
mwSignedIndex inc = 1;
RealKind One = one;
RealKind ai, bi, aibi;
char uplo = 'L';
if( outer_method == METHOD_BLAS ) {
ai = ( transa == 'G' || transa == 'C' ) ? -one : one;
bi = ( transb == 'G' || transb == 'C' ) ? -one : one;
aibi = - ai * bi;
if( Apr != Bpr || 1 ) { /* Force calls to generic xGER instead of xSYR */
#ifndef _OPENMP
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xGER) "\n");
debug_message = 0;
}
#endif
xGER( M, N, ONE, Apr, INCX, Bpr, INCY, Cpr, M );
if( Api ) {
xGER( M, N, AI, Api, INCX, Bpr, INCY, Cpi, M );
if( Bpi ) {
xGER( M, N, AIBI, Api, INCX, Bpi, INCY, Cpr, M );
xGER( M, N, BI, Apr, INCX, Bpi, INCY, Cpi, M );
}
} else if( Bpi ) {
xGER( M, N, BI, Apr, INCX, Bpi, INCY, Cpi, M );
}
} else {
xSYR( UPLO, M, ONE, Apr, INCX, Cpr, M );
if( Api ) {
xSYR( UPLO, M, AIBI, Api, INCX, Cpr, M );
if( ai == bi ) {
#ifndef _OPENMP
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYR) " and " TOKENSTRING(xSYR2) "\n");
debug_message = 0;
}
#endif
xSYR2( UPLO, M, AI, Apr, INCX, Api, INCY, Cpi, M );
xFILLPOS(Cpi, m);
} else {
#ifndef _OPENMP
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYR) " and " TOKENSTRING(xGER) "\n");
debug_message = 0;
}
#endif
xGER( M, N, BI, Apr, INCX, Api, INCY, Cpi, M );
xFILLNEG(Cpi, m);
}
#ifndef _OPENMP
} else {
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xSYR) "\n");
debug_message = 0;
}
#endif
}
xFILLPOS(Cpr, m);
}
return;
}
#ifndef _OPENMP
if( debug_message ) {
mexPrintf("MTIMESX: LOOPS outer product\n");
debug_message = 0;
}
#endif
kk = 0;
if( Api ) {
if( Bpi ) {
if( (transa == 'C' || transa == 'G') && (transb == 'C' || transb == 'G') ) {
for( j=0; j<n; j++ ) { /* (ar + ai*i)(C or G) * (br + bi*i)(C or G) */
for( i=0; i<m; i++ ) {
Cpr[kk] = Apr[i] * Bpr[j] - Api[i] * Bpi[j];
Cpi[kk] = - Apr[i] * Bpi[j] - Api[i] * Bpr[j];
kk++;
}
}
} else if( (transa == 'C' || transa == 'G') && (transb == 'N' || transb == 'T') ) {
for( j=0; j<n; j++ ) { /* (ar + ai*i)(C or G) * (br + bi*i)(N or T) */
for( i=0; i<m; i++ ) {
Cpr[kk] = Apr[i] * Bpr[j] + Api[i] * Bpi[j];
Cpi[kk] = Apr[i] * Bpi[j] - Api[i] * Bpr[j];
kk++;
}
}
} else if( (transa == 'N' || transa == 'T') && (transb == 'C' || transb == 'G') ) {
for( j=0; j<n; j++ ) { /* (ar + ai*i)(N or T) * (br + bi*i)(C or G) */
for( i=0; i<m; i++ ) {
Cpr[kk] = Apr[i] * Bpr[j] + Api[i] * Bpi[j];
Cpi[kk] = Api[i] * Bpr[j] - Apr[i] * Bpi[j];
kk++;
}
}
} else { /* (ar + ai*i)(N or T) * (br + bi*i)(N or T) */
for( j=0; j<n; j++ ) {
for( i=0; i<m; i++ ) {
Cpr[kk] = Apr[i] * Bpr[j] - Api[i] * Bpi[j];
Cpi[kk] = Apr[i] * Bpi[j] + Api[i] * Bpr[j];
kk++;
}
}
}
} else {
if( transa == 'C' || transa == 'G' ) {
for( j=0; j<n; j++ ) { /* (ar + ai*i)(C or G) * (br)(N or T or C or G) */
for( i=0; i<m; i++ ) {
Cpr[kk] = Apr[i] * Bpr[j];
Cpi[kk] = - Api[i] * Bpr[j];
kk++;
}
}
} else {
for( j=0; j<n; j++ ) { /* (ar + ai*i)(N or T) * (br)(N or T or C or G) */
for( i=0; i<m; i++ ) {
Cpr[kk] = Apr[i] * Bpr[j];
Cpi[kk] = Api[i] * Bpr[j];
kk++;
}
}
}
}
} else {
if( Bpi ) {
if( transb == 'C' || transb == 'G' ) {
for( j=0; j<n; j++ ) { /* (ar)(N or T or C or G) * (br + bi*i)(C or G) */
for( i=0; i<m; i++ ) {
Cpr[kk] = Apr[i] * Bpr[j];
Cpi[kk] = - Apr[i] * Bpi[j];
kk++;
}
}
} else {
for( j=0; j<n; j++ ) { /* (ar)(N or T or C or G) * (br + bi*i)(N or T) */
for( i=0; i<m; i++ ) {
Cpr[kk] = Apr[i] * Bpr[j];
Cpi[kk] = Apr[i] * Bpi[j];
kk++;
}
}
}
} else {
for( j=0; j<n; j++ ) { /* (ar)(N or T or C or G) * (br)(N or T or C or G) */
for( i=0; i<m; i++ ) {
Cpr[kk++] = Apr[i] * Bpr[j];
}
}
}
}
}
/*--------------------------------------------------------------------------------------
*--------------------------------------------------------------------------------------
*--------------------------------------------------------------------------------------
*--------------------------------------------------------------------------------------
*-------------------------------------------------------------------------------------- */
mxArray *RealScalarTimesReal(mxArray *A, char transa, mwSize m1, mwSize n1,
mxArray *B, char transb, mwSize m2, mwSize n2)
{
mwSize nzmax, Bndim, Cndim, Cp, p;
mwSize *Bdims, *Cdims;
mwSignedIndex mn, n, k;
mxArray *C, *result, *Bt = NULL;
RealKind *Apr, *Api, *Bpr, *Bpi, *Cpr, *Cpi;
RealKind ar, ai, br, bi, sr, si;
char trans;
mxComplexity complexflag;
mwIndex *jc;
int scalar_method;
/*----- */
if( m2 == 1 && n2 == 1 ) { /* Make sure scalar is A and array is B */
if( m1 == 1 && n1 == 1 ) { /* Check for scalar * scalar */
if( debug_message ) {
mexPrintf("MTIMESX: Inline (scalar * scalar) code\n");
debug_message = 0;
}
Apr = mxGetData(A);
ar = *Apr;
Api = mxGetImagData(A);
ai = Api ? *Api : zero;
if( transa == 'C' || transa == 'G' ) {
ai = -ai;
}
Bpr = mxGetData(B);
br = *Bpr;
Bpi = mxGetImagData(B);
bi = Bpi ? *Bpi : zero;
if( transb == 'C' || transb == 'G' ) {
bi = -bi;
}
sr = (RealKind) (((double)ar) * ((double)br) - ((double)ai) * ((double)bi));
si = (RealKind) (((double)ar) * ((double)bi) + ((double)ai) * ((double)br));
complexflag = (si == zero) ? mxREAL : mxCOMPLEX;
if( mxIsSparse(A) || mxIsSparse(B) ) {
result = mxCreateSparse(1, 1, 1, complexflag);
*mxGetIr(result) = 0;
jc = mxGetJc(result);
jc[0] = 0;
jc[1] = 1;
} else {
result = mxCreateNumericMatrix(1, 1, MatlabReturnType, complexflag);
}
Cpr = mxGetData(result);
*Cpr = (RealKind) sr;
if( complexflag == mxCOMPLEX ) {
Cpi = mxGetImagData(result);
*Cpi = (RealKind) si;
}
return result;
} else {
C = A;
A = B;
B = C;
trans = transa;
transa = transb;
transb = trans;
}
}
/*--------------------------------------------------------------------------------------
* Check for multiplying by 1 when no actual transpose or conjugate is involved. In this
* case just return a shared data copy, which is very fast since no data copying is done.
* Also, if there *is* a transpose but the first two dimensions are a vector and there
* is no actual conjugate involved, we can return a shared data copy with a slight
* modification of the dimensions ... just switch the first two.
*-------------------------------------------------------------------------------------- */
Apr = mxGetData(A);
Api = mxGetImagData(A);
ar = *Apr;
ai = Api ? ((transa == 'C' || transa == 'G') ? -(*Api) : *Api) : zero;
Bpr = mxGetData(B);
Bpi = mxGetImagData(B);
Bndim = mxGetNumberOfDimensions(B);
Bdims = (mwSize *) mxGetDimensions(B);
if( ar == one && ai == zero ) {
if( transb == 'N' || (transb == 'G' && Bpi == NULL) ) {
if( debug_message ) {
mexPrintf("MTIMESX: Shared data copy\n");
}
result = mxCreateSharedDataCopy(B);
return result;
} else if( (Bdims[0] == 1 || Bdims[1] == 1) && (transb == 'T' || (transb == 'C' && Bpi == NULL)) ) {
if( debug_message ) {
mexPrintf("MTIMESX: Shared data copy\n");
}
result = mxCreateSharedDataCopy(B);
Cdims = mxMalloc( Bndim * sizeof(*Cdims) );
for( Cp=2; Cp<Bndim; Cp++) {
Cdims[Cp] = Bdims[Cp];
}
Cdims[0] = Bdims[1];
Cdims[1] = Bdims[0];
mxSetDimensions(result,Cdims,Bndim);
mxFree(Cdims);
return result;
}
}
/*--------------------------------------------------------------------------------------
* For sparse matrix, do the transpose now and then do the scalar multiply. That way if
* B is real and A is complex we are only doing the transpose work on one array.
*-------------------------------------------------------------------------------------- */
if( (transb == 'T' || transb == 'C') && mxIsSparse(B) ) {
if( debug_message ) {
mexPrintf("MTIMESX: Callback to MATLAB to transpose a sparse matrix\n");
}
mexCallMATLAB(1, &Bt, 1, &B, "transpose");
B = Bt;
transb = (transb == 'T') ? 'N' : 'G';
}
if( mxIsSparse(B) ) {
jc = mxGetJc(B);
n2 = mxGetN(B);
n = jc[n2];
} else {
n = mxGetNumberOfElements(B);
}
if( n < 0 ) {
mexErrMsgTxt("Number of elements too large ... overflows a signed integer");
}
complexflag = (n > 0 && (mxIsComplex(A) || mxIsComplex(B))) ? mxCOMPLEX : mxREAL;
/*-------------------------------------------------------------------------------
* Construct the dimensions of the result. Also use the p variable to keep track
* of the total number of individual matrix multiplies that are involved. The
* first two dimensions are simply the result of a single matrix multiply,
* accounting for the transb pre-operation. The remaining dimensions are simply
* copied from B.
*------------------------------------------------------------------------------- */
Cndim = Bndim;
Cdims = mxMalloc( Cndim * sizeof(*Cdims) );
if( transb == 'N' || transb == 'G' ) {
Cdims[0] = Bdims[0];
Cdims[1] = Bdims[1];
} else {
Cdims[0] = Bdims[1];
Cdims[1] = Bdims[0];
}
p = 1;
for( Cp=2; Cp<Cndim; Cp++) {
p *= (Cdims[Cp] = Bdims[Cp]);
}
/*------------------------------------------------------------------------------
* Create output array
*------------------------------------------------------------------------------ */
if( mxIsSparse(B) ) {
result = mxCreateSparse(Cdims[0], Cdims[1], n, complexflag);
memcpy(mxGetIr(result), mxGetIr(B), n * sizeof(mwIndex));
memcpy(mxGetJc(result), mxGetJc(B), (n2+1) * sizeof(mwIndex));
} else if( mxGetNumberOfElements(B) == 0 ) {
result = mxCreateNumericArray(Cndim, Cdims, MatlabReturnType, mxREAL);
mxFree(Cdims);
return result;
} else {
result = mxCreateNumericArray(Cndim, Cdims, MatlabReturnType, complexflag);
}
mxFree(Cdims);
C = result;
Cpr = mxGetData(C);
Cpi = mxGetImagData(C);
m2 = Bdims[0];
n2 = Bdims[1];
mn = m2 * n2;
if( n == 0 ) { /* If result is empty, just return right now */
return result;
}
Bpr = mxGetData(B);
Bpi = mxGetImagData(B);
/*------------------------------------------------------------------------------------------------
* Check for matlab flag. If set, then use a BLAS call for this function.
*------------------------------------------------------------------------------------------------ */
/* if( matlab ) {
* Future upgrade ... insert BLAS calls here and return?
* } */
/*------------------------------------------------------------------------------------------------
* If the matrix is really a vector, then there is no need to do an actual transpose. We can
* simply strip the transpose operation away and rely on the dimensions to do the transpose.
*------------------------------------------------------------------------------------------------ */
if( m2 == 1 || n2 == 1 ) {
if( transb == 'T' ) transb = 'N';
if( transb == 'C' ) transb = 'G';
}
/*----------------------------------------------------------------------------
* Get scalar product method to use.
*---------------------------------------------------------------------------- */
switch( mtimesx_mode )
{
case MTIMESX_BLAS:
scalar_method = METHOD_BLAS;
break;
case MTIMESX_LOOPS:
scalar_method = METHOD_LOOPS;
break;
case MTIMESX_LOOPS_OMP:
scalar_method = METHOD_LOOPS_OMP;
break;
case MTIMESX_SPEED_OMP:
if( max_threads > 1 ) {
scalar_method = METHOD_LOOPS_OMP;
break;
}
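/* fall through: with a single thread, use the single-threaded speed heuristic */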
case MTIMESX_MATLAB:
case MTIMESX_SPEED:
if( ai != zero && Bpi ) {
scalar_method = METHOD_LOOPS;
} else {
scalar_method = METHOD_BLAS;
}
break;
}
/*------------------------------------------------------------------------------------------------
* Do the scalar multiply.
*------------------------------------------------------------------------------------------------ */
RealTimesScalar(Cpr, Cpi, Bpr, Bpi, transb, m2, n2, ar, ai, n, p, scalar_method);
/*-------------------------------------------------------------------------------------------
* If the imaginary part is all zero, then free it and set the pointer to NULL.
*------------------------------------------------------------------------------------------- */
if( AllRealZero(Cpi, n) ) {
mxFree(Cpi);
Cpi = NULL;
mxSetImagData(C, NULL);
}
/*-------------------------------------------------------------------------------------------
* Clean up sparse matrix and realloc if appropriate.
*------------------------------------------------------------------------------------------- */
if( mxIsSparse(C) ) {
nzmax = mxGetNzmax(C);
k = spclean(C);
if( k == 0 ) k = 1;
if( nzmax - k > REALLOCTOL ) {
if( debug_message ) {
mexPrintf("MTIMESX: Reallocate sparse matrix\n");
}
mxSetPr(C, myRealloc(Cpr, k * sizeof(RealKind)));
mxSetIr(C, myRealloc(mxGetIr(C), k * sizeof(mwIndex)));
if( Cpi != NULL ) {
mxSetPi(C, myRealloc(Cpi, k * sizeof(RealKind)));
}
mxSetNzmax(C, k);
}
}
if( Bt != NULL ) {
mxDestroyArray(Bt);
}
return result;
}
/*--------------------------------------------------------------------------------------
*--------------------------------------------------------------------------------------
*--------------------------------------------------------------------------------------
*--------------------------------------------------------------------------------------
*-------------------------------------------------------------------------------------- */
/*-------------------------------------------------------------------------------------------
* Scalar times array slice.
*------------------------------------------------------------------------------------------- */
/*-------------------------------------------------------------------------------------------
* OpenMP interface.
*------------------------------------------------------------------------------------------- */
#ifdef _OPENMP
void RealTimesScalar(RealKind *Cpr, RealKind *Cpi, RealKind *Bpr, RealKind *Bpi, char transb,
mwSize m2, mwSize n2, RealKind ar, RealKind ai, mwSize n, mwSize p,
int scalar_method)
{
mwSize q;
if( transb == 'N' || transb == 'G' ) {
q = n;
} else {
q = m2 * n2;
}
/*------------------------------------------------------------------------------------------------
* For small sizes, don't bother with the OpenMP overhead unless forced.
*------------------------------------------------------------------------------------------------ */
if( scalar_method != METHOD_LOOPS_OMP ||
(mtimesx_mode == MTIMESX_SPEED_OMP && q < OMP_SCALAR_SMALL) ) {
if( debug_message ) {
if( scalar_method == METHOD_BLAS && (transb == 'N' || transb == 'G') ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xAXPY) "\n");
} else {
mexPrintf("MTIMESX: LOOPS scalar multiply\n");
}
debug_message = 0;
}
RealTimesScalarX(Cpr, Cpi, Bpr, Bpi, transb, m2, n2, ar, ai, n, p, scalar_method);
} else {
if( debug_message ) {
mexPrintf("MTIMESX: OpenMP multi-threaded LOOPS scalar multiply\n");
debug_message = 0;
}
omp_set_dynamic(1);
#pragma omp parallel num_threads(max_threads)
{
RealKind *Cpr_, *Cpi_, *Bpr_, *Bpi_;
mwSize n_, p_, blocksize, offset, f;
int thread_num = omp_get_thread_num();
int num_threads = omp_get_num_threads();
#pragma omp master
{
threads_used = num_threads;
}
if( transb == 'N' || transb == 'G' ) {
blocksize = n / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
n_ = n - offset;
} else {
n_ = blocksize;
}
f = offset;
} else {
blocksize = p / num_threads;
offset = thread_num * blocksize;
if( thread_num == num_threads-1 ) {
p_ = p - offset;
} else {
p_ = blocksize;
}
f = offset * m2 * n2;
}
Cpr_ = Cpr + f;
if( Cpi ) {
Cpi_ = Cpi + f;
} else {
Cpi_ = NULL;
}
Bpr_ = Bpr + f;
if( Bpi ) {
Bpi_ = Bpi + f;
} else {
Bpi_ = NULL;
}
RealTimesScalarX(Cpr_, Cpi_, Bpr_, Bpi_, transb, m2, n2, ar, ai, n_, p_, scalar_method);
}
}
}
void RealTimesScalarX(RealKind *Cpr, RealKind *Cpi, RealKind *Bpr, RealKind *Bpi, char transb,
mwSize m2, mwSize n2, RealKind ar, RealKind ai, mwSize n, mwSize p,
int scalar_method)
#else
/*-------------------------------------------------------------------------------------------
* Non-OpenMP interface.
*------------------------------------------------------------------------------------------- */
void RealTimesScalar(RealKind *Cpr, RealKind *Cpi, RealKind *Bpr, RealKind *Bpi, char transb,
mwSize m2, mwSize n2, RealKind ar, RealKind ai, mwSize n, mwSize p,
int scalar_method)
#endif
{
mwSignedIndex inc = 1, k = n;
/*------------------------------------------------------------------------------------------------
* If scalar multiply mode is BLAS mode and there is no actual transpose involved then do the
* BLAS calls now.
*------------------------------------------------------------------------------------------------ */
if( scalar_method == METHOD_BLAS && (transb == 'N' || transb == 'G') ) {
#ifndef _OPENMP
if( debug_message ) {
mexPrintf("MTIMESX: BLAS calls to " TOKENSTRING(xAXPY) "\n");
debug_message = 0;
}
#endif
xAXPY( K, AR, Bpr, INCX, Cpr, INCY );
if( Cpi ) {
if( ai != zero ) {
xAXPY( K, AI, Bpr, INCX, Cpi, INCY );
if( Bpi ) {
if( transb == 'N' ) ai = -ai;
xAXPY( K, AI, Bpi, INCX, Cpr, INCY );
}
}
if( Bpi ) {
if( transb == 'G' ) ar = -ar;
xAXPY( K, AR, Bpi, INCX, Cpi, INCY );
}
}
return;
}
/*------------------------------------------------------------------------------------------------
* Some specialized cases, no need to multiply by +1 or -1, we can program that directly into the
* calculations without a multiply. We do need to multiply by zero, however, so that the sign of
* any -0 that might be present gets carried over into the result, and also any inf or NaN that is
* present gets a proper result. So no special code for multiplying by zero.
*------------------------------------------------------------------------------------------------ */
#ifndef _OPENMP
if( debug_message ) {
mexPrintf("MTIMESX: LOOPS scalar multiply\n");
debug_message = 0;
}
#endif
if( ar == one ) {
if( ai == one ) {
if( transb == 'N' ) {
RealKindEqP1P1TimesRealKindN(Cpr, Cpi, Bpr, Bpi, n); /* C = (1 + 1*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqP1P1TimesRealKindG(Cpr, Cpi, Bpr, Bpi, n); /* C = (1 + 1*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqP1P1TimesRealKindT(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (1 + 1*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqP1P1TimesRealKindC(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (1 + 1*i) * (Bpr + Bpi * i)C */
}
} else if( ai == -one ) {
if( transb == 'N' ) {
RealKindEqP1M1TimesRealKindN(Cpr, Cpi, Bpr, Bpi, n); /* C = (1 - 1*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqP1M1TimesRealKindG(Cpr, Cpi, Bpr, Bpi, n); /* C = (1 - 1*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqP1M1TimesRealKindT(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (1 - 1*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqP1M1TimesRealKindC(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (1 - 1*i) * (Bpr + Bpi * i)C */
}
} else if( ai == zero ) {
if( transb == 'N' ) { /* this case never reached ... it is the shared data copy above */
RealKindEqP1P0TimesRealKindN(Cpr, Cpi, Bpr, Bpi, n); /* C = (1 + 0*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqP1P0TimesRealKindG(Cpr, Cpi, Bpr, Bpi, n); /* C = (1 + 0*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqP1P0TimesRealKindT(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (1 + 0*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqP1P0TimesRealKindC(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (1 + 0*i) * (Bpr + Bpi * i)C */
}
} else {
if( transb == 'N' ) {
RealKindEqP1PxTimesRealKindN(Cpr, Cpi, ai, Bpr, Bpi, n); /* C = (1 + ai*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqP1PxTimesRealKindG(Cpr, Cpi, ai, Bpr, Bpi, n); /* C = (1 + ai*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqP1PxTimesRealKindT(Cpr, Cpi, ai, Bpr, Bpi, m2, n2, p); /* C = (1 + ai*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqP1PxTimesRealKindC(Cpr, Cpi, ai, Bpr, Bpi, m2, n2, p); /* C = (1 + ai*i) * (Bpr + Bpi * i)C */
}
}
} else if( ar == -one ) {
if( ai == one ) {
if( transb == 'N' ) {
RealKindEqM1P1TimesRealKindN(Cpr, Cpi, Bpr, Bpi, n); /* C = (-1 + 1*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqM1P1TimesRealKindG(Cpr, Cpi, Bpr, Bpi, n); /* C = (-1 + 1*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqM1P1TimesRealKindT(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (-1 + 1*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqM1P1TimesRealKindC(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (-1 + 1*i) * (Bpr + Bpi * i)C */
}
} else if( ai == -one ) {
if( transb == 'N' ) {
RealKindEqM1M1TimesRealKindN(Cpr, Cpi, Bpr, Bpi, n); /* C = (-1 - 1*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqM1M1TimesRealKindG(Cpr, Cpi, Bpr, Bpi, n); /* C = (-1 - 1*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqM1M1TimesRealKindT(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (-1 - 1*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqM1M1TimesRealKindC(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (-1 - 1*i) * (Bpr + Bpi * i)C */
}
} else if( ai == zero ) {
if( transb == 'N' ) {
RealKindEqM1P0TimesRealKindN(Cpr, Cpi, Bpr, Bpi, n); /* C = (-1 + 0*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqM1P0TimesRealKindG(Cpr, Cpi, Bpr, Bpi, n); /* C = (-1 + 0*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqM1P0TimesRealKindT(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (-1 + 0*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqM1P0TimesRealKindC(Cpr, Cpi, Bpr, Bpi, m2, n2, p); /* C = (-1 + 0*i) * (Bpr + Bpi * i)C */
}
} else {
if( transb == 'N' ) {
RealKindEqM1PxTimesRealKindN(Cpr, Cpi, ai, Bpr, Bpi, n); /* C = (-1 + ai*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqM1PxTimesRealKindG(Cpr, Cpi, ai, Bpr, Bpi, n); /* C = (-1 + ai*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqM1PxTimesRealKindT(Cpr, Cpi, ai, Bpr, Bpi, m2, n2, p); /* C = (-1 + ai*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqM1PxTimesRealKindC(Cpr, Cpi, ai, Bpr, Bpi, m2, n2, p); /* C = (-1 + ai*i) * (Bpr + Bpi * i)C */
}
}
} else { /* ar != one && ar != -one */
if( ai == one ) {
if( transb == 'N' ) {
RealKindEqPxP1TimesRealKindN(Cpr, Cpi, ar, Bpr, Bpi, n); /* C = (ar + 1*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqPxP1TimesRealKindG(Cpr, Cpi, ar, Bpr, Bpi, n); /* C = (ar + 1*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqPxP1TimesRealKindT(Cpr, Cpi, ar, Bpr, Bpi, m2, n2, p); /* C = (ar + 1*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqPxP1TimesRealKindC(Cpr, Cpi, ar, Bpr, Bpi, m2, n2, p); /* C = (ar + 1*i) * (Bpr + Bpi * i)C */
}
} else if( ai == -one ) {
if( transb == 'N' ) {
RealKindEqPxM1TimesRealKindN(Cpr, Cpi, ar, Bpr, Bpi, n); /* C = (ar - 1*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqPxM1TimesRealKindG(Cpr, Cpi, ar, Bpr, Bpi, n); /* C = (ar - 1*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqPxM1TimesRealKindT(Cpr, Cpi, ar, Bpr, Bpi, m2, n2, p); /* C = (ar - 1*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqPxM1TimesRealKindC(Cpr, Cpi, ar, Bpr, Bpi, m2, n2, p); /* C = (ar - 1*i) * (Bpr + Bpi * i)C */
}
} else if( ai == zero ) {
if( transb == 'N' ) {
RealKindEqPxP0TimesRealKindN(Cpr, Cpi, ar, Bpr, Bpi, n); /* C = (ar + 0*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqPxP0TimesRealKindG(Cpr, Cpi, ar, Bpr, Bpi, n); /* C = (ar + 0*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqPxP0TimesRealKindT(Cpr, Cpi, ar, Bpr, Bpi, m2, n2, p); /* C = (ar + 0*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqPxP0TimesRealKindC(Cpr, Cpi, ar, Bpr, Bpi, m2, n2, p); /* C = (ar + 0*i) * (Bpr + Bpi * i)C */
}
} else {
if( transb == 'N' ) {
RealKindEqPxPxTimesRealKindN(Cpr, Cpi, ar, ai, Bpr, Bpi, n); /* C = (ar + ai*i) * (Bpr + Bpi * i) */
} else if( transb == 'G' ) {
RealKindEqPxPxTimesRealKindG(Cpr, Cpi, ar, ai, Bpr, Bpi, n); /* C = (ar + ai*i) * (Bpr - Bpi * i) */
} else if( transb == 'T' ) {
RealKindEqPxPxTimesRealKindT(Cpr, Cpi, ar, ai, Bpr, Bpi, m2, n2, p); /* C = (ar + ai*i) * (Bpr + Bpi * i)T */
} else { /* if( transb == 'C' ) { */
RealKindEqPxPxTimesRealKindC(Cpr, Cpi, ar, ai, Bpr, Bpi, m2, n2, p); /* C = (ar + ai*i) * (Bpr + Bpi * i)C */
}
}
}
}
|
naive_math_impl.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cmath>
#include <cstdio>   // printf
#include <cstdlib>  // malloc / free
#include <cstring>  // memset
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
type* output,
const int ldin,
const int M,
const int K,
bool pack_k) {
const int m_round = (M + 3) / 4 * 4;
int k_round = (K + 3) / 4 * 4;
if (!pack_k) {
k_round = K;
}
const int m_loop = m_round / 4;
type* zero_buf = new type[K];
memset(zero_buf, 0, K * sizeof(type));
for (int i = 0; i < m_loop; ++i) {
const type* in0 = input + i * 4 * ldin;
const type* in1 = in0 + ldin;
const type* in2 = in1 + ldin;
const type* in3 = in2 + ldin;
if (4 * (i + 1) - M > 0) {
switch (4 * (i + 1) - M) {
case 3:
in1 = zero_buf;
case 2:
in2 = zero_buf;
case 1:
in3 = zero_buf;
default:
break;
}
}
for (int j = 0; j < K; ++j) {
*output++ = *in0++;
*output++ = *in1++;
*output++ = *in2++;
*output++ = *in3++;
}
for (int j = K; j < k_round; ++j) {
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
}
}
delete[] zero_buf;
}
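// Usage sketch (illustrative): packing a 2x3 float matrix with K padded up to
// a multiple of 4:
//   float in[6] = {1, 2, 3, 4, 5, 6};  // M = 2 rows, K = 3, ldin = 3
//   float out[16];                     // m_round = 4, k_round = 4
//   basic_trans_mat_to_c4(in, out, 3, 2, 3, true);
//   // out = {1,4,0,0, 2,5,0,0, 3,6,0,0, 0,0,0,0}: each group of 4 holds one
//   // column of the padded 4x4 matrix.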
template <typename type>
static void basic_trans_mat_to_c8(const type* input,
type* output,
const int ldin,
const int M,
const int K,
bool pack_k) {
const int m_round = (M + 7) / 8 * 8;
int k_round = (K + 7) / 8 * 8;
if (!pack_k) {
k_round = K;
}
const int m_loop = m_round / 8;
// K is a runtime value and variable-length arrays are not standard C++, so
// allocate the zero row on the heap (freed at the end of the function).
type* zero_buf = new type[K];
memset(zero_buf, 0, K * sizeof(type));
for (int i = 0; i < m_loop; ++i) {
const type* in0 = input + i * 8 * ldin;
const type* in1 = in0 + ldin;
const type* in2 = in1 + ldin;
const type* in3 = in2 + ldin;
const type* in4 = in3 + ldin;
const type* in5 = in4 + ldin;
const type* in6 = in5 + ldin;
const type* in7 = in6 + ldin;
if (8 * (i + 1) - M > 0) {
switch (8 * (i + 1) - M) {
case 7:
in1 = zero_buf;
case 6:
in2 = zero_buf;
case 5:
in3 = zero_buf;
case 4:
in4 = zero_buf;
case 3:
in5 = zero_buf;
case 2:
in6 = zero_buf;
case 1:
in7 = zero_buf;
default:
break;
}
}
for (int j = 0; j < K; ++j) {
*output++ = *in0++;
*output++ = *in1++;
*output++ = *in2++;
*output++ = *in3++;
*output++ = *in4++;
*output++ = *in5++;
*output++ = *in6++;
*output++ = *in7++;
}
for (int j = K; j < k_round; ++j) {
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
*output++ = static_cast<type>(0);
}
}
delete[] zero_buf;
}
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
if (flag_relu) {
tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
tmp_c[i * ldc + j] = tmp;
}
}
}
//! trans c to c4
basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
free(tmp_c);
}
template <typename type, typename type2>
static void basic_gemm_c8(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
if (flag_relu) {
tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
tmp_c[i * ldc + j] = tmp;
}
}
}
//! trans c to c8
basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false);
free(tmp_c);
}
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data;
if (flag_relu) {
c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
c[i * ldc + j] = tmp;
}
}
}
}
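// Minimal usage sketch (illustrative): a plain 2x2 float multiply with no
// transpose, bias, or relu:
//   float a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, c[4] = {0, 0, 0, 0};
//   basic_gemm<float, float>(false, false, 2, 2, 2, 1.f, a, 2, b, 2, 0.f,
//                            c, 2, nullptr, false, false);
//   // c = {19, 22, 43, 50}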
template <typename type, typename type2>
static void basic_gemv(int m,
int k,
const type* a,
const type* b,
const type2* bias,
type2* c,
type2 alpha,
type2 beta,
bool trans_a = false,
bool flag_bias = false,
int flag_act = 0,
float six = 6.f,
float leakey_relu_alpha = 1.f) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
auto sum = static_cast<type2>(0);
for (int j = 0; j < k; ++j) {
type av;
if (trans_a) {
av = a[j * m + i];
} else {
av = a[i * k + j];
}
sum += av * b[j];
}
type2 tmp = alpha * sum + beta * c[i] + bias_data;
if (flag_act > 0) {
if (flag_act == 1) { // relu
c[i] = tmp > (type2)0 ? tmp : (type2)0;
} else if (flag_act == 2) { // relu 6
c[i] = tmp > (type2)0 ? tmp : (type2)0;
c[i] = c[i] < six ? c[i] : six;  // clamp at six
} else if (flag_act == 4) { // leakey relu
c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
}
} else {
c[i] = tmp;
}
}
}
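// Minimal usage sketch (illustrative): y = relu(A * x) for a 2x3 row-major A,
// no bias (flag_act == 1 selects relu):
//   float a[6] = {1, -2, 3, -4, 5, -6}, x[3] = {1, 1, 1}, y[2] = {0, 0};
//   basic_gemv<float, float>(2, 3, a, x, nullptr, y, 1.f, 0.f, false, false, 1);
//   // y = {relu(2), relu(-5)} = {2, 0}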
/**
* \brief basic direct convolution function
*/
//! for float, dtype1 and type2 is float
//! for int8, dtype1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
Dtype2* dout,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w,
int pad_h,
bool flag_bias,
int act_type,
float six = 6.f,
float scale = 1.f) {
Dtype2 beta = 0;
auto src_data = din;
auto dst_data_ref = dout;
auto weights_data = weights;
auto with_bias = flag_bias;
auto bias_data = bias;
int in_num = num;
int out_channels = chout;
int out_h = hout;
int out_w = wout;
int in_channel = chin;
int in_h = hin;
int in_w = win;
int out_c_group = out_channels / group;
int in_c_group = in_channel / group;
for (int n = 0; n < in_num; ++n) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
for (int oh = 0; oh < out_h; ++oh) {
for (int ow = 0; ow < out_w; ++ow) {
int out_idx = n * group * out_c_group * out_h * out_w +
g * out_c_group * out_h * out_w + oc * out_h * out_w +
oh * out_w + ow;
Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
dst_data_ref[out_idx] = bias_d; // + dst_data_ref[out_idx] * beta;
for (int ic = 0; ic < in_c_group; ++ic) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
int iw = ow * stride_w - pad_w + kw * (dila_w);
int ih = oh * stride_h - pad_h + kh * (dila_h);
if (iw < 0 || iw >= in_w) continue;
if (ih < 0 || ih >= in_h) continue;
int iidx = n * in_channel * in_h * in_w +
g * in_c_group * in_h * in_w + ic * in_h * in_w +
ih * in_w + iw;
int widx =
g * out_c_group * in_c_group * kernel_h * kernel_w +
oc * in_c_group * kernel_h * kernel_w +
ic * kernel_h * kernel_w + kh * kernel_w + kw;
dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
}
}
}
if (act_type > 0) {
// 1-relu 2-relu6 4-leakyrelu
if (act_type == 1) {
dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)0;
} else if (act_type == 2) {
dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)0;
dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
? dst_data_ref[out_idx]
: (Dtype2)six;
} else if (act_type == 4) {
dst_data_ref[out_idx] =
dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)(dst_data_ref[out_idx] * scale);
} else {
printf("this act type: %d does not support \n", act_type);
}
}
}
}
}
}
}
}
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
const Dtype* bias,
int channel,
int channel_size,
bool flag_bias,
bool flag_relu) {
Dtype* data = tensor;
for (int j = 0; j < channel; ++j) {
Dtype bias_c = flag_bias ? bias[j] : 0;
for (int i = 0; i < channel_size; i++) {
data[i] += bias_c;
if (flag_relu) {
data[i] = data[i] > 0 ? data[i] : 0.f;
}
}
data += channel_size;
}
}
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
for (int j = 0; j < size; ++j) {
tensor[j] = tensor[j] > 0 ? tensor[j] : (Dtype)0;
}
}
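// The unsigned cast below folds "a >= 0 && a < b" into a single comparison: a
// negative a wraps to a value larger than any valid size b, so one unsigned
// compare rejects it (this assumes b >= 0, which holds for image extents).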
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}
template <typename Dtype>
static void col2im(const Dtype* data_col,
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int pad_h0,
const int pad_h1,
const int pad_w0,
const int pad_w1,
const int stride_h,
const int stride_w,
const int dilation_h,
const int dilation_w,
Dtype* data_im) {
memset(data_im, 0, height * width * channels * sizeof(Dtype));
const int output_h =
(height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) /
stride_h +
1;
const int output_w =
(width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
1;
const int channel_size = height * width;
for (int channel = channels; channel--; data_im += channel_size) {
for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
int input_row = -pad_h0 + kernel_row * dilation_h;
for (int output_rows = output_h; output_rows; output_rows--) {
if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
data_col += output_w;
} else {
int input_col = -pad_w0 + kernel_col * dilation_w;
for (int output_col = output_w; output_col; output_col--) {
if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
data_im[input_row * width + input_col] += *data_col;
}
data_col++;
input_col += stride_w;
}
}
input_row += stride_h;
}
}
}
}
}
//! for float, dtype1 and type2 is float
//! for int8, dtype1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
void deconv_basic(const Dtype1* din,
Dtype2* dout,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w0,
int pad_w1,
int pad_h0,
int pad_h1,
bool flag_bias,
bool flag_relu) {
int m = chout * kernel_w * kernel_h / group;
int n = hin * win;
int k = chin / group;
int group_size_in = win * hin * chin / group;
int group_size_coldata = m * n;
int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group);
bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) &&
(stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) &&
(pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) &&
(dila_h == 1);
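// For a 1x1 kernel with unit stride and no padding or dilation, col2im is the
// identity, so the GEMM below can write directly into the output batch and the
// col2im pass is skipped.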
Dtype2* workspace_ptr =
static_cast<Dtype2*>(malloc(sizeof(Dtype2) * m * n * group));
for (int i = 0; i < num; ++i) {
const Dtype1* din_batch = din + i * chin * hin * win;
Dtype2* dout_batch = dout + i * chout * hout * wout;
Dtype2* col_data = workspace_ptr;
if (flag_1x1s1p1) {
col_data = dout_batch;
}
memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group);
for (int g = 0; g < group; ++g) {
const Dtype1* din_group = din_batch + g * group_size_in;
const Dtype1* weights_group = weights + g * group_size_weights;
Dtype2* coldata_group = col_data + g * group_size_coldata;
basic_gemm<Dtype1, Dtype2>(true,
false,
m,
n,
k,
1,
weights_group,
m,
din_group,
n,
0,
coldata_group,
n,
nullptr,
false,
(!flag_bias && flag_relu));
}
if (!flag_1x1s1p1) {
col2im(col_data,
chout,
hout,
wout,
kernel_h,
kernel_w,
pad_h0,
pad_h1,
pad_w0,
pad_w1,
stride_h,
stride_w,
dila_h,
dila_w,
dout_batch);
}
//! add bias
if (flag_bias) {
fill_bias_relu(
dout_batch, bias, chout, wout * hout, flag_bias, flag_relu);
}
}
free(workspace_ptr);
}
static float deformable_bilinear(const float* bottom_data,
const int data_width,
const int height,
const int width,
float h,
float w) {
int h_low = static_cast<int>(floor(h));
int w_low = static_cast<int>(floor(w));
int h_high;
int w_high;
if (h_low >= height - 1) {
h_high = h_low = height - 1;
h = static_cast<float>(h_low);
} else {
h_high = h_low + 1;
}
if (w_low >= width - 1) {
w_high = w_low = width - 1;
w = static_cast<float>(w_low);
} else {
w_high = w_low + 1;
}
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh;
float hw = 1 - lw;
float v1 = bottom_data[h_low * data_width + w_low];
float v2 = bottom_data[h_low * data_width + w_high];
float v3 = bottom_data[h_high * data_width + w_low];
float v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw;
float w2 = hh * lw;
float w3 = lh * hw;
float w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
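// Worked example (illustrative): sampling at (h, w) = (0.5, 0.5) gives
// lh = lw = 0.5, so all four weights w1..w4 equal 0.25 and the result is the
// mean of the 2x2 neighborhood.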
//! for float, dtype1 and type2 is float
//! for int8, dtype1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data,
const float* offset_data,
const float* mask_data,
Dtype2* out_data,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w,
int pad_h,
bool flag_bias,
bool flag_relu,
bool modulated) {
int out_c_group = chout / group;
int in_c_group = chin / group;
int in_size = hin * win;
int out_size = hout * wout;
int c_in_size = chin * in_size;
int c_out_size = chout * out_size;
int kernel_size = kernel_w * kernel_h;
for (int n = 0; n < num; n++) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
for (int oh = 0; oh < hout; oh++) {
for (int ow = 0; ow < wout; ow++) {
int out_idx = n * c_out_size + g * out_c_group * out_size +
oc * out_size + oh * wout + ow;
Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
out_data[out_idx] = bias_d + out_data[out_idx];
for (int ic = 0; ic < in_c_group; ++ic) {
for (int fh = 0; fh < kernel_h; fh++) {
for (int fw = 0; fw < kernel_w; fw++) {
const float* offset_data_ptr =
offset_data + n * group * 2 * kernel_size * out_size +
g * 2 * kernel_size * out_size;
const int data_offset_h_ptr =
((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
const int data_offset_w_ptr =
((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
const float offset_h = offset_data_ptr[data_offset_h_ptr];
const float offset_w = offset_data_ptr[data_offset_w_ptr];
// Each kernel tap (fh, fw) samples at its regular grid position plus the
// learned offset; using kernel_w/kernel_h here (as the original did)
// would sample the same location for every tap.
const float iw =
ow * stride_w - pad_w + fw * dila_w + offset_w;
const float ih =
oh * stride_h - pad_h + fh * dila_h + offset_h;
if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
const float map_h = fh * dila_h + offset_h;
const float map_w = fw * dila_w + offset_w;
const int cur_height = hin - (oh * stride_h - pad_h);
const int cur_width = win - (ow * stride_w - pad_w);
const float* in_data_offset =
in_data + n * c_in_size +
(g * in_c_group + ic) * in_size +
(oh * stride_h - pad_h) * win + (ow * stride_w - pad_w);
float val = deformable_bilinear(in_data_offset,
win,
cur_height,
cur_width,
map_h,
map_w);
if (modulated) {
// use mask
const float* mask_ptr =
mask_data + n * group * kernel_size * out_size +
g * kernel_size * out_size +
(fh * kernel_w + fw) * hout * wout + oh * wout + ow;
val *= mask_ptr[0];
}
int widx = g * out_c_group * in_c_group * kernel_size +
oc * in_c_group * kernel_size +
ic * kernel_size + fh * kernel_w + fw;
out_data[out_idx] += val * weights[widx];
}
}
}
}
if (flag_relu) {
out_data[out_idx] = out_data[out_idx] > 0 ? out_data[out_idx] : 0;
}
}
}
}
}
}
}
|
image.c | #include "image.h"
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
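// im2col: unfolds every f_size x f_size patch of a (in_c, in_h, in_w)
// image into one column of a (in_c * f_size * f_size) x (col_h * col_w)
// matrix, so convolution becomes a single GEMM; taps that fall into the
// padding are written as 0.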
void iam2cool(float *im, int in_c, int in_w, int in_h, int f_size, int stride, int padd, int col_w, int col_h, int col_c, float *out) {
// collapse(2) requires the two loops to be perfectly nested, so the
// per-channel offsets are derived from c inside the collapsed nest.
#pragma omp parallel for collapse(2)
for (int c = 0; c < col_c; c++) {
for (int y = 0; y < col_h; y++) {
int w_off = c % f_size;
int h_off = (c / f_size) % f_size;
int im_chan = c / (f_size * f_size);
for (int x = 0; x < col_w; x++) {
int im_row = (h_off + (y * stride)) - padd;
int im_col = (w_off + (x * stride)) - padd;
int col_index = (c * col_h + y) * col_w + x;
if (im_row < 0 || im_col < 0 || im_row >= in_h || im_col >= in_w) {
out[col_index] = 0;
}
else {
int im_index = im_col + in_w * (im_row + in_h * im_chan);
out[col_index] = im[im_index];
}
}
}
}
}
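// col2im undoes iam2cool: each column entry is scatter-added back into
// the pixel it was unfolded from; entries that came from padding are
// skipped.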
void cool2ami(float *cols, int in_c, int in_w, int in_h, int f_size, int stride, int padd, int col_w, int col_h, int col_c, float *out) {
// Same fix as iam2cool: keep the collapsed loops perfectly nested.
#pragma omp parallel for collapse(2)
for (int c = 0; c < col_c; c++) {
for (int y = 0; y < col_h; y++) {
int w_off = c % f_size;
int h_off = (c / f_size) % f_size;
int im_chan = c / (f_size * f_size);
for (int x = 0; x < col_w; x++) {
int im_row = (h_off + (y * stride)) - padd;
int im_col = (w_off + (x * stride)) - padd;
if (im_row < 0 || im_col < 0 || im_row >= in_h || im_col >= in_w) {
continue;
}
else {
int im_index = im_col + in_w * (im_row + in_h * im_chan);
int col_index = (c * col_h + y) * col_w + x;
// Distinct columns can map to the same image pixel, so the parallel
// scatter-add must be atomic to avoid a data race.
#pragma omp atomic
out[im_index] += cols[col_index];
}
}
}
}
} |
GB_unop__abs_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_fp64_fp64
// op(A') function: GB_unop_tran__abs_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = fabs (aij)
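// A user-level call that routes to this kernel looks roughly like
//   GrB_Matrix_apply (C, NULL, NULL, GxB_ABS_FP64, A, NULL) ;
// (sketch only; GxB_ABS_FP64 is SuiteSparse's extended-namespace name
// for the fp64 absolute-value unary operator).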
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = fabs (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = fabs (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__abs_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = fabs (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__abs_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_fc32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint64)
// op(A') function: GB (_unop_tran__identity_fc32_uint64)
// C type: GxB_FC32_t
// A type: uint64_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fc32_uint64)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const uint64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint64_t aij = Ax [p] ;
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint64_t aij = Ax [p] ;
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fc32_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
q4.c | #include <omp.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
void test(int *A, int size, clock_t start, clock_t end, int mode, int num_thread);
int* vecCreate (int size);
int* vecCreateOpenMP(int size, int num_thread);
int main(){
int num_thread, size = 50000000, *A = NULL;
clock_t start, end;
printf("Enter number of threads: ");
scanf("%d", &num_thread);
start = clock();
A = vecCreate(size);
end = clock();
test(A, size, start, end, 1, num_thread);
free(A);
if (size % num_thread != 0)
{
fprintf(stderr, "Error: vector size must be divisible by the number of threads.\n");
exit(EXIT_FAILURE);
}
start = clock();
A = vecCreateOpenMP(size, num_thread);
end = clock();
test(A, size, start, end, 0, num_thread);
free(A);
return 0;
}
void test(int *A, int size, clock_t start, clock_t end, int mode, int num_thread){
if (A==NULL)
{
fprintf(stderr, "Not enough memory\n");
exit(EXIT_FAILURE);
}
if (mode==1) printf("Using serial code\n");
else printf("Using OpenMP with %d threads\n", num_thread);
printf("v[%d] = %d\n", size - 1, A[size - 1]);
double time = (double)(end - start) * 1000 / CLOCKS_PER_SEC;
printf("Time: %f ms\n\n", time);
}
int* vecCreate (int size){
int *A = (int*)malloc(size * sizeof(int));
for (int i = 0; i < size; i++)
{
A[i] = i;
}
return A;
}
int* vecCreateOpenMP(int size, int num_thread){
int *A = (int*)malloc(size * sizeof(int));
int seg_len = size / num_thread;
#pragma omp parallel num_threads(num_thread)
{
int tid = omp_get_thread_num();
int start = tid * seg_len;
for (int i = 0; i < seg_len; i++)
{
A[start + i] = start + i;
}
}
return A;
}
// Output example 1:
// Enter number of threads: 4
// Using serial code
// v[49999999] = 49999999
// Time: 155.352000 ms
// Using OpenMP with 4 threads
// v[49999999] = 49999999
// Time: 100.248000 ms
// Output example 2:
// Enter number of threads: 20
// Using serial code
// v[49999999] = 49999999
// Time: 134.358000 ms
// Using OpenMP with 20 threads
// v[49999999] = 49999999
// Time: 199.236000 ms
// (Why slower with 20 threads??)
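// Answer: clock() measures CPU time summed over all threads, not elapsed
// wall time, so a parallel run can report a larger value even when it
// finishes sooner; oversubscribing the available cores with 20 threads
// also adds scheduling overhead. omp_get_wtime() is the right tool for
// timing parallel regions.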
// Output example 3:
// Enter number of threads: 3
// Using serial code
// v[49999999] = 49999999
// Time: 152.849000 ms
// Error: number of threads must be divisible by vector size. |
Percent.h | /*
open source routing machine
Copyright (C) Dennis Luxen, others 2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU AFFERO General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see http://www.gnu.org/licenses/agpl.txt.
*/
#ifndef PERCENT_H
#define PERCENT_H
#include <iostream>
#ifdef _OPENMP
#include <omp.h>
#endif
class Percent
{
public:
/**
* Constructor.
* @param maxValue the value that corresponds to 100%
* @param step the progress is shown in steps of 'step' percent
*/
Percent(unsigned maxValue, unsigned step = 5) {
reinit(maxValue, step);
}
/** Reinitializes this object. */
void reinit(unsigned maxValue, unsigned step = 5) {
_maxValue = maxValue;
_current_value = 0;
_intervalPercent = _maxValue / 100;
if (_intervalPercent == 0)
_intervalPercent = 1; // keep thresholds advancing when maxValue < 100
_nextThreshold = _intervalPercent;
_lastPercent = 0;
_step = step;
}
/** If there has been significant progress, display it. */
void printStatus(unsigned currentValue) {
if (currentValue >= _nextThreshold) {
_nextThreshold += _intervalPercent;
printPercent( currentValue / (double)_maxValue * 100 );
}
if (currentValue + 1 == _maxValue)
std::cout << " 100%" << std::endl;
}
void printIncrement()
{
#pragma omp atomic
_current_value++;
printStatus(_current_value);
}
private:
unsigned _current_value;
unsigned _maxValue;
unsigned _intervalPercent;
unsigned _nextThreshold;
unsigned _lastPercent;
unsigned _step;
/** Displays the new progress. */
void printPercent(double percent) {
while (percent >= _lastPercent+_step) {
_lastPercent+=_step;
if (_lastPercent % 10 == 0) {
std::cout << " " << _lastPercent << "% ";
}
else {
std::cout << ".";
}
std::cout.flush();
}
}
};
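/* Minimal usage sketch (hypothetical caller; 'work_items' is illustrative):
Percent progress(work_items);
for (unsigned i = 0; i < work_items; ++i) {
// ... process one unit of work ...
progress.printStatus(i); // or printIncrement() from OpenMP threads
}
*/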
#endif // PERCENT_H
|
gimplify.c | /* Tree lowering pass. This pass converts the GENERIC functions-as-trees
tree representation into the GIMPLE form.
Copyright (C) 2002-2015 Free Software Foundation, Inc.
Major work done by Sebastian Pop <s.pop@laposte.net>,
Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "options.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "fold-const.h"
#include "hashtab.h"
#include "tm.h"
#include "hard-reg-set.h"
#include "function.h"
#include "rtl.h"
#include "flags.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "insn-config.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "emit-rtl.h"
#include "varasm.h"
#include "stmt.h"
#include "expr.h"
#include "predict.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "tree-pretty-print.h"
#include "langhooks.h"
#include "bitmap.h"
#include "gimple-ssa.h"
#include "hash-map.h"
#include "plugin-api.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-ssanames.h"
#include "tree-ssa.h"
#include "diagnostic-core.h"
#include "target.h"
#include "splay-tree.h"
#include "omp-low.h"
#include "gimple-low.h"
#include "cilk.h"
#include "gomp-constants.h"
#include "langhooks-def.h" /* FIXME: for lhd_set_decl_assembler_name */
#include "tree-pass.h" /* FIXME: only for PROP_gimple_any */
#include "builtins.h"
enum gimplify_omp_var_data
{
GOVD_SEEN = 1,
GOVD_EXPLICIT = 2,
GOVD_SHARED = 4,
GOVD_PRIVATE = 8,
GOVD_FIRSTPRIVATE = 16,
GOVD_LASTPRIVATE = 32,
GOVD_REDUCTION = 64,
GOVD_LOCAL = 128,
GOVD_MAP = 256,
GOVD_DEBUG_PRIVATE = 512,
GOVD_PRIVATE_OUTER_REF = 1024,
GOVD_LINEAR = 2048,
GOVD_ALIGNED = 4096,
/* Flag for GOVD_MAP: don't copy back. */
GOVD_MAP_TO_ONLY = 8192,
/* Flag for GOVD_LINEAR or GOVD_LASTPRIVATE: no outer reference. */
GOVD_LINEAR_LASTPRIVATE_NO_OUTER = 16384,
GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
| GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LINEAR
| GOVD_LOCAL)
};
enum omp_region_type
{
ORT_WORKSHARE = 0,
ORT_SIMD = 1,
ORT_PARALLEL = 2,
ORT_COMBINED_PARALLEL = 3,
ORT_TASK = 4,
ORT_UNTIED_TASK = 5,
ORT_TEAMS = 8,
ORT_COMBINED_TEAMS = 9,
/* Data region. */
ORT_TARGET_DATA = 16,
/* Data region with offloading. */
ORT_TARGET = 32
};
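/* The enumerators combine like flags: ORT_COMBINED_PARALLEL is
ORT_PARALLEL | 1, ORT_UNTIED_TASK is ORT_TASK | 1 and ORT_COMBINED_TEAMS
is ORT_TEAMS | 1, which is why later code tests region_type with bitwise
AND (e.g. "(region_type & ORT_TASK) == 0" in new_omp_context).  */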
/* Gimplify hashtable helper. */
struct gimplify_hasher : typed_free_remove <elt_t>
{
typedef elt_t value_type;
typedef elt_t compare_type;
static inline hashval_t hash (const value_type *);
static inline bool equal (const value_type *, const compare_type *);
};
struct gimplify_ctx
{
struct gimplify_ctx *prev_context;
vec<gbind *> bind_expr_stack;
tree temps;
gimple_seq conditional_cleanups;
tree exit_label;
tree return_temp;
vec<tree> case_labels;
/* The formal temporary table. Should this be persistent? */
hash_table<gimplify_hasher> *temp_htab;
int conditions;
bool save_stack;
bool into_ssa;
bool allow_rhs_cond_expr;
bool in_cleanup_point_expr;
};
struct gimplify_omp_ctx
{
struct gimplify_omp_ctx *outer_context;
splay_tree variables;
hash_set<tree> *privatized_types;
location_t location;
enum omp_clause_default_kind default_kind;
enum omp_region_type region_type;
bool combined_loop;
bool distribute;
};
static struct gimplify_ctx *gimplify_ctxp;
static struct gimplify_omp_ctx *gimplify_omp_ctxp;
/* Forward declaration. */
static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool);
/* Shorter alias name for the above function for use in gimplify.c
only. */
static inline void
gimplify_seq_add_stmt (gimple_seq *seq_p, gimple gs)
{
gimple_seq_add_stmt_without_update (seq_p, gs);
}
/* Append sequence SRC to the end of sequence *DST_P. If *DST_P is
NULL, a new sequence is allocated. This function is
similar to gimple_seq_add_seq, but does not scan the operands.
During gimplification, we need to manipulate statement sequences
before the def/use vectors have been constructed. */
static void
gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src)
{
gimple_stmt_iterator si;
if (src == NULL)
return;
si = gsi_last (*dst_p);
gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT);
}
/* Pointer to a list of allocated gimplify_ctx structs to be used for pushing
and popping gimplify contexts. */
static struct gimplify_ctx *ctx_pool = NULL;
/* Return a gimplify context struct from the pool. */
static inline struct gimplify_ctx *
ctx_alloc (void)
{
struct gimplify_ctx * c = ctx_pool;
if (c)
ctx_pool = c->prev_context;
else
c = XNEW (struct gimplify_ctx);
memset (c, '\0', sizeof (*c));
return c;
}
/* Put gimplify context C back into the pool. */
static inline void
ctx_free (struct gimplify_ctx *c)
{
c->prev_context = ctx_pool;
ctx_pool = c;
}
/* Free allocated ctx stack memory. */
void
free_gimplify_stack (void)
{
struct gimplify_ctx *c;
while ((c = ctx_pool))
{
ctx_pool = c->prev_context;
free (c);
}
}
/* Set up a context for the gimplifier. */
void
push_gimplify_context (bool in_ssa, bool rhs_cond_ok)
{
struct gimplify_ctx *c = ctx_alloc ();
c->prev_context = gimplify_ctxp;
gimplify_ctxp = c;
gimplify_ctxp->into_ssa = in_ssa;
gimplify_ctxp->allow_rhs_cond_expr = rhs_cond_ok;
}
/* Tear down a context for the gimplifier. If BODY is non-null, then
put the temporaries into the outer BIND_EXPR. Otherwise, put them
in the local_decls.
BODY is not a sequence, but the first tuple in a sequence. */
void
pop_gimplify_context (gimple body)
{
struct gimplify_ctx *c = gimplify_ctxp;
gcc_assert (c
&& (!c->bind_expr_stack.exists ()
|| c->bind_expr_stack.is_empty ()));
c->bind_expr_stack.release ();
gimplify_ctxp = c->prev_context;
if (body)
declare_vars (c->temps, body, false);
else
record_vars (c->temps);
delete c->temp_htab;
c->temp_htab = NULL;
ctx_free (c);
}
/* Push a GIMPLE_BIND tuple onto the stack of bindings. */
static void
gimple_push_bind_expr (gbind *bind_stmt)
{
gimplify_ctxp->bind_expr_stack.reserve (8);
gimplify_ctxp->bind_expr_stack.safe_push (bind_stmt);
}
/* Pop the first element off the stack of bindings. */
static void
gimple_pop_bind_expr (void)
{
gimplify_ctxp->bind_expr_stack.pop ();
}
/* Return the first element of the stack of bindings. */
gbind *
gimple_current_bind_expr (void)
{
return gimplify_ctxp->bind_expr_stack.last ();
}
/* Return the stack of bindings created during gimplification. */
vec<gbind *>
gimple_bind_expr_stack (void)
{
return gimplify_ctxp->bind_expr_stack;
}
/* Return true iff there is a COND_EXPR between us and the innermost
CLEANUP_POINT_EXPR. This info is used by gimple_push_cleanup. */
static bool
gimple_conditional_context (void)
{
return gimplify_ctxp->conditions > 0;
}
/* Note that we've entered a COND_EXPR. */
static void
gimple_push_condition (void)
{
#ifdef ENABLE_GIMPLE_CHECKING
if (gimplify_ctxp->conditions == 0)
gcc_assert (gimple_seq_empty_p (gimplify_ctxp->conditional_cleanups));
#endif
++(gimplify_ctxp->conditions);
}
/* Note that we've left a COND_EXPR. If we're back at unconditional scope
now, add any conditional cleanups we've seen to the prequeue. */
static void
gimple_pop_condition (gimple_seq *pre_p)
{
int conds = --(gimplify_ctxp->conditions);
gcc_assert (conds >= 0);
if (conds == 0)
{
gimplify_seq_add_seq (pre_p, gimplify_ctxp->conditional_cleanups);
gimplify_ctxp->conditional_cleanups = NULL;
}
}
/* A stable comparison routine for use with splay trees and DECLs. */
static int
splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
{
tree a = (tree) xa;
tree b = (tree) xb;
return DECL_UID (a) - DECL_UID (b);
}
/* Create a new omp construct that deals with variable remapping. */
static struct gimplify_omp_ctx *
new_omp_context (enum omp_region_type region_type)
{
struct gimplify_omp_ctx *c;
c = XCNEW (struct gimplify_omp_ctx);
c->outer_context = gimplify_omp_ctxp;
c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
c->privatized_types = new hash_set<tree>;
c->location = input_location;
c->region_type = region_type;
if ((region_type & ORT_TASK) == 0)
c->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
else
c->default_kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
return c;
}
/* Destroy an omp construct that deals with variable remapping. */
static void
delete_omp_context (struct gimplify_omp_ctx *c)
{
splay_tree_delete (c->variables);
delete c->privatized_types;
XDELETE (c);
}
static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int);
static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool);
/* Both gimplify the statement T and append it to *SEQ_P. This function
behaves exactly as gimplify_stmt, but you don't have to pass T as a
reference. */
void
gimplify_and_add (tree t, gimple_seq *seq_p)
{
gimplify_stmt (&t, seq_p);
}
/* Gimplify statement T into sequence *SEQ_P, and return the first
tuple in the sequence of generated tuples for this statement.
Return NULL if gimplifying T produced no tuples. */
static gimple
gimplify_and_return_first (tree t, gimple_seq *seq_p)
{
gimple_stmt_iterator last = gsi_last (*seq_p);
gimplify_and_add (t, seq_p);
if (!gsi_end_p (last))
{
gsi_next (&last);
return gsi_stmt (last);
}
else
return gimple_seq_first_stmt (*seq_p);
}
/* Returns true iff T is a valid RHS for an assignment to an un-renamed
LHS, or for a call argument. */
static bool
is_gimple_mem_rhs (tree t)
{
/* If we're dealing with a renamable type, either source or dest must be
a renamed variable. */
if (is_gimple_reg_type (TREE_TYPE (t)))
return is_gimple_val (t);
else
return is_gimple_val (t) || is_gimple_lvalue (t);
}
/* Return true if T is a CALL_EXPR or an expression that can be
assigned to a temporary. Note that this predicate should only be
used during gimplification. See the rationale for this in
gimplify_modify_expr. */
static bool
is_gimple_reg_rhs_or_call (tree t)
{
return (get_gimple_rhs_class (TREE_CODE (t)) != GIMPLE_INVALID_RHS
|| TREE_CODE (t) == CALL_EXPR);
}
/* Return true if T is a valid memory RHS or a CALL_EXPR. Note that
this predicate should only be used during gimplification. See the
rationale for this in gimplify_modify_expr. */
static bool
is_gimple_mem_rhs_or_call (tree t)
{
/* If we're dealing with a renamable type, either source or dest must be
a renamed variable. */
if (is_gimple_reg_type (TREE_TYPE (t)))
return is_gimple_val (t);
else
return (is_gimple_val (t) || is_gimple_lvalue (t)
|| TREE_CODE (t) == CALL_EXPR);
}
/* Create a temporary with a name derived from VAL. Subroutine of
lookup_tmp_var; nobody else should call this function. */
static inline tree
create_tmp_from_val (tree val)
{
/* Drop all qualifiers and address-space information from the value type. */
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (val));
tree var = create_tmp_var (type, get_name (val));
if (TREE_CODE (TREE_TYPE (var)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (var)) == VECTOR_TYPE)
DECL_GIMPLE_REG_P (var) = 1;
return var;
}
/* Create a temporary to hold the value of VAL. If IS_FORMAL, try to reuse
an existing expression temporary. */
static tree
lookup_tmp_var (tree val, bool is_formal)
{
tree ret;
/* If not optimizing, never really reuse a temporary. local-alloc
won't allocate any variable that is used in more than one basic
block, which means it will go into memory, causing much extra
work in reload and final and poorer code generation, outweighing
the extra memory allocation here. */
if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
ret = create_tmp_from_val (val);
else
{
elt_t elt, *elt_p;
elt_t **slot;
elt.val = val;
if (!gimplify_ctxp->temp_htab)
gimplify_ctxp->temp_htab = new hash_table<gimplify_hasher> (1000);
slot = gimplify_ctxp->temp_htab->find_slot (&elt, INSERT);
if (*slot == NULL)
{
elt_p = XNEW (elt_t);
elt_p->val = val;
elt_p->temp = ret = create_tmp_from_val (val);
*slot = elt_p;
}
else
{
elt_p = *slot;
ret = elt_p->temp;
}
}
return ret;
}
/* Helper for get_formal_tmp_var and get_initialized_tmp_var. */
static tree
internal_get_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p,
bool is_formal)
{
tree t, mod;
/* Notice that we explicitly allow VAL to be a CALL_EXPR so that we
can create an INIT_EXPR and convert it into a GIMPLE_CALL below. */
gimplify_expr (&val, pre_p, post_p, is_gimple_reg_rhs_or_call,
fb_rvalue);
if (gimplify_ctxp->into_ssa
&& is_gimple_reg_type (TREE_TYPE (val)))
t = make_ssa_name (TYPE_MAIN_VARIANT (TREE_TYPE (val)));
else
t = lookup_tmp_var (val, is_formal);
mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val));
SET_EXPR_LOCATION (mod, EXPR_LOC_OR_LOC (val, input_location));
/* gimplify_modify_expr might want to reduce this further. */
gimplify_and_add (mod, pre_p);
ggc_free (mod);
return t;
}
/* Return a formal temporary variable initialized with VAL. PRE_P is as
in gimplify_expr. Only use this function if:
1) The value of the unfactored expression represented by VAL will not
change between the initialization and use of the temporary, and
2) The temporary will not be otherwise modified.
For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
and #2 means it is inappropriate for && temps.
For other cases, use get_initialized_tmp_var instead. */
tree
get_formal_tmp_var (tree val, gimple_seq *pre_p)
{
return internal_get_tmp_var (val, pre_p, NULL, true);
}
/* Return a temporary variable initialized with VAL. PRE_P and POST_P
are as in gimplify_expr. */
tree
get_initialized_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p)
{
return internal_get_tmp_var (val, pre_p, post_p, false);
}
/* Declare all the variables in VARS in SCOPE. If DEBUG_INFO is true,
generate debug info for them; otherwise don't. */
void
declare_vars (tree vars, gimple gs, bool debug_info)
{
tree last = vars;
if (last)
{
tree temps, block;
gbind *scope = as_a <gbind *> (gs);
temps = nreverse (last);
block = gimple_bind_block (scope);
gcc_assert (!block || TREE_CODE (block) == BLOCK);
if (!block || !debug_info)
{
DECL_CHAIN (last) = gimple_bind_vars (scope);
gimple_bind_set_vars (scope, temps);
}
else
{
/* We need to attach the nodes both to the BIND_EXPR and to its
associated BLOCK for debugging purposes. The key point here
is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
is a subchain of the BIND_EXPR_VARS of the BIND_EXPR. */
if (BLOCK_VARS (block))
BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
else
{
gimple_bind_set_vars (scope,
chainon (gimple_bind_vars (scope), temps));
BLOCK_VARS (block) = temps;
}
}
}
}
/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly. Abort if
no such upper bound can be obtained. */
static void
force_constant_size (tree var)
{
/* The only attempt we make is by querying the maximum size of objects
of the variable's type. */
HOST_WIDE_INT max_size;
gcc_assert (TREE_CODE (var) == VAR_DECL);
max_size = max_int_size_in_bytes (TREE_TYPE (var));
gcc_assert (max_size >= 0);
DECL_SIZE_UNIT (var)
= build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
DECL_SIZE (var)
= build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}
/* Push the temporary variable TMP into the current binding. */
void
gimple_add_tmp_var_fn (struct function *fn, tree tmp)
{
gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));
/* Later processing assumes that the object size is constant, which might
not be true at this point. Force the use of a constant upper bound in
this case. */
if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (tmp)))
force_constant_size (tmp);
DECL_CONTEXT (tmp) = fn->decl;
DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;
record_vars_into (tmp, fn->decl);
}
/* Push the temporary variable TMP into the current binding. */
void
gimple_add_tmp_var (tree tmp)
{
gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));
/* Later processing assumes that the object size is constant, which might
not be true at this point. Force the use of a constant upper bound in
this case. */
if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (tmp)))
force_constant_size (tmp);
DECL_CONTEXT (tmp) = current_function_decl;
DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;
if (gimplify_ctxp)
{
DECL_CHAIN (tmp) = gimplify_ctxp->temps;
gimplify_ctxp->temps = tmp;
/* Mark temporaries local within the nearest enclosing parallel. */
if (gimplify_omp_ctxp)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
while (ctx
&& (ctx->region_type == ORT_WORKSHARE
|| ctx->region_type == ORT_SIMD))
ctx = ctx->outer_context;
if (ctx)
omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN);
}
}
else if (cfun)
record_vars (tmp);
else
{
gimple_seq body_seq;
/* This case is for nested functions. We need to expose the locals
they create. */
body_seq = gimple_body (current_function_decl);
declare_vars (tmp, gimple_seq_first_stmt (body_seq), false);
}
}
/* This page contains routines to unshare tree nodes, i.e. to duplicate tree
nodes that are referenced more than once in GENERIC functions. This is
necessary because gimplification (translation into GIMPLE) is performed
by modifying tree nodes in-place, so gimplification of a shared node in a
first context could generate an invalid GIMPLE form in a second context.
This is achieved with a simple mark/copy/unmark algorithm that walks the
GENERIC representation top-down, marks nodes with TREE_VISITED the first
time it encounters them, duplicates them if they already have TREE_VISITED
set, and finally removes the TREE_VISITED marks it has set.
The algorithm works only at the function level, i.e. it generates a GENERIC
representation of a function with no nodes shared within the function when
passed a GENERIC function (except for nodes that are allowed to be shared).
At the global level, it is also necessary to unshare tree nodes that are
referenced in more than one function, for the same aforementioned reason.
This requires some cooperation from the front-end. There are 2 strategies:
1. Manual unsharing. The front-end needs to call unshare_expr on every
expression that might end up being shared across functions.
2. Deep unsharing. This is an extension of regular unsharing. Instead
of calling unshare_expr on expressions that might be shared across
functions, the front-end pre-marks them with TREE_VISITED. This will
ensure that they are unshared on the first reference within functions
when the regular unsharing algorithm runs. The counterpart is that
this algorithm must look deeper than for manual unsharing, which is
specified by LANG_HOOKS_DEEP_UNSHARING.
If there are only few specific cases of node sharing across functions, it is
probably easier for a front-end to unshare the expressions manually. On the
contrary, if the expressions generated at the global level are as widespread
as expressions generated within functions, deep unsharing is very likely the
way to go. */
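/* As a concrete example: if a front-end reuses one PLUS_EXPR node for both
occurrences of "x + 1" in "tmp = x + 1; y = (x + 1) * 2", the walk below
leaves the first occurrence marked TREE_VISITED and, on reaching the
second, copies it so each statement ends up with its own operand tree.  */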
/* Similar to copy_tree_r but do not copy SAVE_EXPR or TARGET_EXPR nodes.
These nodes model computations that must be done once. If we were to
unshare something like SAVE_EXPR(i++), the gimplification process would
create wrong code. However, if DATA is non-null, it must hold a pointer
set that is used to unshare the subtrees of these nodes. */
static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
tree t = *tp;
enum tree_code code = TREE_CODE (t);
/* Do not copy SAVE_EXPR, TARGET_EXPR or BIND_EXPR nodes themselves, but
copy their subtrees if we can make sure to do it only once. */
if (code == SAVE_EXPR || code == TARGET_EXPR || code == BIND_EXPR)
{
if (data && !((hash_set<tree> *)data)->add (t))
;
else
*walk_subtrees = 0;
}
/* Stop at types, decls, constants like copy_tree_r. */
else if (TREE_CODE_CLASS (code) == tcc_type
|| TREE_CODE_CLASS (code) == tcc_declaration
|| TREE_CODE_CLASS (code) == tcc_constant
/* We can't do anything sensible with a BLOCK used as an
expression, but we also can't just die when we see it
because of non-expression uses. So we avert our eyes
and cross our fingers. Silly Java. */
|| code == BLOCK)
*walk_subtrees = 0;
/* Cope with the statement expression extension. */
else if (code == STATEMENT_LIST)
;
/* Leave the bulk of the work to copy_tree_r itself. */
else
copy_tree_r (tp, walk_subtrees, NULL);
return NULL_TREE;
}
/* Callback for walk_tree to unshare most of the shared trees rooted at *TP.
If *TP has been visited already, then *TP is deeply copied by calling
mostly_copy_tree_r. DATA is passed to mostly_copy_tree_r unmodified. */
static tree
copy_if_shared_r (tree *tp, int *walk_subtrees, void *data)
{
tree t = *tp;
enum tree_code code = TREE_CODE (t);
/* Skip types, decls, and constants. But we do want to look at their
types and the bounds of types. Mark them as visited so we properly
unmark their subtrees on the unmark pass. If we've already seen them,
don't look down further. */
if (TREE_CODE_CLASS (code) == tcc_type
|| TREE_CODE_CLASS (code) == tcc_declaration
|| TREE_CODE_CLASS (code) == tcc_constant)
{
if (TREE_VISITED (t))
*walk_subtrees = 0;
else
TREE_VISITED (t) = 1;
}
/* If this node has been visited already, unshare it and don't look
any deeper. */
else if (TREE_VISITED (t))
{
walk_tree (tp, mostly_copy_tree_r, data, NULL);
*walk_subtrees = 0;
}
/* Otherwise, mark the node as visited and keep looking. */
else
TREE_VISITED (t) = 1;
return NULL_TREE;
}
/* Unshare most of the shared trees rooted at *TP. DATA is passed to the
copy_if_shared_r callback unmodified. */
static inline void
copy_if_shared (tree *tp, void *data)
{
walk_tree (tp, copy_if_shared_r, data, NULL);
}
/* Unshare all the trees in the body of FNDECL, as well as in the bodies of
any nested functions. */
static void
unshare_body (tree fndecl)
{
struct cgraph_node *cgn = cgraph_node::get (fndecl);
/* If the language requires deep unsharing, we need a pointer set to make
sure we don't repeatedly unshare subtrees of unshareable nodes. */
hash_set<tree> *visited
= lang_hooks.deep_unsharing ? new hash_set<tree> : NULL;
copy_if_shared (&DECL_SAVED_TREE (fndecl), visited);
copy_if_shared (&DECL_SIZE (DECL_RESULT (fndecl)), visited);
copy_if_shared (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)), visited);
delete visited;
if (cgn)
for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
unshare_body (cgn->decl);
}
/* Callback for walk_tree to unmark the visited trees rooted at *TP.
Subtrees are walked until the first unvisited node is encountered. */
static tree
unmark_visited_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
tree t = *tp;
/* If this node has been visited, unmark it and keep looking. */
if (TREE_VISITED (t))
TREE_VISITED (t) = 0;
/* Otherwise, don't look any deeper. */
else
*walk_subtrees = 0;
return NULL_TREE;
}
/* Unmark the visited trees rooted at *TP. */
static inline void
unmark_visited (tree *tp)
{
walk_tree (tp, unmark_visited_r, NULL, NULL);
}
/* Likewise, but mark all trees as not visited. */
static void
unvisit_body (tree fndecl)
{
struct cgraph_node *cgn = cgraph_node::get (fndecl);
unmark_visited (&DECL_SAVED_TREE (fndecl));
unmark_visited (&DECL_SIZE (DECL_RESULT (fndecl)));
unmark_visited (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)));
if (cgn)
for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
unvisit_body (cgn->decl);
}
/* Unconditionally make an unshared copy of EXPR. This is used when using
stored expressions which span multiple functions, such as BINFO_VTABLE,
as the normal unsharing process can't tell that they're shared. */
tree
unshare_expr (tree expr)
{
walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
return expr;
}
/* Worker for unshare_expr_without_location. */
static tree
prune_expr_location (tree *tp, int *walk_subtrees, void *)
{
if (EXPR_P (*tp))
SET_EXPR_LOCATION (*tp, UNKNOWN_LOCATION);
else
*walk_subtrees = 0;
return NULL_TREE;
}
/* Similar to unshare_expr but also prune all expression locations
from EXPR. */
tree
unshare_expr_without_location (tree expr)
{
walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
if (EXPR_P (expr))
walk_tree (&expr, prune_expr_location, NULL, NULL);
return expr;
}
/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
contain statements and have a value. Assign its value to a temporary
and give it void_type_node. Return the temporary, or NULL_TREE if
WRAPPER was already void. */
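/* For example, when a statement expression "({ foo (); 42; })" appears on
the RHS of an assignment, the wrapper's type is forced to void and the
assignment is pushed down onto the last value-producing statement inside
the wrapper, so the caller is left with void statements plus "temp = 42".  */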
tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
tree type = TREE_TYPE (wrapper);
if (type && !VOID_TYPE_P (type))
{
tree *p;
/* Set p to point to the body of the wrapper. Loop until we find
something that isn't a wrapper. */
for (p = &wrapper; p && *p; )
{
switch (TREE_CODE (*p))
{
case BIND_EXPR:
TREE_SIDE_EFFECTS (*p) = 1;
TREE_TYPE (*p) = void_type_node;
/* For a BIND_EXPR, the body is operand 1. */
p = &BIND_EXPR_BODY (*p);
break;
case CLEANUP_POINT_EXPR:
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
TREE_SIDE_EFFECTS (*p) = 1;
TREE_TYPE (*p) = void_type_node;
p = &TREE_OPERAND (*p, 0);
break;
case STATEMENT_LIST:
{
tree_stmt_iterator i = tsi_last (*p);
TREE_SIDE_EFFECTS (*p) = 1;
TREE_TYPE (*p) = void_type_node;
p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
}
break;
case COMPOUND_EXPR:
/* Advance to the last statement. Set all container types to
void. */
for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1))
{
TREE_SIDE_EFFECTS (*p) = 1;
TREE_TYPE (*p) = void_type_node;
}
break;
case TRANSACTION_EXPR:
TREE_SIDE_EFFECTS (*p) = 1;
TREE_TYPE (*p) = void_type_node;
p = &TRANSACTION_EXPR_BODY (*p);
break;
default:
/* Assume that any tree upon which voidify_wrapper_expr is
directly called is a wrapper, and that its body is op0. */
if (p == &wrapper)
{
TREE_SIDE_EFFECTS (*p) = 1;
TREE_TYPE (*p) = void_type_node;
p = &TREE_OPERAND (*p, 0);
break;
}
goto out;
}
}
out:
if (p == NULL || IS_EMPTY_STMT (*p))
temp = NULL_TREE;
else if (temp)
{
/* The wrapper is on the RHS of an assignment that we're pushing
down. */
gcc_assert (TREE_CODE (temp) == INIT_EXPR
|| TREE_CODE (temp) == MODIFY_EXPR);
TREE_OPERAND (temp, 1) = *p;
*p = temp;
}
else
{
temp = create_tmp_var (type, "retval");
*p = build2 (INIT_EXPR, type, temp, *p);
}
return temp;
}
return NULL_TREE;
}
/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
a temporary through which they communicate. */
static void
build_stack_save_restore (gcall **save, gcall **restore)
{
tree tmp_var;
*save = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0);
tmp_var = create_tmp_var (ptr_type_node, "saved_stack");
gimple_call_set_lhs (*save, tmp_var);
*restore
= gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE),
1, tmp_var);
}
/* Gimplify a BIND_EXPR. Just voidify and recurse. */
static enum gimplify_status
gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
{
tree bind_expr = *expr_p;
bool old_save_stack = gimplify_ctxp->save_stack;
tree t;
gbind *bind_stmt;
gimple_seq body, cleanup;
gcall *stack_save;
location_t start_locus = 0, end_locus = 0;
tree temp = voidify_wrapper_expr (bind_expr, NULL);
/* Mark variables seen in this bind expr. */
for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
{
if (TREE_CODE (t) == VAR_DECL)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
/* Mark variable as local. */
if (ctx && !DECL_EXTERNAL (t)
&& (! DECL_SEEN_IN_BIND_EXPR_P (t)
|| splay_tree_lookup (ctx->variables,
(splay_tree_key) t) == NULL))
{
if (ctx->region_type == ORT_SIMD
&& TREE_ADDRESSABLE (t)
&& !TREE_STATIC (t))
omp_add_variable (ctx, t, GOVD_PRIVATE | GOVD_SEEN);
else
omp_add_variable (ctx, t, GOVD_LOCAL | GOVD_SEEN);
}
DECL_SEEN_IN_BIND_EXPR_P (t) = 1;
if (DECL_HARD_REGISTER (t) && !is_global_var (t) && cfun)
cfun->has_local_explicit_reg_vars = true;
}
/* Preliminarily mark non-addressed complex variables as eligible
for promotion to gimple registers. We'll transform their uses
as we find them. */
if ((TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
&& !TREE_THIS_VOLATILE (t)
&& (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t))
&& !needs_to_live_in_memory (t))
DECL_GIMPLE_REG_P (t) = 1;
}
bind_stmt = gimple_build_bind (BIND_EXPR_VARS (bind_expr), NULL,
BIND_EXPR_BLOCK (bind_expr));
gimple_push_bind_expr (bind_stmt);
gimplify_ctxp->save_stack = false;
/* Gimplify the body into the GIMPLE_BIND tuple's body. */
body = NULL;
gimplify_stmt (&BIND_EXPR_BODY (bind_expr), &body);
gimple_bind_set_body (bind_stmt, body);
/* Source location wise, the cleanup code (stack_restore and clobbers)
belongs to the end of the block, so propagate what we have. The
stack_save operation belongs to the beginning of block, which we can
infer from the bind_expr directly if the block has no explicit
assignment. */
if (BIND_EXPR_BLOCK (bind_expr))
{
end_locus = BLOCK_SOURCE_END_LOCATION (BIND_EXPR_BLOCK (bind_expr));
start_locus = BLOCK_SOURCE_LOCATION (BIND_EXPR_BLOCK (bind_expr));
}
if (start_locus == 0)
start_locus = EXPR_LOCATION (bind_expr);
cleanup = NULL;
stack_save = NULL;
if (gimplify_ctxp->save_stack)
{
gcall *stack_restore;
/* Save stack on entry and restore it on exit. Add a try_finally
block to achieve this. */
build_stack_save_restore (&stack_save, &stack_restore);
gimple_set_location (stack_save, start_locus);
gimple_set_location (stack_restore, end_locus);
gimplify_seq_add_stmt (&cleanup, stack_restore);
}
/* Add clobbers for all variables that go out of scope. */
for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
{
if (TREE_CODE (t) == VAR_DECL
&& !is_global_var (t)
&& DECL_CONTEXT (t) == current_function_decl
&& !DECL_HARD_REGISTER (t)
&& !TREE_THIS_VOLATILE (t)
&& !DECL_HAS_VALUE_EXPR_P (t)
/* Only care for variables that have to be in memory. Others
will be rewritten into SSA names, hence moved to the top-level. */
&& !is_gimple_reg (t)
&& flag_stack_reuse != SR_NONE)
{
tree clobber = build_constructor (TREE_TYPE (t), NULL);
gimple clobber_stmt;
TREE_THIS_VOLATILE (clobber) = 1;
clobber_stmt = gimple_build_assign (t, clobber);
gimple_set_location (clobber_stmt, end_locus);
gimplify_seq_add_stmt (&cleanup, clobber_stmt);
}
}
if (cleanup)
{
gtry *gs;
gimple_seq new_body;
new_body = NULL;
gs = gimple_build_try (gimple_bind_body (bind_stmt), cleanup,
GIMPLE_TRY_FINALLY);
if (stack_save)
gimplify_seq_add_stmt (&new_body, stack_save);
gimplify_seq_add_stmt (&new_body, gs);
gimple_bind_set_body (bind_stmt, new_body);
}
gimplify_ctxp->save_stack = old_save_stack;
gimple_pop_bind_expr ();
gimplify_seq_add_stmt (pre_p, bind_stmt);
if (temp)
{
*expr_p = temp;
return GS_OK;
}
*expr_p = NULL_TREE;
return GS_ALL_DONE;
}
/* Gimplify a RETURN_EXPR. If the expression to be returned is not a
GIMPLE value, it is assigned to a new temporary and the statement is
re-written to return the temporary.
PRE_P points to the sequence where side effects that must happen before
STMT should be stored. */
static enum gimplify_status
gimplify_return_expr (tree stmt, gimple_seq *pre_p)
{
greturn *ret;
tree ret_expr = TREE_OPERAND (stmt, 0);
tree result_decl, result;
if (ret_expr == error_mark_node)
return GS_ERROR;
/* Implicit _Cilk_sync must be inserted right before any return statement
if there is a _Cilk_spawn in the function. If the user has provided a
_Cilk_sync, the optimizer should remove this duplicate one. */
if (fn_contains_cilk_spawn_p (cfun))
{
tree impl_sync = build0 (CILK_SYNC_STMT, void_type_node);
gimplify_and_add (impl_sync, pre_p);
}
if (!ret_expr
|| TREE_CODE (ret_expr) == RESULT_DECL
|| ret_expr == error_mark_node)
{
greturn *ret = gimple_build_return (ret_expr);
gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
gimplify_seq_add_stmt (pre_p, ret);
return GS_ALL_DONE;
}
if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
result_decl = NULL_TREE;
else
{
result_decl = TREE_OPERAND (ret_expr, 0);
/* See through a return by reference. */
if (TREE_CODE (result_decl) == INDIRECT_REF)
result_decl = TREE_OPERAND (result_decl, 0);
gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
|| TREE_CODE (ret_expr) == INIT_EXPR)
&& TREE_CODE (result_decl) == RESULT_DECL);
}
/* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
Recall that aggregate_value_p is FALSE for any aggregate type that is
returned in registers. If we're returning values in registers, then
we don't want to extend the lifetime of the RESULT_DECL, particularly
across another call. In addition, for those aggregates for which
hard_function_value generates a PARALLEL, we'll die during normal
expansion of structure assignments; there's special code in expand_return
to handle this case that does not exist in expand_expr. */
if (!result_decl)
result = NULL_TREE;
else if (aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
{
if (TREE_CODE (DECL_SIZE (result_decl)) != INTEGER_CST)
{
if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (result_decl)))
gimplify_type_sizes (TREE_TYPE (result_decl), pre_p);
/* Note that we don't use gimplify_vla_decl because the RESULT_DECL
should be effectively allocated by the caller, i.e. all calls to
this function must be subject to the Return Slot Optimization. */
gimplify_one_sizepos (&DECL_SIZE (result_decl), pre_p);
gimplify_one_sizepos (&DECL_SIZE_UNIT (result_decl), pre_p);
}
result = result_decl;
}
else if (gimplify_ctxp->return_temp)
result = gimplify_ctxp->return_temp;
else
{
result = create_tmp_reg (TREE_TYPE (result_decl));
/* ??? With complex control flow (usually involving abnormal edges),
we can wind up warning about an uninitialized value for this. Due
to how this variable is constructed and initialized, this is never
true. Give up and never warn. */
TREE_NO_WARNING (result) = 1;
gimplify_ctxp->return_temp = result;
}
/* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
Then gimplify the whole thing. */
if (result != result_decl)
TREE_OPERAND (ret_expr, 0) = result;
gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);
ret = gimple_build_return (result);
gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
gimplify_seq_add_stmt (pre_p, ret);
return GS_ALL_DONE;
}
/* Gimplify a variable-length array DECL. */
static void
gimplify_vla_decl (tree decl, gimple_seq *seq_p)
{
/* This is a variable-sized decl. Simplify its size and mark it
for deferred expansion. */
tree t, addr, ptr_type;
gimplify_one_sizepos (&DECL_SIZE (decl), seq_p);
gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), seq_p);
/* Don't mess with a DECL_VALUE_EXPR set by the front-end. */
if (DECL_HAS_VALUE_EXPR_P (decl))
return;
/* All occurrences of this decl in final gimplified code will be
replaced by indirection. Setting DECL_VALUE_EXPR does two
things: First, it lets the rest of the gimplifier know what
replacement to use. Second, it lets the debug info know
where to find the value. */
ptr_type = build_pointer_type (TREE_TYPE (decl));
addr = create_tmp_var (ptr_type, get_name (decl));
DECL_IGNORED_P (addr) = 0;
t = build_fold_indirect_ref (addr);
TREE_THIS_NOTRAP (t) = 1;
SET_DECL_VALUE_EXPR (decl, t);
DECL_HAS_VALUE_EXPR_P (decl) = 1;
t = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
t = build_call_expr (t, 2, DECL_SIZE_UNIT (decl),
size_int (DECL_ALIGN (decl)));
/* The call has been built for a variable-sized object. */
CALL_ALLOCA_FOR_VAR_P (t) = 1;
t = fold_convert (ptr_type, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);
gimplify_and_add (t, seq_p);
/* Indicate that we need to restore the stack level when the
enclosing BIND_EXPR is exited. */
gimplify_ctxp->save_stack = true;
}
/* A helper function to be called via walk_tree. Mark all labels under *TP
as being forced. To be called for DECL_INITIAL of static variables. */
static tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
if (TYPE_P (*tp))
*walk_subtrees = 0;
if (TREE_CODE (*tp) == LABEL_DECL)
FORCED_LABEL (*tp) = 1;
return NULL_TREE;
}
/* Gimplify a DECL_EXPR node *STMT_P by making any necessary allocation
and initialization explicit. */
static enum gimplify_status
gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p)
{
tree stmt = *stmt_p;
tree decl = DECL_EXPR_DECL (stmt);
*stmt_p = NULL_TREE;
if (TREE_TYPE (decl) == error_mark_node)
return GS_ERROR;
if ((TREE_CODE (decl) == TYPE_DECL
|| TREE_CODE (decl) == VAR_DECL)
&& !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
gimplify_type_sizes (TREE_TYPE (decl), seq_p);
/* ??? DECL_ORIGINAL_TYPE is streamed for LTO so it needs to be gimplified
in case its size expressions contain problematic nodes like CALL_EXPR. */
if (TREE_CODE (decl) == TYPE_DECL
&& DECL_ORIGINAL_TYPE (decl)
&& !TYPE_SIZES_GIMPLIFIED (DECL_ORIGINAL_TYPE (decl)))
gimplify_type_sizes (DECL_ORIGINAL_TYPE (decl), seq_p);
if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
{
tree init = DECL_INITIAL (decl);
if (TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
|| (!TREE_STATIC (decl)
&& flag_stack_check == GENERIC_STACK_CHECK
&& compare_tree_int (DECL_SIZE_UNIT (decl),
STACK_CHECK_MAX_VAR_SIZE) > 0))
gimplify_vla_decl (decl, seq_p);
/* Some front ends do not explicitly declare all anonymous
artificial variables. We compensate here by declaring the
variables, though it would be better if the front ends would
explicitly declare them. */
if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
&& DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
gimple_add_tmp_var (decl);
if (init && init != error_mark_node)
{
if (!TREE_STATIC (decl))
{
DECL_INITIAL (decl) = NULL_TREE;
init = build2 (INIT_EXPR, void_type_node, decl, init);
gimplify_and_add (init, seq_p);
ggc_free (init);
}
else
/* We must still examine initializers for static variables
as they may contain a label address. */
walk_tree (&init, force_labels_r, NULL, NULL);
}
}
return GS_ALL_DONE;
}
/* Gimplify a LOOP_EXPR. Normally this just involves gimplifying the body
and replacing the LOOP_EXPR with goto, but if the loop contains an
EXIT_EXPR, we need to append a label for it to jump to. */
static enum gimplify_status
gimplify_loop_expr (tree *expr_p, gimple_seq *pre_p)
{
tree saved_label = gimplify_ctxp->exit_label;
tree start_label = create_artificial_label (UNKNOWN_LOCATION);
gimplify_seq_add_stmt (pre_p, gimple_build_label (start_label));
gimplify_ctxp->exit_label = NULL_TREE;
gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);
gimplify_seq_add_stmt (pre_p, gimple_build_goto (start_label));
if (gimplify_ctxp->exit_label)
gimplify_seq_add_stmt (pre_p,
gimple_build_label (gimplify_ctxp->exit_label));
gimplify_ctxp->exit_label = saved_label;
*expr_p = NULL;
return GS_ALL_DONE;
}
/* Gimplify a statement list onto a sequence. These may be created either
by an enlightened front-end, or by shortcut_cond_expr. */
static enum gimplify_status
gimplify_statement_list (tree *expr_p, gimple_seq *pre_p)
{
tree temp = voidify_wrapper_expr (*expr_p, NULL);
tree_stmt_iterator i = tsi_start (*expr_p);
while (!tsi_end_p (i))
{
gimplify_stmt (tsi_stmt_ptr (i), pre_p);
tsi_delink (&i);
}
if (temp)
{
*expr_p = temp;
return GS_OK;
}
return GS_ALL_DONE;
}
/* Gimplify a SWITCH_EXPR, and collect the vector of labels it can
branch to. */
static enum gimplify_status
gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
{
tree switch_expr = *expr_p;
gimple_seq switch_body_seq = NULL;
enum gimplify_status ret;
tree index_type = TREE_TYPE (switch_expr);
if (index_type == NULL_TREE)
index_type = TREE_TYPE (SWITCH_COND (switch_expr));
ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val,
fb_rvalue);
if (ret == GS_ERROR || ret == GS_UNHANDLED)
return ret;
if (SWITCH_BODY (switch_expr))
{
vec<tree> labels;
vec<tree> saved_labels;
tree default_case = NULL_TREE;
gswitch *switch_stmt;
/* If someone can be bothered to fill in the labels, they can
be bothered to null out the body too. */
gcc_assert (!SWITCH_LABELS (switch_expr));
/* Save old labels, get new ones from body, then restore the old
labels. Save all the things from the switch body to append after. */
saved_labels = gimplify_ctxp->case_labels;
gimplify_ctxp->case_labels.create (8);
gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);
labels = gimplify_ctxp->case_labels;
gimplify_ctxp->case_labels = saved_labels;
preprocess_case_label_vec_for_gimple (labels, index_type,
&default_case);
if (!default_case)
{
glabel *new_default;
default_case
= build_case_label (NULL_TREE, NULL_TREE,
create_artificial_label (UNKNOWN_LOCATION));
new_default = gimple_build_label (CASE_LABEL (default_case));
gimplify_seq_add_stmt (&switch_body_seq, new_default);
}
switch_stmt = gimple_build_switch (SWITCH_COND (switch_expr),
default_case, labels);
gimplify_seq_add_stmt (pre_p, switch_stmt);
gimplify_seq_add_seq (pre_p, switch_body_seq);
labels.release ();
}
else
gcc_assert (SWITCH_LABELS (switch_expr));
return GS_ALL_DONE;
}
/* Gimplify the CASE_LABEL_EXPR pointed to by EXPR_P. */
static enum gimplify_status
gimplify_case_label_expr (tree *expr_p, gimple_seq *pre_p)
{
struct gimplify_ctx *ctxp;
glabel *label_stmt;
/* Invalid programs can play Duff's Device type games with, for example,
#pragma omp parallel. At least in the C front end, we don't
detect such invalid branches until after gimplification, in the
diagnose_omp_blocks pass. */
for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context)
if (ctxp->case_labels.exists ())
break;
label_stmt = gimple_build_label (CASE_LABEL (*expr_p));
ctxp->case_labels.safe_push (*expr_p);
gimplify_seq_add_stmt (pre_p, label_stmt);
return GS_ALL_DONE;
}
/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
if necessary. */
tree
build_and_jump (tree *label_p)
{
if (label_p == NULL)
/* If there's nowhere to jump, just fall through. */
return NULL_TREE;
if (*label_p == NULL_TREE)
{
tree label = create_artificial_label (UNKNOWN_LOCATION);
*label_p = label;
}
return build1 (GOTO_EXPR, void_type_node, *label_p);
}
/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
This also involves building a label to jump to and communicating it to
gimplify_loop_expr through gimplify_ctxp->exit_label. */
static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
tree cond = TREE_OPERAND (*expr_p, 0);
tree expr;
expr = build_and_jump (&gimplify_ctxp->exit_label);
expr = build3 (COND_EXPR, void_type_node, cond, expr, NULL_TREE);
*expr_p = expr;
return GS_OK;
}
/* *EXPR_P is a COMPONENT_REF being used as an rvalue. If its type is
different from its canonical type, wrap the whole thing inside a
NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
type.
The canonical type of a COMPONENT_REF is the type of the field being
referenced--unless the field is a bit-field which can be read directly
in a smaller mode, in which case the canonical type is the
sign-appropriate type corresponding to that mode. */
static void
canonicalize_component_ref (tree *expr_p)
{
tree expr = *expr_p;
tree type;
gcc_assert (TREE_CODE (expr) == COMPONENT_REF);
if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
else
type = TREE_TYPE (TREE_OPERAND (expr, 1));
/* One could argue that all the stuff below is not necessary for
the non-bitfield case and declare it a FE error if type
adjustment would be needed. */
if (TREE_TYPE (expr) != type)
{
#ifdef ENABLE_TYPES_CHECKING
tree old_type = TREE_TYPE (expr);
#endif
int type_quals;
/* We need to preserve qualifiers and propagate them from
operand 0. */
type_quals = TYPE_QUALS (type)
| TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0)));
if (TYPE_QUALS (type) != type_quals)
type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals);
/* Set the type of the COMPONENT_REF to the underlying type. */
TREE_TYPE (expr) = type;
#ifdef ENABLE_TYPES_CHECKING
/* It is now a FE error, if the conversion from the canonical
type to the original expression type is not useless. */
gcc_assert (useless_type_conversion_p (old_type, type));
#endif
}
}
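/* For instance (an illustrative sketch, not an exhaustive description):
   given

     struct S { int f : 8; } s;

   a read of s.f may have been widened by the front end to type int;
   get_unwidened can expose the narrower sign-appropriate type for the
   bit-field, and the COMPONENT_REF is retyped to it (with the
   qualifiers of the object propagated), which in turn exposes the
   enclosing conversion back to int as redundant. */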
/* If a NOP conversion is changing a pointer to array of foo to a pointer
to foo, embed that change in the ADDR_EXPR by converting
T array[U];
(T *)&array
==>
&array[L]
where L is the lower bound. For simplicity, only do this for constant
lower bound.
The constraint is that the type of &array[L] is trivially convertible
to T *. */
static void
canonicalize_addr_expr (tree *expr_p)
{
tree expr = *expr_p;
tree addr_expr = TREE_OPERAND (expr, 0);
tree datype, ddatype, pddatype;
/* We simplify only conversions from an ADDR_EXPR to a pointer type. */
if (!POINTER_TYPE_P (TREE_TYPE (expr))
|| TREE_CODE (addr_expr) != ADDR_EXPR)
return;
/* The addr_expr type should be a pointer to an array. */
datype = TREE_TYPE (TREE_TYPE (addr_expr));
if (TREE_CODE (datype) != ARRAY_TYPE)
return;
/* The pointer to element type shall be trivially convertible to
the expression pointer type. */
ddatype = TREE_TYPE (datype);
pddatype = build_pointer_type (ddatype);
if (!useless_type_conversion_p (TYPE_MAIN_VARIANT (TREE_TYPE (expr)),
pddatype))
return;
/* The lower bound and element sizes must be constant. */
if (!TYPE_SIZE_UNIT (ddatype)
|| TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST
|| !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
|| TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
return;
/* All checks succeeded. Build a new node to merge the cast. */
*expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0),
TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
NULL_TREE, NULL_TREE);
*expr_p = build1 (ADDR_EXPR, pddatype, *expr_p);
/* We may have stripped a required restrict qualifier above. */
if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
*expr_p = fold_convert (TREE_TYPE (expr), *expr_p);
}
/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR. Remove it and/or other conversions
underneath as appropriate. */
static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
location_t loc = EXPR_LOCATION (*expr_p);
gcc_assert (CONVERT_EXPR_P (*expr_p));
/* Then strip away all but the outermost conversion. */
STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));
/* And remove the outermost conversion if it's useless. */
if (tree_ssa_useless_type_conversion (*expr_p))
*expr_p = TREE_OPERAND (*expr_p, 0);
/* If we still have a conversion at the toplevel,
then canonicalize some constructs. */
if (CONVERT_EXPR_P (*expr_p))
{
tree sub = TREE_OPERAND (*expr_p, 0);
/* If a NOP conversion is changing the type of a COMPONENT_REF
expression, then canonicalize its type now in order to expose more
redundant conversions. */
if (TREE_CODE (sub) == COMPONENT_REF)
canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));
/* If a NOP conversion is changing a pointer to array of foo
to a pointer to foo, embed that change in the ADDR_EXPR. */
else if (TREE_CODE (sub) == ADDR_EXPR)
canonicalize_addr_expr (expr_p);
}
/* If we have a conversion to a non-register type force the
use of a VIEW_CONVERT_EXPR instead. */
if (CONVERT_EXPR_P (*expr_p) && !is_gimple_reg_type (TREE_TYPE (*expr_p)))
*expr_p = fold_build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0));
/* Canonicalize CONVERT_EXPR to NOP_EXPR. */
if (TREE_CODE (*expr_p) == CONVERT_EXPR)
TREE_SET_CODE (*expr_p, NOP_EXPR);
return GS_OK;
}
/* Nonlocal VLAs seen in the current function. */
static hash_set<tree> *nonlocal_vlas;
/* The VAR_DECLs created for nonlocal VLAs for debug info purposes. */
static tree nonlocal_vla_vars;
/* Gimplify a VAR_DECL or PARM_DECL. Return GS_OK if we expanded a
DECL_VALUE_EXPR, and it's worth re-examining things. */
static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
tree decl = *expr_p;
/* ??? If this is a local variable, and it has not been seen in any
outer BIND_EXPR, then it's probably the result of a duplicate
declaration, for which we've already issued an error. It would
be really nice if the front end wouldn't leak these at all.
Currently the only known culprit is C++ destructors, as seen
in g++.old-deja/g++.jason/binding.C. */
if (TREE_CODE (decl) == VAR_DECL
&& !DECL_SEEN_IN_BIND_EXPR_P (decl)
&& !TREE_STATIC (decl) && !DECL_EXTERNAL (decl)
&& decl_function_context (decl) == current_function_decl)
{
gcc_assert (seen_error ());
return GS_ERROR;
}
/* When within an OMP context, notice uses of variables. */
if (gimplify_omp_ctxp && omp_notice_variable (gimplify_omp_ctxp, decl, true))
return GS_ALL_DONE;
/* If the decl is an alias for another expression, substitute it now. */
if (DECL_HAS_VALUE_EXPR_P (decl))
{
tree value_expr = DECL_VALUE_EXPR (decl);
/* For referenced nonlocal VLAs add a decl for debugging purposes
to the current function. */
if (TREE_CODE (decl) == VAR_DECL
&& TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST
&& nonlocal_vlas != NULL
&& TREE_CODE (value_expr) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (value_expr, 0)) == VAR_DECL
&& decl_function_context (decl) != current_function_decl)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
while (ctx
&& (ctx->region_type == ORT_WORKSHARE
|| ctx->region_type == ORT_SIMD))
ctx = ctx->outer_context;
if (!ctx && !nonlocal_vlas->add (decl))
{
tree copy = copy_node (decl);
lang_hooks.dup_lang_specific_decl (copy);
SET_DECL_RTL (copy, 0);
TREE_USED (copy) = 1;
DECL_CHAIN (copy) = nonlocal_vla_vars;
nonlocal_vla_vars = copy;
SET_DECL_VALUE_EXPR (copy, unshare_expr (value_expr));
DECL_HAS_VALUE_EXPR_P (copy) = 1;
}
}
*expr_p = unshare_expr (value_expr);
return GS_OK;
}
return GS_ALL_DONE;
}
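/* As an illustrative sketch of the DECL_VALUE_EXPR case: for a VLA

     int a[n];

   the decl 'a' typically carries a DECL_VALUE_EXPR of the form *a.1,
   where a.1 is a pointer to the allocated storage, so every use of 'a'
   is rewritten here into *a.1 and re-gimplified. When such a VLA
   belongs to an enclosing function, the copied decl chained onto
   nonlocal_vla_vars above keeps it visible for debug info. */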
/* Recalculate the value of the TREE_SIDE_EFFECTS flag for T. */
static void
recalculate_side_effects (tree t)
{
enum tree_code code = TREE_CODE (t);
int len = TREE_OPERAND_LENGTH (t);
int i;
switch (TREE_CODE_CLASS (code))
{
case tcc_expression:
switch (code)
{
case INIT_EXPR:
case MODIFY_EXPR:
case VA_ARG_EXPR:
case PREDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case POSTINCREMENT_EXPR:
/* All of these have side-effects, no matter what their
operands are. */
return;
default:
break;
}
/* Fall through. */
case tcc_comparison: /* a comparison expression */
case tcc_unary: /* a unary arithmetic expression */
case tcc_binary: /* a binary arithmetic expression */
case tcc_reference: /* a reference */
case tcc_vl_exp: /* a function call */
TREE_SIDE_EFFECTS (t) = TREE_THIS_VOLATILE (t);
for (i = 0; i < len; ++i)
{
tree op = TREE_OPERAND (t, i);
if (op && TREE_SIDE_EFFECTS (op))
TREE_SIDE_EFFECTS (t) = 1;
}
break;
case tcc_constant:
/* No side-effects. */
return;
default:
gcc_unreachable ();
}
}
/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
node *EXPR_P.
compound_lval
: min_lval '[' val ']'
| min_lval '.' ID
| compound_lval '[' val ']'
| compound_lval '.' ID
This is not part of the original SIMPLE definition, which separates
array and member references, but it seems reasonable to handle them
together. Also, this way we don't run into problems with union
aliasing; gcc requires that for accesses through a union to alias, the
union reference must be explicit, which was not always the case when we
were splitting up array and member refs.
PRE_P points to the sequence where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the sequence where side effects that must happen after
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
fallback_t fallback)
{
tree *p;
enum gimplify_status ret = GS_ALL_DONE, tret;
int i;
location_t loc = EXPR_LOCATION (*expr_p);
tree expr = *expr_p;
/* Create a stack of the subexpressions so later we can walk them in
order from inner to outer. */
auto_vec<tree, 10> expr_stack;
/* We can handle anything that get_inner_reference can deal with. */
for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
{
restart:
/* Fold INDIRECT_REFs now to turn them into ARRAY_REFs. */
if (TREE_CODE (*p) == INDIRECT_REF)
*p = fold_indirect_ref_loc (loc, *p);
if (handled_component_p (*p))
;
/* Expand DECL_VALUE_EXPR now. In some cases that may expose
additional COMPONENT_REFs. */
else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
&& gimplify_var_or_parm_decl (p) == GS_OK)
goto restart;
else
break;
expr_stack.safe_push (*p);
}
gcc_assert (expr_stack.length ());
/* Now EXPR_STACK is a stack of pointers to all the refs we've
walked through and P points to the innermost expression.
Java requires that we elaborate nodes in source order. That
means we must gimplify the inner expression followed by each of
the indices, in order. But we can't gimplify the inner
expression until we deal with any variable bounds, sizes, or
positions in order to deal with PLACEHOLDER_EXPRs.
So we do this in three steps. First we deal with the annotations
for any variables in the components, then we gimplify the base,
then we gimplify any indices, from left to right. */
for (i = expr_stack.length () - 1; i >= 0; i--)
{
tree t = expr_stack[i];
if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
{
/* Gimplify the low bound and element type size and put them into
the ARRAY_REF. If these values are set, they have already been
gimplified. */
if (TREE_OPERAND (t, 2) == NULL_TREE)
{
tree low = unshare_expr (array_ref_low_bound (t));
if (!is_gimple_min_invariant (low))
{
TREE_OPERAND (t, 2) = low;
tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
post_p, is_gimple_reg,
fb_rvalue);
ret = MIN (ret, tret);
}
}
else
{
tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
is_gimple_reg, fb_rvalue);
ret = MIN (ret, tret);
}
if (TREE_OPERAND (t, 3) == NULL_TREE)
{
tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
tree elmt_size = unshare_expr (array_ref_element_size (t));
tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));
/* Divide the element size by the alignment of the element
type (above). */
elmt_size
= size_binop_loc (loc, EXACT_DIV_EXPR, elmt_size, factor);
if (!is_gimple_min_invariant (elmt_size))
{
TREE_OPERAND (t, 3) = elmt_size;
tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p,
post_p, is_gimple_reg,
fb_rvalue);
ret = MIN (ret, tret);
}
}
else
{
tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
is_gimple_reg, fb_rvalue);
ret = MIN (ret, tret);
}
}
else if (TREE_CODE (t) == COMPONENT_REF)
{
/* Set the field offset into T and gimplify it. */
if (TREE_OPERAND (t, 2) == NULL_TREE)
{
tree offset = unshare_expr (component_ref_field_offset (t));
tree field = TREE_OPERAND (t, 1);
tree factor
= size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);
/* Divide the offset by its alignment. */
offset = size_binop_loc (loc, EXACT_DIV_EXPR, offset, factor);
if (!is_gimple_min_invariant (offset))
{
TREE_OPERAND (t, 2) = offset;
tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p,
post_p, is_gimple_reg,
fb_rvalue);
ret = MIN (ret, tret);
}
}
else
{
tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
is_gimple_reg, fb_rvalue);
ret = MIN (ret, tret);
}
}
}
/* Step 2 is to gimplify the base expression. Make sure lvalue is set
so as to match the min_lval predicate. Failure to do so may result
in the creation of large aggregate temporaries. */
tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
fallback | fb_lvalue);
ret = MIN (ret, tret);
/* And finally, the indices and operands of ARRAY_REF. During this
loop we also remove any useless conversions. */
for (; expr_stack.length () > 0; )
{
tree t = expr_stack.pop ();
if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
{
/* Gimplify the dimension. */
if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
{
tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
is_gimple_val, fb_rvalue);
ret = MIN (ret, tret);
}
}
STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));
/* The innermost expression P may have originally had
TREE_SIDE_EFFECTS set which would have caused all the outer
expressions in *EXPR_P leading to P to also have had
TREE_SIDE_EFFECTS set. */
recalculate_side_effects (t);
}
/* If the outermost expression is a COMPONENT_REF, canonicalize its type. */
if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
{
canonicalize_component_ref (expr_p);
}
expr_stack.release ();
gcc_assert (*expr_p == expr || ret != GS_ALL_DONE);
return ret;
}
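/* As an illustrative sketch: for a reference such as a.b[i].c, the
   stack built above holds the COMPONENT_REF .c, the ARRAY_REF [i] and
   the COMPONENT_REF .b, from outer to inner; the base 'a' is
   gimplified first, toward an lvalue, and then each index is reduced
   to a GIMPLE value, so a side-effecting index as in a.b[i++].c has
   its increment emitted into PRE_P before the reference is used. */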
/* Gimplify the self modifying expression pointed to by EXPR_P
(++, --, +=, -=).
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored.
WANT_VALUE is nonzero iff we want to use the value of this expression
in another expression.
ARITH_TYPE is the type the computation should be performed in. */
enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool want_value, tree arith_type)
{
enum tree_code code;
tree lhs, lvalue, rhs, t1;
gimple_seq post = NULL, *orig_post_p = post_p;
bool postfix;
enum tree_code arith_code;
enum gimplify_status ret;
location_t loc = EXPR_LOCATION (*expr_p);
code = TREE_CODE (*expr_p);
gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
|| code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);
/* Prefix or postfix? */
if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
/* Faster to treat as prefix if result is not used. */
postfix = want_value;
else
postfix = false;
/* For postfix, make sure the inner expression's post side effects
are executed after side effects from this expression. */
if (postfix)
post_p = &post;
/* Add or subtract? */
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
arith_code = PLUS_EXPR;
else
arith_code = MINUS_EXPR;
/* Gimplify the LHS into a GIMPLE lvalue. */
lvalue = TREE_OPERAND (*expr_p, 0);
ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
if (ret == GS_ERROR)
return ret;
/* Extract the operands to the arithmetic operation. */
lhs = lvalue;
rhs = TREE_OPERAND (*expr_p, 1);
/* For postfix operator, we evaluate the LHS to an rvalue and then use
that as the result value and in the postqueue operation. */
if (postfix)
{
ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
if (ret == GS_ERROR)
return ret;
lhs = get_initialized_tmp_var (lhs, pre_p, NULL);
}
/* For pointer increment and decrement, use POINTER_PLUS_EXPR. */
if (POINTER_TYPE_P (TREE_TYPE (lhs)))
{
rhs = convert_to_ptrofftype_loc (loc, rhs);
if (arith_code == MINUS_EXPR)
rhs = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (rhs), rhs);
t1 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (*expr_p), lhs, rhs);
}
else
t1 = fold_convert (TREE_TYPE (*expr_p),
fold_build2 (arith_code, arith_type,
fold_convert (arith_type, lhs),
fold_convert (arith_type, rhs)));
if (postfix)
{
gimplify_assign (lvalue, t1, pre_p);
gimplify_seq_add_seq (orig_post_p, post);
*expr_p = lhs;
return GS_ALL_DONE;
}
else
{
*expr_p = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);
return GS_OK;
}
}
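/* As an illustrative sketch (x.0 being the usual temporary spelling in
   GIMPLE dumps): for a postfix increment whose value is used,

     y = x++;

   the code above emits roughly

     x.0 = x;
     x = x.0 + 1;

   into PRE_P and replaces the expression with x.0, so the caller sees
   the pre-increment value; an x++ whose value is unused is treated as
   the cheaper prefix form instead. */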
/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR. */
static void
maybe_with_size_expr (tree *expr_p)
{
tree expr = *expr_p;
tree type = TREE_TYPE (expr);
tree size;
/* If we've already wrapped this or the type is error_mark_node, we can't do
anything. */
if (TREE_CODE (expr) == WITH_SIZE_EXPR
|| type == error_mark_node)
return;
/* If the size isn't known or is a constant, we have nothing to do. */
size = TYPE_SIZE_UNIT (type);
if (!size || TREE_CODE (size) == INTEGER_CST)
return;
/* Otherwise, make a WITH_SIZE_EXPR. */
size = unshare_expr (size);
size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr);
*expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}
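/* As an illustrative sketch: an object whose type is variably sized,
   e.g. char[n], is wrapped as WITH_SIZE_EXPR <expr, size>, where size
   is the unshared TYPE_SIZE_UNIT with any PLACEHOLDER_EXPRs
   substituted; consumers such as gimplify_arg and the memcpy lowering
   further below can then recover the runtime byte count of the
   value. */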
/* Helper for gimplify_call_expr. Gimplify a single argument *ARG_P
Store any side-effects in PRE_P. CALL_LOCATION is the location of
the CALL_EXPR. */
enum gimplify_status
gimplify_arg (tree *arg_p, gimple_seq *pre_p, location_t call_location)
{
bool (*test) (tree);
fallback_t fb;
/* In general, we allow lvalues for function arguments to avoid
extra overhead of copying large aggregates out of even larger
aggregates into temporaries only to copy the temporaries to
the argument list. Make optimizers happy by pulling out to
temporaries those types that fit in registers. */
if (is_gimple_reg_type (TREE_TYPE (*arg_p)))
test = is_gimple_val, fb = fb_rvalue;
else
{
test = is_gimple_lvalue, fb = fb_either;
/* Also strip a TARGET_EXPR that would force an extra copy. */
if (TREE_CODE (*arg_p) == TARGET_EXPR)
{
tree init = TARGET_EXPR_INITIAL (*arg_p);
if (init
&& !VOID_TYPE_P (TREE_TYPE (init)))
*arg_p = init;
}
}
/* If this is a variable sized type, we must remember the size. */
maybe_with_size_expr (arg_p);
/* FIXME diagnostics: This will mess up gcc.dg/Warray-bounds.c. */
/* Make sure arguments have the same location as the function call
itself. */
protected_set_expr_location (*arg_p, call_location);
/* There is a sequence point before a function call. Side effects in
the argument list must occur before the actual call. So, when
gimplifying arguments, force gimplify_expr to use an internal
post queue which is then appended to the end of PRE_P. */
return gimplify_expr (arg_p, pre_p, NULL, test, fb);
}
/* Don't fold inside offloading or taskreg regions: it can break code by
adding decl references that weren't in the source. We'll do it during
omplower pass instead. */
static bool
maybe_fold_stmt (gimple_stmt_iterator *gsi)
{
struct gimplify_omp_ctx *ctx;
for (ctx = gimplify_omp_ctxp; ctx; ctx = ctx->outer_context)
if (ctx->region_type == ORT_TARGET
|| (ctx->region_type & (ORT_PARALLEL | ORT_TASK)) != 0)
return false;
return fold_stmt (gsi);
}
/* Gimplify the CALL_EXPR node *EXPR_P into the GIMPLE sequence PRE_P.
WANT_VALUE is true if the result of the call is desired. */
static enum gimplify_status
gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
tree fndecl, parms, p, fnptrtype;
enum gimplify_status ret;
int i, nargs;
gcall *call;
bool builtin_va_start_p = false;
location_t loc = EXPR_LOCATION (*expr_p);
gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);
/* For reliable diagnostics during inlining, it is necessary that
every call_expr be annotated with file and line. */
if (! EXPR_HAS_LOCATION (*expr_p))
SET_EXPR_LOCATION (*expr_p, input_location);
/* Gimplify internal functions created in the FEs. */
if (CALL_EXPR_FN (*expr_p) == NULL_TREE)
{
if (want_value)
return GS_ALL_DONE;
nargs = call_expr_nargs (*expr_p);
enum internal_fn ifn = CALL_EXPR_IFN (*expr_p);
auto_vec<tree> vargs (nargs);
for (i = 0; i < nargs; i++)
{
gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
EXPR_LOCATION (*expr_p));
vargs.quick_push (CALL_EXPR_ARG (*expr_p, i));
}
gimple call = gimple_build_call_internal_vec (ifn, vargs);
gimplify_seq_add_stmt (pre_p, call);
return GS_ALL_DONE;
}
/* This may be a call to a builtin function.
Builtin function calls may be transformed into different
(and more efficient) builtin function calls under certain
circumstances. Unfortunately, gimplification can muck things
up enough that the builtin expanders are not aware that certain
transformations are still valid.
So we attempt transformation/gimplification of the call before
we gimplify the CALL_EXPR. At this time we do not manage to
transform all calls in the same manner as the expanders do, but
we do transform most of them. */
fndecl = get_callee_fndecl (*expr_p);
if (fndecl
&& DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_VA_START:
{
builtin_va_start_p = TRUE;
if (call_expr_nargs (*expr_p) < 2)
{
error ("too few arguments to function %<va_start%>");
*expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
return GS_OK;
}
if (fold_builtin_next_arg (*expr_p, true))
{
*expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
return GS_OK;
}
break;
}
case BUILT_IN_LINE:
{
*expr_p = build_int_cst (TREE_TYPE (*expr_p),
LOCATION_LINE (EXPR_LOCATION (*expr_p)));
return GS_OK;
}
case BUILT_IN_FILE:
{
const char *locfile = LOCATION_FILE (EXPR_LOCATION (*expr_p));
*expr_p = build_string_literal (strlen (locfile) + 1, locfile);
return GS_OK;
}
case BUILT_IN_FUNCTION:
{
const char *function;
function = IDENTIFIER_POINTER (DECL_NAME (current_function_decl));
*expr_p = build_string_literal (strlen (function) + 1, function);
return GS_OK;
}
default:
;
}
if (fndecl && DECL_BUILT_IN (fndecl))
{
tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
if (new_tree && new_tree != *expr_p)
{
/* There was a transformation of this call which computes the
same value, but in a more efficient way. Return and try
again. */
*expr_p = new_tree;
return GS_OK;
}
}
/* Remember the original function pointer type. */
fnptrtype = TREE_TYPE (CALL_EXPR_FN (*expr_p));
/* There is a sequence point before the call, so any side effects in
the calling expression must occur before the actual call. Force
gimplify_expr to use an internal post queue. */
ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL,
is_gimple_call_addr, fb_rvalue);
nargs = call_expr_nargs (*expr_p);
/* Get argument types for verification. */
fndecl = get_callee_fndecl (*expr_p);
parms = NULL_TREE;
if (fndecl)
parms = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
else
parms = TYPE_ARG_TYPES (TREE_TYPE (fnptrtype));
if (fndecl && DECL_ARGUMENTS (fndecl))
p = DECL_ARGUMENTS (fndecl);
else if (parms)
p = parms;
else
p = NULL_TREE;
for (i = 0; i < nargs && p; i++, p = TREE_CHAIN (p))
;
/* If the last argument is __builtin_va_arg_pack () and it is not
passed as a named argument, decrease the number of CALL_EXPR
arguments and set instead the CALL_EXPR_VA_ARG_PACK flag. */
if (!p
&& i < nargs
&& TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR)
{
tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1);
tree last_arg_fndecl = get_callee_fndecl (last_arg);
if (last_arg_fndecl
&& TREE_CODE (last_arg_fndecl) == FUNCTION_DECL
&& DECL_BUILT_IN_CLASS (last_arg_fndecl) == BUILT_IN_NORMAL
&& DECL_FUNCTION_CODE (last_arg_fndecl) == BUILT_IN_VA_ARG_PACK)
{
tree call = *expr_p;
--nargs;
*expr_p = build_call_array_loc (loc, TREE_TYPE (call),
CALL_EXPR_FN (call),
nargs, CALL_EXPR_ARGP (call));
/* Copy all CALL_EXPR flags, location and block, except
CALL_EXPR_VA_ARG_PACK flag. */
CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call);
CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call);
CALL_EXPR_RETURN_SLOT_OPT (*expr_p)
= CALL_EXPR_RETURN_SLOT_OPT (call);
CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call);
SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (call));
/* Set CALL_EXPR_VA_ARG_PACK. */
CALL_EXPR_VA_ARG_PACK (*expr_p) = 1;
}
}
/* Gimplify the function arguments. */
if (nargs > 0)
{
for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0);
PUSH_ARGS_REVERSED ? i >= 0 : i < nargs;
PUSH_ARGS_REVERSED ? i-- : i++)
{
enum gimplify_status t;
/* Avoid gimplifying the second argument to va_start, which needs to
be the plain PARM_DECL. */
if ((i != 1) || !builtin_va_start_p)
{
t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
EXPR_LOCATION (*expr_p));
if (t == GS_ERROR)
ret = GS_ERROR;
}
}
}
/* Gimplify the static chain. */
if (CALL_EXPR_STATIC_CHAIN (*expr_p))
{
if (fndecl && !DECL_STATIC_CHAIN (fndecl))
CALL_EXPR_STATIC_CHAIN (*expr_p) = NULL;
else
{
enum gimplify_status t;
t = gimplify_arg (&CALL_EXPR_STATIC_CHAIN (*expr_p), pre_p,
EXPR_LOCATION (*expr_p));
if (t == GS_ERROR)
ret = GS_ERROR;
}
}
/* Verify the function result. */
if (want_value && fndecl
&& VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fnptrtype))))
{
error_at (loc, "using result of function returning %<void%>");
ret = GS_ERROR;
}
/* Try this again in case gimplification exposed something. */
if (ret != GS_ERROR)
{
tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
if (new_tree && new_tree != *expr_p)
{
/* There was a transformation of this call which computes the
same value, but in a more efficient way. Return and try
again. */
*expr_p = new_tree;
return GS_OK;
}
}
else
{
*expr_p = error_mark_node;
return GS_ERROR;
}
/* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
decl. This allows us to eliminate redundant or useless
calls to "const" functions. */
if (TREE_CODE (*expr_p) == CALL_EXPR)
{
int flags = call_expr_flags (*expr_p);
if (flags & (ECF_CONST | ECF_PURE)
/* An infinite loop is considered a side effect. */
&& !(flags & (ECF_LOOPING_CONST_OR_PURE)))
TREE_SIDE_EFFECTS (*expr_p) = 0;
}
/* If the value is not needed by the caller, emit a new GIMPLE_CALL
and clear *EXPR_P. Otherwise, leave *EXPR_P in its gimplified
form and delegate the creation of a GIMPLE_CALL to
gimplify_modify_expr. This is always possible because when
WANT_VALUE is true, the caller wants the result of this call into
a temporary, which means that we will emit an INIT_EXPR in
internal_get_tmp_var which will then be handled by
gimplify_modify_expr. */
if (!want_value)
{
/* The CALL_EXPR in *EXPR_P is already in GIMPLE form, so all we
have to do is replicate it as a GIMPLE_CALL tuple. */
gimple_stmt_iterator gsi;
call = gimple_build_call_from_tree (*expr_p);
gimple_call_set_fntype (call, TREE_TYPE (fnptrtype));
notice_special_calls (call);
gimplify_seq_add_stmt (pre_p, call);
gsi = gsi_last (*pre_p);
maybe_fold_stmt (&gsi);
*expr_p = NULL_TREE;
}
else
/* Remember the original function type. */
CALL_EXPR_FN (*expr_p) = build1 (NOP_EXPR, fnptrtype,
CALL_EXPR_FN (*expr_p));
return ret;
}
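/* As an illustrative sketch of the builtin folding above: a call such
   as strlen ("abc") is replaced via fold_call_expr by the constant 3
   before the callee and arguments are gimplified, and the second fold
   attempt afterwards catches calls that only become foldable once
   their arguments have been reduced to GIMPLE values. */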
/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.
TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
condition is true or false, respectively. If null, we should generate
our own to skip over the evaluation of this specific expression.
LOCUS is the source location of the COND_EXPR.
This function is the tree equivalent of do_jump.
shortcut_cond_r should only be called by shortcut_cond_expr. */
static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p,
location_t locus)
{
tree local_label = NULL_TREE;
tree t, expr = NULL;
/* OK, it's not a simple case; we need to pull apart the COND_EXPR to
retain the shortcut semantics. Just insert the gotos here;
shortcut_cond_expr will append the real blocks later. */
if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
{
location_t new_locus;
/* Turn if (a && b) into
if (a); else goto no;
if (b) goto yes; else goto no;
(no:) */
if (false_label_p == NULL)
false_label_p = &local_label;
/* Keep the original source location on the first 'if'. */
t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p, locus);
append_to_statement_list (t, &expr);
/* Set the source location of the && on the second 'if'. */
new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
new_locus);
append_to_statement_list (t, &expr);
}
else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
{
location_t new_locus;
/* Turn if (a || b) into
if (a) goto yes;
if (b) goto yes; else goto no;
(yes:) */
if (true_label_p == NULL)
true_label_p = &local_label;
/* Keep the original source location on the first 'if'. */
t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL, locus);
append_to_statement_list (t, &expr);
/* Set the source location of the || on the second 'if'. */
new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
new_locus);
append_to_statement_list (t, &expr);
}
else if (TREE_CODE (pred) == COND_EXPR
&& !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 1)))
&& !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 2))))
{
location_t new_locus;
/* As long as we're messing with gotos, turn if (a ? b : c) into
if (a)
if (b) goto yes; else goto no;
else
if (c) goto yes; else goto no;
Don't do this if one of the arms has void type, which can happen
in C++ when the arm is throw. */
/* Keep the original source location on the first 'if'. Set the source
location of the ? on the second 'if'. */
new_locus = EXPR_HAS_LOCATION (pred) ? EXPR_LOCATION (pred) : locus;
expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
false_label_p, locus),
shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
false_label_p, new_locus));
}
else
{
expr = build3 (COND_EXPR, void_type_node, pred,
build_and_jump (true_label_p),
build_and_jump (false_label_p));
SET_EXPR_LOCATION (expr, locus);
}
if (local_label)
{
t = build1 (LABEL_EXPR, void_type_node, local_label);
append_to_statement_list (t, &expr);
}
return expr;
}
/* Given a conditional expression EXPR with short-circuit boolean
predicates using TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR, break the
predicate apart into the equivalent sequence of conditionals. */
static tree
shortcut_cond_expr (tree expr)
{
tree pred = TREE_OPERAND (expr, 0);
tree then_ = TREE_OPERAND (expr, 1);
tree else_ = TREE_OPERAND (expr, 2);
tree true_label, false_label, end_label, t;
tree *true_label_p;
tree *false_label_p;
bool emit_end, emit_false, jump_over_else;
bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
bool else_se = else_ && TREE_SIDE_EFFECTS (else_);
/* First do simple transformations. */
if (!else_se)
{
/* If there is no 'else', turn
if (a && b) then c
into
if (a) if (b) then c. */
while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
{
/* Keep the original source location on the first 'if'. */
location_t locus = EXPR_LOC_OR_LOC (expr, input_location);
TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
/* Set the source location of the && on the second 'if'. */
if (EXPR_HAS_LOCATION (pred))
SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
then_ = shortcut_cond_expr (expr);
then_se = then_ && TREE_SIDE_EFFECTS (then_);
pred = TREE_OPERAND (pred, 0);
expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
SET_EXPR_LOCATION (expr, locus);
}
}
if (!then_se)
{
/* If there is no 'then', turn
if (a || b); else d
into
if (a); else if (b); else d. */
while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
{
/* Keep the original source location on the first 'if'. */
location_t locus = EXPR_LOC_OR_LOC (expr, input_location);
TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
/* Set the source location of the || on the second 'if'. */
if (EXPR_HAS_LOCATION (pred))
SET_EXPR_LOCATION (expr, EXPR_LOCATION (pred));
else_ = shortcut_cond_expr (expr);
else_se = else_ && TREE_SIDE_EFFECTS (else_);
pred = TREE_OPERAND (pred, 0);
expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
SET_EXPR_LOCATION (expr, locus);
}
}
/* If we're done, great. */
if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
&& TREE_CODE (pred) != TRUTH_ORIF_EXPR)
return expr;
/* Otherwise we need to mess with gotos. Change
if (a) c; else d;
to
if (a); else goto no;
c; goto end;
no: d; end:
and recursively gimplify the condition. */
true_label = false_label = end_label = NULL_TREE;
/* If our arms just jump somewhere, hijack those labels so we don't
generate jumps to jumps. */
if (then_
&& TREE_CODE (then_) == GOTO_EXPR
&& TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL)
{
true_label = GOTO_DESTINATION (then_);
then_ = NULL;
then_se = false;
}
if (else_
&& TREE_CODE (else_) == GOTO_EXPR
&& TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL)
{
false_label = GOTO_DESTINATION (else_);
else_ = NULL;
else_se = false;
}
/* If we aren't hijacking a label for the 'then' branch, it falls through. */
if (true_label)
true_label_p = &true_label;
else
true_label_p = NULL;
/* The 'else' branch also needs a label if it contains interesting code. */
if (false_label || else_se)
false_label_p = &false_label;
else
false_label_p = NULL;
/* If there was nothing else in our arms, just forward the label(s). */
if (!then_se && !else_se)
return shortcut_cond_r (pred, true_label_p, false_label_p,
EXPR_LOC_OR_LOC (expr, input_location));
/* If our last subexpression already has a terminal label, reuse it. */
if (else_se)
t = expr_last (else_);
else if (then_se)
t = expr_last (then_);
else
t = NULL;
if (t && TREE_CODE (t) == LABEL_EXPR)
end_label = LABEL_EXPR_LABEL (t);
/* If we don't care about jumping to the 'else' branch, jump to the end
if the condition is false. */
if (!false_label_p)
false_label_p = &end_label;
/* We only want to emit these labels if we aren't hijacking them. */
emit_end = (end_label == NULL_TREE);
emit_false = (false_label == NULL_TREE);
/* We only emit the jump over the else clause if we have to--if the
then clause may fall through. Otherwise we can wind up with a
useless jump and a useless label at the end of gimplified code,
which will cause us to think that this conditional as a whole
falls through even if it doesn't. If we then inline a function
which ends with such a condition, that can cause us to issue an
inappropriate warning about control reaching the end of a
non-void function. */
jump_over_else = block_may_fallthru (then_);
pred = shortcut_cond_r (pred, true_label_p, false_label_p,
EXPR_LOC_OR_LOC (expr, input_location));
expr = NULL;
append_to_statement_list (pred, &expr);
append_to_statement_list (then_, &expr);
if (else_se)
{
if (jump_over_else)
{
tree last = expr_last (expr);
t = build_and_jump (&end_label);
if (EXPR_HAS_LOCATION (last))
SET_EXPR_LOCATION (t, EXPR_LOCATION (last));
append_to_statement_list (t, &expr);
}
if (emit_false)
{
t = build1 (LABEL_EXPR, void_type_node, false_label);
append_to_statement_list (t, &expr);
}
append_to_statement_list (else_, &expr);
}
if (emit_end && end_label)
{
t = build1 (LABEL_EXPR, void_type_node, end_label);
append_to_statement_list (t, &expr);
}
return expr;
}
/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE. */
tree
gimple_boolify (tree expr)
{
tree type = TREE_TYPE (expr);
location_t loc = EXPR_LOCATION (expr);
if (TREE_CODE (expr) == NE_EXPR
&& TREE_CODE (TREE_OPERAND (expr, 0)) == CALL_EXPR
&& integer_zerop (TREE_OPERAND (expr, 1)))
{
tree call = TREE_OPERAND (expr, 0);
tree fn = get_callee_fndecl (call);
/* For __builtin_expect ((long) (x), y) recurse into x as well
if x is truth_value_p. */
if (fn
&& DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
&& DECL_FUNCTION_CODE (fn) == BUILT_IN_EXPECT
&& call_expr_nargs (call) == 2)
{
tree arg = CALL_EXPR_ARG (call, 0);
if (arg)
{
if (TREE_CODE (arg) == NOP_EXPR
&& TREE_TYPE (arg) == TREE_TYPE (call))
arg = TREE_OPERAND (arg, 0);
if (truth_value_p (TREE_CODE (arg)))
{
arg = gimple_boolify (arg);
CALL_EXPR_ARG (call, 0)
= fold_convert_loc (loc, TREE_TYPE (call), arg);
}
}
}
}
switch (TREE_CODE (expr))
{
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
/* Also boolify the arguments of truth exprs. */
TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
/* FALLTHRU */
case TRUTH_NOT_EXPR:
TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
/* These expressions always produce boolean results. */
if (TREE_CODE (type) != BOOLEAN_TYPE)
TREE_TYPE (expr) = boolean_type_node;
return expr;
case ANNOTATE_EXPR:
switch ((enum annot_expr_kind) TREE_INT_CST_LOW (TREE_OPERAND (expr, 1)))
{
case annot_expr_ivdep_kind:
case annot_expr_no_vector_kind:
case annot_expr_vector_kind:
TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
if (TREE_CODE (type) != BOOLEAN_TYPE)
TREE_TYPE (expr) = boolean_type_node;
return expr;
default:
gcc_unreachable ();
}
default:
if (COMPARISON_CLASS_P (expr))
{
/* These expressions always produce boolean results. */
if (TREE_CODE (type) != BOOLEAN_TYPE)
TREE_TYPE (expr) = boolean_type_node;
return expr;
}
/* Other expressions that get here must have boolean values, but
might need to be converted to the appropriate mode. */
if (TREE_CODE (type) == BOOLEAN_TYPE)
return expr;
return fold_convert_loc (loc, boolean_type_node, expr);
}
}
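/* As an illustrative sketch: boolifying the condition of

     if (a && b) ...

   retypes the TRUTH_ANDIF_EXPR and, recursively, both of its operands
   to BOOLEAN_TYPE, while a plain integer condition such as 'if (x)'
   falls through to the fold_convert_loc above and is wrapped in a
   conversion to boolean_type_node. */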
/* Given a conditional expression *EXPR_P without side effects, gimplify
its operands. New statements are inserted to PRE_P. */
static enum gimplify_status
gimplify_pure_cond_expr (tree *expr_p, gimple_seq *pre_p)
{
tree expr = *expr_p, cond;
enum gimplify_status ret, tret;
enum tree_code code;
cond = gimple_boolify (COND_EXPR_COND (expr));
/* We need to handle && and || specially, as their gimplification
creates pure cond_expr, thus leading to an infinite cycle otherwise. */
code = TREE_CODE (cond);
if (code == TRUTH_ANDIF_EXPR)
TREE_SET_CODE (cond, TRUTH_AND_EXPR);
else if (code == TRUTH_ORIF_EXPR)
TREE_SET_CODE (cond, TRUTH_OR_EXPR);
ret = gimplify_expr (&cond, pre_p, NULL, is_gimple_condexpr, fb_rvalue);
COND_EXPR_COND (*expr_p) = cond;
tret = gimplify_expr (&COND_EXPR_THEN (expr), pre_p, NULL,
is_gimple_val, fb_rvalue);
ret = MIN (ret, tret);
tret = gimplify_expr (&COND_EXPR_ELSE (expr), pre_p, NULL,
is_gimple_val, fb_rvalue);
return MIN (ret, tret);
}
/* Return true if evaluating EXPR could trap.
EXPR is GENERIC, while tree_could_trap_p can be called
only on GIMPLE. */
static bool
generic_expr_could_trap_p (tree expr)
{
unsigned i, n;
if (!expr || is_gimple_val (expr))
return false;
if (!EXPR_P (expr) || tree_could_trap_p (expr))
return true;
n = TREE_OPERAND_LENGTH (expr);
for (i = 0; i < n; i++)
if (generic_expr_could_trap_p (TREE_OPERAND (expr, i)))
return true;
return false;
}
/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
into
     if (p)                  if (p)
       t1 = a;                 a;
     else           or       else
       t1 = b;                 b;
     t1;
The second form is used when *EXPR_P is of type void.
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_cond_expr (tree *expr_p, gimple_seq *pre_p, fallback_t fallback)
{
tree expr = *expr_p;
tree type = TREE_TYPE (expr);
location_t loc = EXPR_LOCATION (expr);
tree tmp, arm1, arm2;
enum gimplify_status ret;
tree label_true, label_false, label_cont;
bool have_then_clause_p, have_else_clause_p;
gcond *cond_stmt;
enum tree_code pred_code;
gimple_seq seq = NULL;
/* If this COND_EXPR has a value, copy the values into a temporary within
the arms. */
if (!VOID_TYPE_P (type))
{
tree then_ = TREE_OPERAND (expr, 1), else_ = TREE_OPERAND (expr, 2);
tree result;
/* If either an rvalue is ok or we do not require an lvalue, create the
temporary. But we cannot do that if the type is addressable. */
if (((fallback & fb_rvalue) || !(fallback & fb_lvalue))
&& !TREE_ADDRESSABLE (type))
{
if (gimplify_ctxp->allow_rhs_cond_expr
/* If either branch has side effects or could trap, it can't be
evaluated unconditionally. */
&& !TREE_SIDE_EFFECTS (then_)
&& !generic_expr_could_trap_p (then_)
&& !TREE_SIDE_EFFECTS (else_)
&& !generic_expr_could_trap_p (else_))
return gimplify_pure_cond_expr (expr_p, pre_p);
tmp = create_tmp_var (type, "iftmp");
result = tmp;
}
/* Otherwise, only create and copy references to the values. */
else
{
type = build_pointer_type (type);
if (!VOID_TYPE_P (TREE_TYPE (then_)))
then_ = build_fold_addr_expr_loc (loc, then_);
if (!VOID_TYPE_P (TREE_TYPE (else_)))
else_ = build_fold_addr_expr_loc (loc, else_);
expr
= build3 (COND_EXPR, type, TREE_OPERAND (expr, 0), then_, else_);
tmp = create_tmp_var (type, "iftmp");
result = build_simple_mem_ref_loc (loc, tmp);
}
/* Build the new then clause, `tmp = then_;'. But don't build the
assignment if the value is void; in C++ it can be if it's a throw. */
if (!VOID_TYPE_P (TREE_TYPE (then_)))
TREE_OPERAND (expr, 1) = build2 (MODIFY_EXPR, type, tmp, then_);
/* Similarly, build the new else clause, `tmp = else_;'. */
if (!VOID_TYPE_P (TREE_TYPE (else_)))
TREE_OPERAND (expr, 2) = build2 (MODIFY_EXPR, type, tmp, else_);
TREE_TYPE (expr) = void_type_node;
recalculate_side_effects (expr);
/* Move the COND_EXPR to the prequeue. */
gimplify_stmt (&expr, pre_p);
*expr_p = result;
return GS_ALL_DONE;
}
/* Remove any COMPOUND_EXPR so the following cases will be caught. */
STRIP_TYPE_NOPS (TREE_OPERAND (expr, 0));
if (TREE_CODE (TREE_OPERAND (expr, 0)) == COMPOUND_EXPR)
gimplify_compound_expr (&TREE_OPERAND (expr, 0), pre_p, true);
/* Make sure the condition has BOOLEAN_TYPE. */
TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
/* Break apart && and || conditions. */
if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
|| TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
{
expr = shortcut_cond_expr (expr);
if (expr != *expr_p)
{
*expr_p = expr;
/* We can't rely on gimplify_expr to re-gimplify the expanded
form properly, as cleanups might cause the target labels to be
wrapped in a TRY_FINALLY_EXPR. To prevent that, we need to
set up a conditional context. */
gimple_push_condition ();
gimplify_stmt (expr_p, &seq);
gimple_pop_condition (pre_p);
gimple_seq_add_seq (pre_p, seq);
return GS_ALL_DONE;
}
}
/* Now do the normal gimplification. */
/* Gimplify condition. */
ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL, is_gimple_condexpr,
fb_rvalue);
if (ret == GS_ERROR)
return GS_ERROR;
gcc_assert (TREE_OPERAND (expr, 0) != NULL_TREE);
gimple_push_condition ();
have_then_clause_p = have_else_clause_p = false;
if (TREE_OPERAND (expr, 1) != NULL
&& TREE_CODE (TREE_OPERAND (expr, 1)) == GOTO_EXPR
&& TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 1))) == LABEL_DECL
&& (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 1)))
== current_function_decl)
/* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
have different locations, otherwise we end up with incorrect
location information on the branches. */
&& (optimize
|| !EXPR_HAS_LOCATION (expr)
|| !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 1))
|| EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 1))))
{
label_true = GOTO_DESTINATION (TREE_OPERAND (expr, 1));
have_then_clause_p = true;
}
else
label_true = create_artificial_label (UNKNOWN_LOCATION);
if (TREE_OPERAND (expr, 2) != NULL
&& TREE_CODE (TREE_OPERAND (expr, 2)) == GOTO_EXPR
&& TREE_CODE (GOTO_DESTINATION (TREE_OPERAND (expr, 2))) == LABEL_DECL
&& (DECL_CONTEXT (GOTO_DESTINATION (TREE_OPERAND (expr, 2)))
== current_function_decl)
/* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
have different locations, otherwise we end up with incorrect
location information on the branches. */
&& (optimize
|| !EXPR_HAS_LOCATION (expr)
|| !EXPR_HAS_LOCATION (TREE_OPERAND (expr, 2))
|| EXPR_LOCATION (expr) == EXPR_LOCATION (TREE_OPERAND (expr, 2))))
{
label_false = GOTO_DESTINATION (TREE_OPERAND (expr, 2));
have_else_clause_p = true;
}
else
label_false = create_artificial_label (UNKNOWN_LOCATION);
gimple_cond_get_ops_from_tree (COND_EXPR_COND (expr), &pred_code, &arm1,
&arm2);
cond_stmt = gimple_build_cond (pred_code, arm1, arm2, label_true,
label_false);
gimplify_seq_add_stmt (&seq, cond_stmt);
label_cont = NULL_TREE;
if (!have_then_clause_p)
{
/* For if (...) {} else { code; } put label_true after
the else block. */
if (TREE_OPERAND (expr, 1) == NULL_TREE
&& !have_else_clause_p
&& TREE_OPERAND (expr, 2) != NULL_TREE)
label_cont = label_true;
else
{
gimplify_seq_add_stmt (&seq, gimple_build_label (label_true));
have_then_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 1), &seq);
/* For if (...) { code; } else {} or
if (...) { code; } else goto label; or
if (...) { code; return; } else { ... }
label_cont isn't needed. */
if (!have_else_clause_p
&& TREE_OPERAND (expr, 2) != NULL_TREE
&& gimple_seq_may_fallthru (seq))
{
gimple g;
label_cont = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_goto (label_cont);
/* GIMPLE_CONDs are very low level; they have embedded
gotos. This particular embedded goto should not be marked
with the location of the original COND_EXPR, as it would
correspond to the COND_EXPR's condition, not the ELSE or the
THEN arms. To avoid marking it with the wrong location, flag
it as "no location". */
gimple_set_do_not_emit_location (g);
gimplify_seq_add_stmt (&seq, g);
}
}
}
if (!have_else_clause_p)
{
gimplify_seq_add_stmt (&seq, gimple_build_label (label_false));
have_else_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 2), &seq);
}
if (label_cont)
gimplify_seq_add_stmt (&seq, gimple_build_label (label_cont));
gimple_pop_condition (pre_p);
gimple_seq_add_seq (pre_p, seq);
if (ret == GS_ERROR)
; /* Do nothing. */
else if (have_then_clause_p || have_else_clause_p)
ret = GS_ALL_DONE;
else
{
/* Both arms are empty; replace the COND_EXPR with its predicate. */
expr = TREE_OPERAND (expr, 0);
gimplify_stmt (&expr, pre_p);
}
*expr_p = NULL;
return ret;
}
/* Prepare the node pointed to by EXPR_P, an is_gimple_addressable expression,
to be marked addressable.
We cannot rely on such an expression being directly markable if a temporary
has been created by the gimplification. In this case, we create another
temporary and initialize it with a copy, which will become a store after we
mark it addressable. This can happen if the front-end passed us something
that it could not mark addressable yet, like a Fortran pass-by-reference
parameter (int) floatvar. */
static void
prepare_gimple_addressable (tree *expr_p, gimple_seq *seq_p)
{
while (handled_component_p (*expr_p))
expr_p = &TREE_OPERAND (*expr_p, 0);
if (is_gimple_reg (*expr_p))
{
tree var = get_initialized_tmp_var (*expr_p, seq_p, NULL);
DECL_GIMPLE_REG_P (var) = 0;
*expr_p = var;
}
}
/* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with
a call to __builtin_memcpy. */
static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value,
gimple_seq *seq_p)
{
tree t, to, to_ptr, from, from_ptr;
gcall *gs;
location_t loc = EXPR_LOCATION (*expr_p);
to = TREE_OPERAND (*expr_p, 0);
from = TREE_OPERAND (*expr_p, 1);
/* Mark the RHS addressable. Beware that it may not be possible to do so
directly if a temporary has been created by the gimplification. */
prepare_gimple_addressable (&from, seq_p);
mark_addressable (from);
from_ptr = build_fold_addr_expr_loc (loc, from);
gimplify_arg (&from_ptr, seq_p, loc);
mark_addressable (to);
to_ptr = build_fold_addr_expr_loc (loc, to);
gimplify_arg (&to_ptr, seq_p, loc);
t = builtin_decl_implicit (BUILT_IN_MEMCPY);
gs = gimple_build_call (t, 3, to_ptr, from_ptr, size);
if (want_value)
{
/* tmp = memcpy() */
t = create_tmp_var (TREE_TYPE (to_ptr));
gimple_call_set_lhs (gs, t);
gimplify_seq_add_stmt (seq_p, gs);
*expr_p = build_simple_mem_ref (t);
return GS_ALL_DONE;
}
gimplify_seq_add_stmt (seq_p, gs);
*expr_p = NULL;
return GS_ALL_DONE;
}
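/* As an illustrative sketch: a block copy of a large or variably sized
   object, e.g. *p = *q with p and q pointing to struct S, becomes
   roughly

     __builtin_memcpy (p, q, SIZE);

   with SIZE the (possibly runtime) byte count passed in by the
   caller; when the value of the assignment is itself needed, it is
   read back through the temporary holding memcpy's return value. */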
/* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with
a call to __builtin_memset. In this case we know that the RHS is
a CONSTRUCTOR with an empty element list. */
static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
gimple_seq *seq_p)
{
tree t, from, to, to_ptr;
gcall *gs;
location_t loc = EXPR_LOCATION (*expr_p);
/* Assert our assumptions, to abort instead of producing wrong code
silently if they are not met. Beware that the RHS CONSTRUCTOR might
not be immediately exposed. */
from = TREE_OPERAND (*expr_p, 1);
if (TREE_CODE (from) == WITH_SIZE_EXPR)
from = TREE_OPERAND (from, 0);
gcc_assert (TREE_CODE (from) == CONSTRUCTOR
&& vec_safe_is_empty (CONSTRUCTOR_ELTS (from)));
/* Now proceed. */
to = TREE_OPERAND (*expr_p, 0);
to_ptr = build_fold_addr_expr_loc (loc, to);
gimplify_arg (&to_ptr, seq_p, loc);
t = builtin_decl_implicit (BUILT_IN_MEMSET);
gs = gimple_build_call (t, 3, to_ptr, integer_zero_node, size);
if (want_value)
{
/* tmp = memset() */
t = create_tmp_var (TREE_TYPE (to_ptr));
gimple_call_set_lhs (gs, t);
gimplify_seq_add_stmt (seq_p, gs);
*expr_p = build1 (INDIRECT_REF, TREE_TYPE (to), t);
return GS_ALL_DONE;
}
gimplify_seq_add_stmt (seq_p, gs);
*expr_p = NULL;
return GS_ALL_DONE;
}
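/* As an illustrative sketch: clearing an object with an empty
   CONSTRUCTOR, e.g. s = (struct S){ }, becomes roughly

     __builtin_memset (&s, 0, SIZE);

   with the temporary-and-INDIRECT_REF dance above used only when the
   value of the assignment is itself needed. */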
/* A subroutine of gimplify_init_ctor_preeval. Called via walk_tree,
determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
assignment. Return non-null if we detect a potential overlap. */
struct gimplify_init_ctor_preeval_data
{
/* The base decl of the lhs object. May be NULL, in which case we
have to assume the lhs is indirect. */
tree lhs_base_decl;
/* The alias set of the lhs object. */
alias_set_type lhs_alias_set;
};
static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
struct gimplify_init_ctor_preeval_data *data
= (struct gimplify_init_ctor_preeval_data *) xdata;
tree t = *tp;
/* If we find the base object, obviously we have overlap. */
if (data->lhs_base_decl == t)
return t;
/* If the constructor component is indirect, determine if we have a
potential overlap with the lhs. The only bits of information we
have to go on at this point are addressability and alias sets. */
if ((INDIRECT_REF_P (t)
|| TREE_CODE (t) == MEM_REF)
&& (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
&& alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t)))
return t;
/* If the constructor component is a call, determine if it can hide a
potential overlap with the lhs through an INDIRECT_REF like above.
??? Ugh - this is completely broken. In fact this whole analysis
doesn't look conservative. */
if (TREE_CODE (t) == CALL_EXPR)
{
tree type, fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (t)));
for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type))
if (POINTER_TYPE_P (TREE_VALUE (type))
&& (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
&& alias_sets_conflict_p (data->lhs_alias_set,
get_alias_set
(TREE_TYPE (TREE_VALUE (type)))))
return t;
}
if (IS_TYPE_OR_DECL_P (t))
*walk_subtrees = 0;
return NULL;
}
/* A subroutine of gimplify_init_constructor. Pre-evaluate EXPR,
force values that overlap with the lhs (as described by *DATA)
into temporaries. */
static void
gimplify_init_ctor_preeval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
struct gimplify_init_ctor_preeval_data *data)
{
enum gimplify_status one;
/* If the value is constant, then there's nothing to pre-evaluate. */
if (TREE_CONSTANT (*expr_p))
{
/* Ensure it does not have side effects, it might contain a reference to
the object we're initializing. */
gcc_assert (!TREE_SIDE_EFFECTS (*expr_p));
return;
}
/* If the type has non-trivial constructors, we can't pre-evaluate. */
if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
return;
/* Recurse for nested constructors. */
if (TREE_CODE (*expr_p) == CONSTRUCTOR)
{
unsigned HOST_WIDE_INT ix;
constructor_elt *ce;
vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (*expr_p);
FOR_EACH_VEC_SAFE_ELT (v, ix, ce)
gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);
return;
}
/* If this is a variable sized type, we must remember the size. */
maybe_with_size_expr (expr_p);
/* Gimplify the constructor element to something appropriate for the rhs
of a MODIFY_EXPR. Given that we know the LHS is an aggregate, we know
the gimplifier will consider this a store to memory. Doing this
gimplification now means that we won't have to deal with complicated
language-specific trees, nor trees like SAVE_EXPR that can induce
exponential search behavior. */
one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
if (one == GS_ERROR)
{
*expr_p = NULL;
return;
}
/* If we gimplified to a bare decl, we can be sure that it doesn't overlap
with the lhs, since "a = { .x=a }" doesn't make sense. This will
always be true for all scalars, since is_gimple_mem_rhs insists on a
temporary variable for them. */
if (DECL_P (*expr_p))
return;
/* If this is of variable size, we have no choice but to assume it doesn't
overlap since we can't make a temporary for it. */
if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
return;
/* Otherwise, we must search for overlap ... */
if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
return;
/* ... and if found, force the value into a temporary. */
*expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
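/* As an illustrative sketch: in an initialization such as

     a = (struct S){ .x = f (&a) };

   the call f (&a) could read or clobber 'a' while the constructor is
   being stored, so the walk above detects the potential overlap and
   the element value is pre-evaluated into a temporary, giving roughly

     t = f (&a);
     a.x = t;  */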
/* A subroutine of gimplify_init_ctor_eval. Create a loop for
a RANGE_EXPR in a CONSTRUCTOR for an array.
var = lower;
loop_entry:
object[var] = value;
if (var == upper)
goto loop_exit;
var = var + 1;
goto loop_entry;
loop_exit:
We increment var _after_ the loop exit check because we might otherwise
fail if upper == TYPE_MAX_VALUE (type for upper).
Note that we never have to deal with SAVE_EXPRs here, because this has
already been taken care of for us, in gimplify_init_ctor_preeval(). */
static void gimplify_init_ctor_eval (tree, vec<constructor_elt, va_gc> *,
gimple_seq *, bool);
static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
tree value, tree array_elt_type,
gimple_seq *pre_p, bool cleared)
{
tree loop_entry_label, loop_exit_label, fall_thru_label;
tree var, var_type, cref, tmp;
loop_entry_label = create_artificial_label (UNKNOWN_LOCATION);
loop_exit_label = create_artificial_label (UNKNOWN_LOCATION);
fall_thru_label = create_artificial_label (UNKNOWN_LOCATION);
/* Create and initialize the index variable. */
var_type = TREE_TYPE (upper);
var = create_tmp_var (var_type);
gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, lower));
/* Add the loop entry label. */
gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_entry_label));
/* Build the reference. */
cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
var, NULL_TREE, NULL_TREE);
/* If we are a constructor, just call gimplify_init_ctor_eval to do
the store. Otherwise just assign value to the reference. */
if (TREE_CODE (value) == CONSTRUCTOR)
/* NB we might have to call ourselves recursively through
gimplify_init_ctor_eval if the value is a constructor. */
gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
pre_p, cleared);
else
gimplify_seq_add_stmt (pre_p, gimple_build_assign (cref, value));
/* We exit the loop when the index var is equal to the upper bound. */
gimplify_seq_add_stmt (pre_p,
gimple_build_cond (EQ_EXPR, var, upper,
loop_exit_label, fall_thru_label));
gimplify_seq_add_stmt (pre_p, gimple_build_label (fall_thru_label));
/* Otherwise, increment the index var... */
tmp = build2 (PLUS_EXPR, var_type, var,
fold_convert (var_type, integer_one_node));
gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, tmp));
/* ...and jump back to the loop entry. */
gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_entry_label));
/* Add the loop exit label. */
gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_exit_label));
}
/* Return true if FDECL is accessing a field that is zero sized. */
static bool
zero_sized_field_decl (const_tree fdecl)
{
if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
&& integer_zerop (DECL_SIZE (fdecl)))
return true;
return false;
}
/* Return true if TYPE is zero sized. */
static bool
zero_sized_type (const_tree type)
{
if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type)
&& integer_zerop (TYPE_SIZE (type)))
return true;
return false;
}
/* A subroutine of gimplify_init_constructor. Generate individual
MODIFY_EXPRs for a CONSTRUCTOR. OBJECT is the LHS against which the
assignments should happen. ELTS is the CONSTRUCTOR_ELTS of the
CONSTRUCTOR. CLEARED is true if the entire LHS object has been
zeroed first. */
static void
gimplify_init_ctor_eval (tree object, vec<constructor_elt, va_gc> *elts,
gimple_seq *pre_p, bool cleared)
{
tree array_elt_type = NULL;
unsigned HOST_WIDE_INT ix;
tree purpose, value;
if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));
FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
{
tree cref;
/* NULL values are created above for gimplification errors. */
if (value == NULL)
continue;
if (cleared && initializer_zerop (value))
continue;
/* ??? Here's to hoping the front end fills in all of the indices,
so we don't have to figure out what's missing ourselves. */
gcc_assert (purpose);
/* Skip zero-sized fields, unless value has side-effects. This can
happen with calls to functions returning a zero-sized type, which
we shouldn't discard. As a number of downstream passes don't
expect sets of zero-sized fields, we rely on the gimplification of
the MODIFY_EXPR we make below to drop the assignment statement. */
if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
continue;
/* If we have a RANGE_EXPR, we have to build a loop to assign the
whole range. */
if (TREE_CODE (purpose) == RANGE_EXPR)
{
tree lower = TREE_OPERAND (purpose, 0);
tree upper = TREE_OPERAND (purpose, 1);
/* If the lower bound is equal to upper, just treat it as if
upper was the index. */
if (simple_cst_equal (lower, upper))
purpose = upper;
else
{
gimplify_init_ctor_eval_range (object, lower, upper, value,
array_elt_type, pre_p, cleared);
continue;
}
}
if (array_elt_type)
{
/* Do not use bitsizetype for ARRAY_REF indices. */
if (TYPE_DOMAIN (TREE_TYPE (object)))
purpose
= fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))),
purpose);
cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
purpose, NULL_TREE, NULL_TREE);
}
else
{
gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
unshare_expr (object), purpose, NULL_TREE);
}
if (TREE_CODE (value) == CONSTRUCTOR
&& TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
pre_p, cleared);
else
{
tree init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
gimplify_and_add (init, pre_p);
ggc_free (init);
}
}
}
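/* For illustration (editorial sketch): given
struct S { int a, b; } s = { 1, f () };
with CLEARED false, the loop above emits one assignment per
constructor element, roughly
s.a = 1;
s.b = f ();
while elements whose initializer is zero are skipped entirely when
the object has already been block-cleared. */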
/* Return the appropriate RHS predicate for this LHS. */
gimple_predicate
rhs_predicate_for (tree lhs)
{
if (is_gimple_reg (lhs))
return is_gimple_reg_rhs_or_call;
else
return is_gimple_mem_rhs_or_call;
}
/* Gimplify a C99 compound literal expression. This just means adding
the DECL_EXPR before the current statement and using its anonymous
decl instead. */
static enum gimplify_status
gimplify_compound_literal_expr (tree *expr_p, gimple_seq *pre_p,
bool (*gimple_test_f) (tree),
fallback_t fallback)
{
tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (*expr_p);
tree decl = DECL_EXPR_DECL (decl_s);
tree init = DECL_INITIAL (decl);
/* Mark the decl as addressable if the compound literal
expression is addressable now; otherwise it would be marked
too late, after we gimplify the initialization expression. */
if (TREE_ADDRESSABLE (*expr_p))
TREE_ADDRESSABLE (decl) = 1;
/* Otherwise, if we don't need an lvalue and have a literal, directly
substitute it. Check that it matches the gimple predicate, as
otherwise we'd generate a new temporary when we could just as well
use the decl we already have. */
else if (!TREE_ADDRESSABLE (decl)
&& init
&& (fallback & fb_lvalue) == 0
&& gimple_test_f (init))
{
*expr_p = init;
return GS_OK;
}
/* Preliminarily mark non-addressed complex variables as eligible
for promotion to gimple registers. We'll transform their uses
as we find them. */
if ((TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == VECTOR_TYPE)
&& !TREE_THIS_VOLATILE (decl)
&& !needs_to_live_in_memory (decl))
DECL_GIMPLE_REG_P (decl) = 1;
/* If the decl is not addressable, then it is being used in some
expression or on the right hand side of a statement, and it can
be put into a readonly data section. */
if (!TREE_ADDRESSABLE (decl) && (fallback & fb_lvalue) == 0)
TREE_READONLY (decl) = 1;
/* This decl isn't mentioned in the enclosing block, so add it to the
list of temps. FIXME it seems a bit of a kludge to say that
anonymous artificial vars aren't pushed, but everything else is. */
if (DECL_NAME (decl) == NULL_TREE && !DECL_SEEN_IN_BIND_EXPR_P (decl))
gimple_add_tmp_var (decl);
gimplify_and_add (decl_s, pre_p);
*expr_p = decl;
return GS_OK;
}
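/* For illustration (editorial sketch): for "int *p = &(int){ 5 };" the
DECL_EXPR of the anonymous decl is emitted before the statement and
the literal is replaced by that decl; for a non-addressable use such
as "x = (int){ 5 };" whose initializer already satisfies the gimple
predicate, the literal is substituted by the initializer "5"
directly. */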
/* Optimize embedded COMPOUND_LITERAL_EXPRs within a CONSTRUCTOR,
return a new CONSTRUCTOR if something changed. */
static tree
optimize_compound_literals_in_ctor (tree orig_ctor)
{
tree ctor = orig_ctor;
vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (ctor);
unsigned int idx, num = vec_safe_length (elts);
for (idx = 0; idx < num; idx++)
{
tree value = (*elts)[idx].value;
tree newval = value;
if (TREE_CODE (value) == CONSTRUCTOR)
newval = optimize_compound_literals_in_ctor (value);
else if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
{
tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (value);
tree decl = DECL_EXPR_DECL (decl_s);
tree init = DECL_INITIAL (decl);
if (!TREE_ADDRESSABLE (value)
&& !TREE_ADDRESSABLE (decl)
&& init
&& TREE_CODE (init) == CONSTRUCTOR)
newval = optimize_compound_literals_in_ctor (init);
}
if (newval == value)
continue;
if (ctor == orig_ctor)
{
ctor = copy_node (orig_ctor);
CONSTRUCTOR_ELTS (ctor) = vec_safe_copy (elts);
elts = CONSTRUCTOR_ELTS (ctor);
}
(*elts)[idx].value = newval;
}
return ctor;
}
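/* For illustration (editorial sketch): a constructor such as
{ .p = (struct P) { 1, 2 } }
is rewritten to
{ .p = { 1, 2 } }
provided neither the compound literal nor its decl is addressable,
so the nested initializer can be used in place. */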
/* A subroutine of gimplify_modify_expr. Break out elements of a
CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.
Note that we still need to clear any elements that don't have explicit
initializers, so if not all elements are initialized, we keep the
original MODIFY_EXPR; we just remove all of the constructor elements.
If NOTIFY_TEMP_CREATION is true, do not gimplify, just return
GS_ERROR if we would have to create a temporary when gimplifying
this constructor. Otherwise, return GS_OK.
If NOTIFY_TEMP_CREATION is false, just do the gimplification. */
static enum gimplify_status
gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool want_value, bool notify_temp_creation)
{
tree object, ctor, type;
enum gimplify_status ret;
vec<constructor_elt, va_gc> *elts;
gcc_assert (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == CONSTRUCTOR);
if (!notify_temp_creation)
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_lvalue, fb_lvalue);
if (ret == GS_ERROR)
return ret;
}
object = TREE_OPERAND (*expr_p, 0);
ctor = TREE_OPERAND (*expr_p, 1) =
optimize_compound_literals_in_ctor (TREE_OPERAND (*expr_p, 1));
type = TREE_TYPE (ctor);
elts = CONSTRUCTOR_ELTS (ctor);
ret = GS_ALL_DONE;
switch (TREE_CODE (type))
{
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
case ARRAY_TYPE:
{
struct gimplify_init_ctor_preeval_data preeval_data;
HOST_WIDE_INT num_ctor_elements, num_nonzero_elements;
bool cleared, complete_p, valid_const_initializer;
/* Aggregate types must lower constructors to initialization of
individual elements. The exception is that a CONSTRUCTOR node
with no elements indicates zero-initialization of the whole. */
if (vec_safe_is_empty (elts))
{
if (notify_temp_creation)
return GS_OK;
break;
}
/* Fetch information about the constructor to direct later processing.
We might want to make static versions of it in various cases, and
can only do so if it is known to be a valid constant initializer. */
valid_const_initializer
= categorize_ctor_elements (ctor, &num_nonzero_elements,
&num_ctor_elements, &complete_p);
/* If a const aggregate variable is being initialized, then it
should never be a loss to promote the variable to be static. */
if (valid_const_initializer
&& num_nonzero_elements > 1
&& TREE_READONLY (object)
&& TREE_CODE (object) == VAR_DECL
&& (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object)))
{
if (notify_temp_creation)
return GS_ERROR;
DECL_INITIAL (object) = ctor;
TREE_STATIC (object) = 1;
if (!DECL_NAME (object))
DECL_NAME (object) = create_tmp_var_name ("C");
walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);
/* ??? C++ doesn't automatically append a .<number> to the
assembler name, and even when it does, it looks at FE private
data structures to figure out what that number should be,
which are not set for this variable. I suppose this is
important for local statics for inline functions, which aren't
"local" in the object file sense. So in order to get a unique
TU-local symbol, we must invoke the lhd version now. */
lhd_set_decl_assembler_name (object);
*expr_p = NULL_TREE;
break;
}
/* If there are "lots" of initialized elements, even discounting
those that are not address constants (and thus *must* be
computed at runtime), then partition the constructor into
constant and non-constant parts. Block copy the constant
parts in, then generate code for the non-constant parts. */
/* TODO. There's code in cp/typeck.c to do this. */
if (int_size_in_bytes (TREE_TYPE (ctor)) < 0)
/* store_constructor will ignore the clearing of variable-sized
objects. Initializers for such objects must explicitly set
every field that needs to be set. */
cleared = false;
else if (!complete_p && !CONSTRUCTOR_NO_CLEARING (ctor))
/* If the constructor isn't complete, clear the whole object
beforehand, unless CONSTRUCTOR_NO_CLEARING is set on it.
??? This ought not to be needed. For any element not present
in the initializer, we should simply set it to zero. Except
we'd need to *find* the elements that are not present, and that
requires trickery to avoid quadratic compile-time behavior in
large cases or excessive memory use in small cases. */
cleared = true;
else if (num_ctor_elements - num_nonzero_elements
> CLEAR_RATIO (optimize_function_for_speed_p (cfun))
&& num_nonzero_elements < num_ctor_elements / 4)
/* If there are "lots" of zeros, it's more efficient to clear
the memory and then set the nonzero elements. */
cleared = true;
else
cleared = false;
/* If there are "lots" of initialized elements, and all of them
are valid address constants, then the entire initializer can
be dropped to memory, and then memcpy'd out. Don't do this
for sparse arrays, though, as it's more efficient to follow
the standard CONSTRUCTOR behavior of memset followed by
individual element initialization. Also don't do this for small
all-zero initializers (which aren't big enough to merit
clearing), and don't try to make bitwise copies of
TREE_ADDRESSABLE types.
We cannot apply this transformation when compiling a chkp static
initializer, because creating the initializer image in memory would
require static initialization of its bounds. That would trigger
another gimplification of a similar initializer, and we could fall
into an infinite loop. */
if (valid_const_initializer
&& !(cleared || num_nonzero_elements == 0)
&& !TREE_ADDRESSABLE (type)
&& (!current_function_decl
|| !lookup_attribute ("chkp ctor",
DECL_ATTRIBUTES (current_function_decl))))
{
HOST_WIDE_INT size = int_size_in_bytes (type);
unsigned int align;
/* ??? We can still get unbounded array types, at least
from the C++ front end. This seems wrong, but attempt
to work around it for now. */
if (size < 0)
{
size = int_size_in_bytes (TREE_TYPE (object));
if (size >= 0)
TREE_TYPE (ctor) = type = TREE_TYPE (object);
}
/* Find the maximum alignment we can assume for the object. */
/* ??? Make use of DECL_OFFSET_ALIGN. */
if (DECL_P (object))
align = DECL_ALIGN (object);
else
align = TYPE_ALIGN (type);
/* Do a block move either if the size is so small as to make
each individual move a sub-unit move on average, or if it
is so large as to make individual moves inefficient. */
if (size > 0
&& num_nonzero_elements > 1
&& (size < num_nonzero_elements
|| !can_move_by_pieces (size, align)))
{
if (notify_temp_creation)
return GS_ERROR;
walk_tree (&ctor, force_labels_r, NULL, NULL);
ctor = tree_output_constant_def (ctor);
if (!useless_type_conversion_p (type, TREE_TYPE (ctor)))
ctor = build1 (VIEW_CONVERT_EXPR, type, ctor);
TREE_OPERAND (*expr_p, 1) = ctor;
/* This is no longer an assignment of a CONSTRUCTOR, but
we still may have processing to do on the LHS. So
pretend we didn't do anything here to let that happen. */
return GS_UNHANDLED;
}
}
/* If the target is volatile, and we have nonzero elements and more than
one field to assign, initialize the target from a temporary. */
if (TREE_THIS_VOLATILE (object)
&& !TREE_ADDRESSABLE (type)
&& num_nonzero_elements > 0
&& vec_safe_length (elts) > 1)
{
tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type));
TREE_OPERAND (*expr_p, 0) = temp;
*expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
*expr_p,
build2 (MODIFY_EXPR, void_type_node,
object, temp));
return GS_OK;
}
if (notify_temp_creation)
return GS_OK;
/* If there are nonzero elements and if needed, pre-evaluate to capture
elements overlapping with the lhs into temporaries. We must do this
before clearing to fetch the values before they are zeroed-out. */
if (num_nonzero_elements > 0 && TREE_CODE (*expr_p) != INIT_EXPR)
{
preeval_data.lhs_base_decl = get_base_address (object);
if (!DECL_P (preeval_data.lhs_base_decl))
preeval_data.lhs_base_decl = NULL;
preeval_data.lhs_alias_set = get_alias_set (object);
gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
pre_p, post_p, &preeval_data);
}
if (cleared)
{
/* Zap the CONSTRUCTOR element list, which simplifies this case.
Note that we still have to gimplify, in order to handle the
case of variable sized types. Avoid shared tree structures. */
CONSTRUCTOR_ELTS (ctor) = NULL;
TREE_SIDE_EFFECTS (ctor) = 0;
object = unshare_expr (object);
gimplify_stmt (expr_p, pre_p);
}
/* If we have not block cleared the object, or if there are nonzero
elements in the constructor, add assignments to the individual
scalar fields of the object. */
if (!cleared || num_nonzero_elements > 0)
gimplify_init_ctor_eval (object, elts, pre_p, cleared);
*expr_p = NULL_TREE;
}
break;
case COMPLEX_TYPE:
{
tree r, i;
if (notify_temp_creation)
return GS_OK;
/* Extract the real and imaginary parts out of the ctor. */
gcc_assert (elts->length () == 2);
r = (*elts)[0].value;
i = (*elts)[1].value;
if (r == NULL || i == NULL)
{
tree zero = build_zero_cst (TREE_TYPE (type));
if (r == NULL)
r = zero;
if (i == NULL)
i = zero;
}
/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
represent creation of a complex value. */
if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
{
ctor = build_complex (type, r, i);
TREE_OPERAND (*expr_p, 1) = ctor;
}
else
{
ctor = build2 (COMPLEX_EXPR, type, r, i);
TREE_OPERAND (*expr_p, 1) = ctor;
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
pre_p,
post_p,
rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
fb_rvalue);
}
}
break;
case VECTOR_TYPE:
{
unsigned HOST_WIDE_INT ix;
constructor_elt *ce;
if (notify_temp_creation)
return GS_OK;
/* Go ahead and simplify constant constructors to VECTOR_CST. */
if (TREE_CONSTANT (ctor))
{
bool constant_p = true;
tree value;
/* Even when ctor is constant, it might contain non-*_CST
elements, such as addresses or trapping values like
1.0/0.0 - 1.0/0.0. Such expressions don't belong
in VECTOR_CST nodes. */
FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
if (!CONSTANT_CLASS_P (value))
{
constant_p = false;
break;
}
if (constant_p)
{
TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
break;
}
TREE_CONSTANT (ctor) = 0;
}
/* Vector types use CONSTRUCTOR all the way through gimple
compilation as a general initializer. */
FOR_EACH_VEC_SAFE_ELT (elts, ix, ce)
{
enum gimplify_status tret;
tret = gimplify_expr (&ce->value, pre_p, post_p, is_gimple_val,
fb_rvalue);
if (tret == GS_ERROR)
ret = GS_ERROR;
else if (TREE_STATIC (ctor)
&& !initializer_constant_valid_p (ce->value,
TREE_TYPE (ce->value)))
TREE_STATIC (ctor) = 0;
}
if (!is_gimple_reg (TREE_OPERAND (*expr_p, 0)))
TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p);
}
break;
default:
/* So how did we get a CONSTRUCTOR for a scalar type? */
gcc_unreachable ();
}
if (ret == GS_ERROR)
return GS_ERROR;
else if (want_value)
{
*expr_p = object;
return GS_OK;
}
else
{
/* If we have gimplified both sides of the initializer but have
not emitted an assignment, do so now. */
if (*expr_p)
{
tree lhs = TREE_OPERAND (*expr_p, 0);
tree rhs = TREE_OPERAND (*expr_p, 1);
gassign *init = gimple_build_assign (lhs, rhs);
gimplify_seq_add_stmt (pre_p, init);
*expr_p = NULL;
}
return GS_ALL_DONE;
}
}
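/* For illustration (editorial sketch of the clearing heuristics above):
int a[100] = { [3] = 7 };
has an incomplete constructor, so the whole object is block-cleared
first and a single element store remains, roughly
a = {}; (block clear)
a[3] = 7;
whereas a small, fully initialized aggregate is simply assigned field
by field with no preliminary clear. */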
/* Given a pointer value OP0, return a simplified version of an
indirection through OP0, or NULL_TREE if no simplification is
possible. This may only be applied to a rhs of an expression.
Note that the resulting type may be different from the type pointed
to in the sense that it is still compatible from the langhooks
point of view. */
static tree
gimple_fold_indirect_ref_rhs (tree t)
{
return gimple_fold_indirect_ref (t);
}
/* Subroutine of gimplify_modify_expr to do simplifications of
MODIFY_EXPRs based on the code of the RHS. We loop for as long as
something changes. */
static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
gimple_seq *pre_p, gimple_seq *post_p,
bool want_value)
{
enum gimplify_status ret = GS_UNHANDLED;
bool changed;
do
{
changed = false;
switch (TREE_CODE (*from_p))
{
case VAR_DECL:
/* If we're assigning from a read-only variable initialized with
a constructor, do the direct assignment from the constructor,
but only if neither source nor target is volatile, since this
latter assignment might end up being done on a per-field basis. */
if (DECL_INITIAL (*from_p)
&& TREE_READONLY (*from_p)
&& !TREE_THIS_VOLATILE (*from_p)
&& !TREE_THIS_VOLATILE (*to_p)
&& TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR)
{
tree old_from = *from_p;
enum gimplify_status subret;
/* Move the constructor into the RHS. */
*from_p = unshare_expr (DECL_INITIAL (*from_p));
/* Let's see if gimplify_init_constructor will need to put
it in memory. */
subret = gimplify_init_constructor (expr_p, NULL, NULL,
false, true);
if (subret == GS_ERROR)
{
/* If so, revert the change. */
*from_p = old_from;
}
else
{
ret = GS_OK;
changed = true;
}
}
break;
case INDIRECT_REF:
{
/* If we have code like
*(const A*)(A*)&x
where the type of "x" is a (possibly cv-qualified) variant
of "A", treat the entire expression as identical to "x".
This kind of code arises in C++ when an object is bound
to a const reference, and if "x" is a TARGET_EXPR we want
to take advantage of the optimization below. */
bool volatile_p = TREE_THIS_VOLATILE (*from_p);
tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
if (t)
{
if (TREE_THIS_VOLATILE (t) != volatile_p)
{
if (TREE_CODE_CLASS (TREE_CODE (t)) == tcc_declaration)
t = build_simple_mem_ref_loc (EXPR_LOCATION (*from_p),
build_fold_addr_expr (t));
if (REFERENCE_CLASS_P (t))
TREE_THIS_VOLATILE (t) = volatile_p;
}
*from_p = t;
ret = GS_OK;
changed = true;
}
break;
}
case TARGET_EXPR:
{
/* If we are initializing something from a TARGET_EXPR, strip the
TARGET_EXPR and initialize it directly, if possible. This can't
be done if the initializer is void, since that implies that the
temporary is set in some non-trivial way.
??? What about code that pulls out the temp and uses it
elsewhere? I think that such code never uses the TARGET_EXPR as
an initializer. If I'm wrong, we'll die because the temp won't
have any RTL. In that case, I guess we'll need to replace
references somehow. */
tree init = TARGET_EXPR_INITIAL (*from_p);
if (init
&& !VOID_TYPE_P (TREE_TYPE (init)))
{
*from_p = init;
ret = GS_OK;
changed = true;
}
}
break;
case COMPOUND_EXPR:
/* Remove any COMPOUND_EXPR in the RHS so the following cases will be
caught. */
gimplify_compound_expr (from_p, pre_p, true);
ret = GS_OK;
changed = true;
break;
case CONSTRUCTOR:
/* If we already made some changes, let the front end have a
crack at this before we break it down. */
if (ret != GS_UNHANDLED)
break;
/* If we're initializing from a CONSTRUCTOR, break this into
individual MODIFY_EXPRs. */
return gimplify_init_constructor (expr_p, pre_p, post_p, want_value,
false);
case COND_EXPR:
/* If we're assigning to a non-register type, push the assignment
down into the branches. This is mandatory for ADDRESSABLE types,
since we cannot generate temporaries for such, but it saves a
copy in other cases as well. */
if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
{
/* This code should mirror the code in gimplify_cond_expr. */
enum tree_code code = TREE_CODE (*expr_p);
tree cond = *from_p;
tree result = *to_p;
ret = gimplify_expr (&result, pre_p, post_p,
is_gimple_lvalue, fb_lvalue);
if (ret != GS_ERROR)
ret = GS_OK;
if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
TREE_OPERAND (cond, 1)
= build2 (code, void_type_node, result,
TREE_OPERAND (cond, 1));
if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
TREE_OPERAND (cond, 2)
= build2 (code, void_type_node, unshare_expr (result),
TREE_OPERAND (cond, 2));
TREE_TYPE (cond) = void_type_node;
recalculate_side_effects (cond);
if (want_value)
{
gimplify_and_add (cond, pre_p);
*expr_p = unshare_expr (result);
}
else
*expr_p = cond;
return ret;
}
break;
case CALL_EXPR:
/* For calls that return in memory, give *to_p as the CALL_EXPR's
return slot so that we don't generate a temporary. */
if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
&& aggregate_value_p (*from_p, *from_p))
{
bool use_target;
if (!(rhs_predicate_for (*to_p))(*from_p))
/* If we need a temporary, *to_p isn't accurate. */
use_target = false;
/* It's OK to use the return slot directly unless it's an NRV. */
else if (TREE_CODE (*to_p) == RESULT_DECL
&& DECL_NAME (*to_p) == NULL_TREE
&& needs_to_live_in_memory (*to_p))
use_target = true;
else if (is_gimple_reg_type (TREE_TYPE (*to_p))
|| (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
/* Don't force regs into memory. */
use_target = false;
else if (TREE_CODE (*expr_p) == INIT_EXPR)
/* It's OK to use the target directly if it's being
initialized. */
use_target = true;
else if (variably_modified_type_p (TREE_TYPE (*to_p), NULL_TREE))
/* Always use the target and thus RSO for variable-sized types.
GIMPLE cannot deal with a variable-sized assignment
embedded in a call statement. */
use_target = true;
else if (TREE_CODE (*to_p) != SSA_NAME
&& (!is_gimple_variable (*to_p)
|| needs_to_live_in_memory (*to_p)))
/* Don't use the original target if it's already addressable;
if its address escapes, and the called function uses the
NRV optimization, a conforming program could see *to_p
change before the called function returns; see c++/19317.
When optimizing, the return_slot pass marks more functions
as safe after we have escape info. */
use_target = false;
else
use_target = true;
if (use_target)
{
CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
mark_addressable (*to_p);
}
}
break;
case WITH_SIZE_EXPR:
/* Likewise for calls that return an aggregate of non-constant size,
since we would not be able to generate a temporary at all. */
if (TREE_CODE (TREE_OPERAND (*from_p, 0)) == CALL_EXPR)
{
*from_p = TREE_OPERAND (*from_p, 0);
/* We don't change ret in this case because the
WITH_SIZE_EXPR might have been added in
gimplify_modify_expr, so returning GS_OK would lead to an
infinite loop. */
changed = true;
}
break;
/* If we're initializing from a container, push the initialization
inside it. */
case CLEANUP_POINT_EXPR:
case BIND_EXPR:
case STATEMENT_LIST:
{
tree wrap = *from_p;
tree t;
ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval,
fb_lvalue);
if (ret != GS_ERROR)
ret = GS_OK;
t = voidify_wrapper_expr (wrap, *expr_p);
gcc_assert (t == *expr_p);
if (want_value)
{
gimplify_and_add (wrap, pre_p);
*expr_p = unshare_expr (*to_p);
}
else
*expr_p = wrap;
return GS_OK;
}
case COMPOUND_LITERAL_EXPR:
{
tree complit = TREE_OPERAND (*expr_p, 1);
tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (complit);
tree decl = DECL_EXPR_DECL (decl_s);
tree init = DECL_INITIAL (decl);
/* struct T x = (struct T) { 0, 1, 2 } can be optimized
into struct T x = { 0, 1, 2 } if the address of the
compound literal has never been taken. */
if (!TREE_ADDRESSABLE (complit)
&& !TREE_ADDRESSABLE (decl)
&& init)
{
*expr_p = copy_node (*expr_p);
TREE_OPERAND (*expr_p, 1) = init;
return GS_OK;
}
}
default:
break;
}
}
while (changed);
return ret;
}
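/* For illustration (editorial sketch of the COND_EXPR case above): for
a non-register type,
x = c ? y : z;
has the assignment pushed down into the branches, yielding the
equivalent of
if (c) x = y; else x = z;
so no aggregate temporary is needed for the conditional's value. */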
/* Return true if T looks like a valid GIMPLE statement. */
static bool
is_gimple_stmt (tree t)
{
const enum tree_code code = TREE_CODE (t);
switch (code)
{
case NOP_EXPR:
/* The only valid NOP_EXPR is the empty statement. */
return IS_EMPTY_STMT (t);
case BIND_EXPR:
case COND_EXPR:
/* These are only valid if they're void. */
return TREE_TYPE (t) == NULL || VOID_TYPE_P (TREE_TYPE (t));
case SWITCH_EXPR:
case GOTO_EXPR:
case RETURN_EXPR:
case LABEL_EXPR:
case CASE_LABEL_EXPR:
case TRY_CATCH_EXPR:
case TRY_FINALLY_EXPR:
case EH_FILTER_EXPR:
case CATCH_EXPR:
case ASM_EXPR:
case STATEMENT_LIST:
case OACC_PARALLEL:
case OACC_KERNELS:
case OACC_DATA:
case OACC_HOST_DATA:
case OACC_DECLARE:
case OACC_UPDATE:
case OACC_ENTER_DATA:
case OACC_EXIT_DATA:
case OACC_CACHE:
case OMP_PARALLEL:
case OMP_FOR:
case OMP_SIMD:
case CILK_SIMD:
case OMP_DISTRIBUTE:
case OACC_LOOP:
case OMP_SECTIONS:
case OMP_SECTION:
case OMP_SINGLE:
case OMP_MASTER:
case OMP_TASKGROUP:
case OMP_ORDERED:
case OMP_CRITICAL:
case OMP_TASK:
/* These are always void. */
return true;
case CALL_EXPR:
case MODIFY_EXPR:
case PREDICT_EXPR:
/* These are valid regardless of their type. */
return true;
default:
return false;
}
}
/* Promote partial stores to COMPLEX variables to total stores. *EXPR_P is
a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
DECL_GIMPLE_REG_P set.
IMPORTANT NOTE: This promotion is performed by introducing a load of the
other, unmodified part of the complex object just before the total store.
As a consequence, if the object is still uninitialized, an undefined value
will be loaded into a register, which may result in a spurious exception
if the register is floating-point and the value happens to be a signaling
NaN for example. Then the fully-fledged complex operations lowering pass
followed by a DCE pass are necessary in order to fix things up. */
static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, gimple_seq *pre_p,
bool want_value)
{
enum tree_code code, ocode;
tree lhs, rhs, new_rhs, other, realpart, imagpart;
lhs = TREE_OPERAND (*expr_p, 0);
rhs = TREE_OPERAND (*expr_p, 1);
code = TREE_CODE (lhs);
lhs = TREE_OPERAND (lhs, 0);
ocode = code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR;
other = build1 (ocode, TREE_TYPE (rhs), lhs);
TREE_NO_WARNING (other) = 1;
other = get_formal_tmp_var (other, pre_p);
realpart = code == REALPART_EXPR ? rhs : other;
imagpart = code == REALPART_EXPR ? other : rhs;
if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
else
new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);
gimplify_seq_add_stmt (pre_p, gimple_build_assign (lhs, new_rhs));
*expr_p = (want_value) ? rhs : NULL_TREE;
return GS_ALL_DONE;
}
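/* For illustration (editorial sketch): a partial store such as
__real__ c = r;
is promoted to a total store by first loading the untouched part,
tmp = __imag__ c;
c = COMPLEX_EXPR <r, tmp>;
which lets "c" be treated as a gimple register instead of memory. */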
/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.
modify_expr
: varname '=' rhs
| '*' ID '=' rhs
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored.
WANT_VALUE is nonzero iff we want to use the value of this expression
in another expression. */
static enum gimplify_status
gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool want_value)
{
tree *from_p = &TREE_OPERAND (*expr_p, 1);
tree *to_p = &TREE_OPERAND (*expr_p, 0);
enum gimplify_status ret = GS_UNHANDLED;
gimple assign;
location_t loc = EXPR_LOCATION (*expr_p);
gimple_stmt_iterator gsi;
gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
|| TREE_CODE (*expr_p) == INIT_EXPR);
/* Trying to simplify a clobber using normal logic doesn't work,
so handle it here. */
if (TREE_CLOBBER_P (*from_p))
{
ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
if (ret == GS_ERROR)
return ret;
gcc_assert (!want_value
&& (TREE_CODE (*to_p) == VAR_DECL
|| TREE_CODE (*to_p) == MEM_REF));
gimplify_seq_add_stmt (pre_p, gimple_build_assign (*to_p, *from_p));
*expr_p = NULL;
return GS_ALL_DONE;
}
/* Insert pointer conversions required by the middle-end that are not
required by the frontend. This fixes middle-end type checking
for, for example, gcc.dg/redecl-6.c. */
if (POINTER_TYPE_P (TREE_TYPE (*to_p)))
{
STRIP_USELESS_TYPE_CONVERSION (*from_p);
if (!useless_type_conversion_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p)))
*from_p = fold_convert_loc (loc, TREE_TYPE (*to_p), *from_p);
}
/* See if any simplifications can be done based on what the RHS is. */
ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
want_value);
if (ret != GS_UNHANDLED)
return ret;
/* For zero-sized types, only gimplify the left-hand side and right-hand
side as statements and throw away the assignment. Do this after
gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
types properly. */
if (zero_sized_type (TREE_TYPE (*from_p)) && !want_value)
{
gimplify_stmt (from_p, pre_p);
gimplify_stmt (to_p, pre_p);
*expr_p = NULL_TREE;
return GS_ALL_DONE;
}
/* If the value being copied is of variable width, compute the length
of the copy into a WITH_SIZE_EXPR. Note that we need to do this
before gimplifying any of the operands so that we can resolve any
PLACEHOLDER_EXPRs in the size. Also note that the RTL expander uses
the size of the expression to be copied, not of the destination, so
that is what we must do here. */
maybe_with_size_expr (from_p);
ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
if (ret == GS_ERROR)
return ret;
/* As a special case, we have to temporarily allow for assignments
with a CALL_EXPR on the RHS. Since in GIMPLE a function call is
a toplevel statement, when gimplifying the GENERIC expression
MODIFY_EXPR <a, CALL_EXPR <foo>>, we cannot create the tuple
GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>.
Instead, we need to create the tuple GIMPLE_CALL <a, foo>. To
prevent gimplify_expr from trying to create a new temporary for
foo's LHS, we tell it that it should only gimplify until it
reaches the CALL_EXPR. On return from gimplify_expr, the newly
created GIMPLE_CALL <foo> will be the last statement in *PRE_P
and all we need to do here is set 'a' to be its LHS. */
ret = gimplify_expr (from_p, pre_p, post_p, rhs_predicate_for (*to_p),
fb_rvalue);
if (ret == GS_ERROR)
return ret;
/* Now see if the above changed *from_p to something we handle specially. */
ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
want_value);
if (ret != GS_UNHANDLED)
return ret;
/* If we've got a variable-sized assignment between two lvalues (i.e. one
that does not involve a call), then we can make things a bit more
straightforward by converting the assignment to memcpy or memset. */
if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
{
tree from = TREE_OPERAND (*from_p, 0);
tree size = TREE_OPERAND (*from_p, 1);
if (TREE_CODE (from) == CONSTRUCTOR)
return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p);
if (is_gimple_addressable (from))
{
*from_p = from;
return gimplify_modify_expr_to_memcpy (expr_p, size, want_value,
pre_p);
}
}
/* Transform partial stores to non-addressable complex variables into
total stores. This allows us to use real instead of virtual operands
for these variables, which improves optimization. */
if ((TREE_CODE (*to_p) == REALPART_EXPR
|| TREE_CODE (*to_p) == IMAGPART_EXPR)
&& is_gimple_reg (TREE_OPERAND (*to_p, 0)))
return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);
/* Try to alleviate the effects of the gimplification creating artificial
temporaries (see for example is_gimple_reg_rhs) on the debug info. */
if (!gimplify_ctxp->into_ssa
&& TREE_CODE (*from_p) == VAR_DECL
&& DECL_IGNORED_P (*from_p)
&& DECL_P (*to_p)
&& !DECL_IGNORED_P (*to_p))
{
if (!DECL_NAME (*from_p) && DECL_NAME (*to_p))
DECL_NAME (*from_p)
= create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p)));
DECL_HAS_DEBUG_EXPR_P (*from_p) = 1;
SET_DECL_DEBUG_EXPR (*from_p, *to_p);
}
if (want_value && TREE_THIS_VOLATILE (*to_p))
*from_p = get_initialized_tmp_var (*from_p, pre_p, post_p);
if (TREE_CODE (*from_p) == CALL_EXPR)
{
/* Since the RHS is a CALL_EXPR, we need to create a GIMPLE_CALL
instead of a GIMPLE_ASSIGN. */
gcall *call_stmt;
if (CALL_EXPR_FN (*from_p) == NULL_TREE)
{
/* Gimplify internal functions created in the FEs. */
int nargs = call_expr_nargs (*from_p), i;
enum internal_fn ifn = CALL_EXPR_IFN (*from_p);
auto_vec<tree> vargs (nargs);
for (i = 0; i < nargs; i++)
{
gimplify_arg (&CALL_EXPR_ARG (*from_p, i), pre_p,
EXPR_LOCATION (*from_p));
vargs.quick_push (CALL_EXPR_ARG (*from_p, i));
}
call_stmt = gimple_build_call_internal_vec (ifn, vargs);
gimple_set_location (call_stmt, EXPR_LOCATION (*expr_p));
}
else
{
tree fnptrtype = TREE_TYPE (CALL_EXPR_FN (*from_p));
CALL_EXPR_FN (*from_p) = TREE_OPERAND (CALL_EXPR_FN (*from_p), 0);
STRIP_USELESS_TYPE_CONVERSION (CALL_EXPR_FN (*from_p));
tree fndecl = get_callee_fndecl (*from_p);
if (fndecl
&& DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
&& DECL_FUNCTION_CODE (fndecl) == BUILT_IN_EXPECT
&& call_expr_nargs (*from_p) == 3)
call_stmt = gimple_build_call_internal (IFN_BUILTIN_EXPECT, 3,
CALL_EXPR_ARG (*from_p, 0),
CALL_EXPR_ARG (*from_p, 1),
CALL_EXPR_ARG (*from_p, 2));
else
{
call_stmt = gimple_build_call_from_tree (*from_p);
gimple_call_set_fntype (call_stmt, TREE_TYPE (fnptrtype));
}
}
notice_special_calls (call_stmt);
if (!gimple_call_noreturn_p (call_stmt))
gimple_call_set_lhs (call_stmt, *to_p);
assign = call_stmt;
}
else
{
assign = gimple_build_assign (*to_p, *from_p);
gimple_set_location (assign, EXPR_LOCATION (*expr_p));
}
if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
{
/* We should have got an SSA name from the start. */
gcc_assert (TREE_CODE (*to_p) == SSA_NAME);
}
gimplify_seq_add_stmt (pre_p, assign);
gsi = gsi_last (*pre_p);
maybe_fold_stmt (&gsi);
if (want_value)
{
*expr_p = TREE_THIS_VOLATILE (*to_p) ? *from_p : unshare_expr (*to_p);
return GS_OK;
}
else
*expr_p = NULL;
return GS_ALL_DONE;
}
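/* For illustration (editorial sketch): the GENERIC assignment
a = foo (b);
is not tupled as GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>; the code above
instead builds a single GIMPLE_CALL statement whose LHS is "a",
dropping the LHS when the callee is noreturn. */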
/* Gimplify a comparison between two variable-sized objects. Do this
with a call to BUILT_IN_MEMCMP. */
static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
location_t loc = EXPR_LOCATION (*expr_p);
tree op0 = TREE_OPERAND (*expr_p, 0);
tree op1 = TREE_OPERAND (*expr_p, 1);
tree t, arg, dest, src, expr;
arg = TYPE_SIZE_UNIT (TREE_TYPE (op0));
arg = unshare_expr (arg);
arg = SUBSTITUTE_PLACEHOLDER_IN_EXPR (arg, op0);
src = build_fold_addr_expr_loc (loc, op1);
dest = build_fold_addr_expr_loc (loc, op0);
t = builtin_decl_implicit (BUILT_IN_MEMCMP);
t = build_call_expr_loc (loc, t, 3, dest, src, arg);
expr
= build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t, integer_zero_node);
SET_EXPR_LOCATION (expr, loc);
*expr_p = expr;
return GS_OK;
}
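/* For illustration (editorial sketch): a comparison "x == y" of two
objects of variable-sized type becomes roughly
__builtin_memcmp (&x, &y, size) == 0
where "size" is TYPE_SIZE_UNIT of the type with any PLACEHOLDER_EXPRs
substituted from the first operand. */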
/* Gimplify a comparison between two aggregate objects of integral scalar
mode as a comparison between the bitwise equivalent scalar values. */
static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
location_t loc = EXPR_LOCATION (*expr_p);
tree op0 = TREE_OPERAND (*expr_p, 0);
tree op1 = TREE_OPERAND (*expr_p, 1);
tree type = TREE_TYPE (op0);
tree scalar_type = lang_hooks.types.type_for_mode (TYPE_MODE (type), 1);
op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, op0);
op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, op1);
*expr_p
= fold_build2_loc (loc, TREE_CODE (*expr_p), TREE_TYPE (*expr_p), op0, op1);
return GS_OK;
}
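/* For illustration (editorial sketch): on a target where
struct S { short lo, hi; };
has integral SImode, the comparison "x == y" is rewritten as roughly
VIEW_CONVERT_EXPR<int>(x) == VIEW_CONVERT_EXPR<int>(y)
i.e. a single scalar comparison of the bitwise images. */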
/* Gimplify an expression sequence. This function gimplifies each
expression and rewrites the original expression with the last
expression of the sequence in GIMPLE form.
PRE_P points to the list where the side effects for all the
expressions in the sequence will be emitted.
WANT_VALUE is true when the result of the last COMPOUND_EXPR is used. */
static enum gimplify_status
gimplify_compound_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
tree t = *expr_p;
do
{
tree *sub_p = &TREE_OPERAND (t, 0);
if (TREE_CODE (*sub_p) == COMPOUND_EXPR)
gimplify_compound_expr (sub_p, pre_p, false);
else
gimplify_stmt (sub_p, pre_p);
t = TREE_OPERAND (t, 1);
}
while (TREE_CODE (t) == COMPOUND_EXPR);
*expr_p = t;
if (want_value)
return GS_OK;
else
{
gimplify_stmt (expr_p, pre_p);
return GS_ALL_DONE;
}
}
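/* For illustration (editorial sketch): for the sequence
(a = 1, b = 2, c)
the loop above emits "a = 1;" and "b = 2;" as statements and rewrites
the expression to its final operand "c", which is itself gimplified
as a statement when its value is unused. */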
/* Gimplify a SAVE_EXPR node. EXPR_P points to the expression to
gimplify. After gimplification, EXPR_P will point to a new temporary
that holds the original value of the SAVE_EXPR node.
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_save_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
enum gimplify_status ret = GS_ALL_DONE;
tree val;
gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
val = TREE_OPERAND (*expr_p, 0);
/* If the SAVE_EXPR has not been resolved, then evaluate it once. */
if (!SAVE_EXPR_RESOLVED_P (*expr_p))
{
/* The operand may be a void-valued expression such as SAVE_EXPRs
generated by the Java frontend for class initialization. Such an
expression is executed only for its side effects. */
if (TREE_TYPE (val) == void_type_node)
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_stmt, fb_none);
val = NULL;
}
else
val = get_initialized_tmp_var (val, pre_p, post_p);
TREE_OPERAND (*expr_p, 0) = val;
SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
}
*expr_p = val;
return ret;
}
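/* For illustration (editorial sketch): the first time
SAVE_EXPR <n * 4>
is gimplified, an initialized temporary "tmp = n * 4;" is emitted and
the operand is replaced by "tmp" with SAVE_EXPR_RESOLVED_P set; any
later occurrence of the same SAVE_EXPR node then reuses "tmp"
directly. */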
/* Rewrite the ADDR_EXPR node pointed to by EXPR_P
unary_expr
: ...
| '&' varname
...
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree expr = *expr_p;
tree op0 = TREE_OPERAND (expr, 0);
enum gimplify_status ret;
location_t loc = EXPR_LOCATION (*expr_p);
switch (TREE_CODE (op0))
{
case INDIRECT_REF:
do_indirect_ref:
/* Check if we are dealing with an expression of the form '&*ptr'.
While the front end folds away '&*ptr' into 'ptr', these
expressions may be generated internally by the compiler (e.g.,
builtins like __builtin_va_end). */
/* Caution: the silent array decomposition semantics we allow for
ADDR_EXPR mean we can't always discard the pair. */
/* Gimplification of the ADDR_EXPR operand may drop
cv-qualification conversions, so make sure we add them if
needed. */
{
tree op00 = TREE_OPERAND (op0, 0);
tree t_expr = TREE_TYPE (expr);
tree t_op00 = TREE_TYPE (op00);
if (!useless_type_conversion_p (t_expr, t_op00))
op00 = fold_convert_loc (loc, TREE_TYPE (expr), op00);
*expr_p = op00;
ret = GS_OK;
}
break;
case VIEW_CONVERT_EXPR:
/* Take the address of our operand and then convert it to the type of
this ADDR_EXPR.
??? The interactions of VIEW_CONVERT_EXPR and aliasing are not at
all clear. The impact of this transformation is even less clear. */
/* If the operand is a useless conversion, look through it. Doing so
guarantees that the ADDR_EXPR and its operand will remain of the
same type. */
if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
op0 = TREE_OPERAND (op0, 0);
*expr_p = fold_convert_loc (loc, TREE_TYPE (expr),
build_fold_addr_expr_loc (loc,
TREE_OPERAND (op0, 0)));
ret = GS_OK;
break;
default:
/* If we see a call to a declared builtin or see its address
being taken (we can unify those cases here) then we can mark
the builtin for implicit generation by GCC. */
if (TREE_CODE (op0) == FUNCTION_DECL
&& DECL_BUILT_IN_CLASS (op0) == BUILT_IN_NORMAL
&& builtin_decl_declared_p (DECL_FUNCTION_CODE (op0)))
set_builtin_decl_implicit_p (DECL_FUNCTION_CODE (op0), true);
/* We use fb_either here because the C frontend sometimes takes
the address of a call that returns a struct; see
gcc.dg/c99-array-lval-1.c. The gimplifier will correctly make
the implied temporary explicit. */
/* Make the operand addressable. */
ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
is_gimple_addressable, fb_either);
if (ret == GS_ERROR)
break;
/* Then mark it. Beware that it may not be possible to do so directly
if a temporary has been created by the gimplification. */
prepare_gimple_addressable (&TREE_OPERAND (expr, 0), pre_p);
op0 = TREE_OPERAND (expr, 0);
/* For various reasons, the gimplification of the expression
may have made a new INDIRECT_REF. */
if (TREE_CODE (op0) == INDIRECT_REF)
goto do_indirect_ref;
mark_addressable (TREE_OPERAND (expr, 0));
/* The FEs may end up building ADDR_EXPRs early on a decl with
an incomplete type. Re-build ADDR_EXPRs in canonical form
here. */
if (!types_compatible_p (TREE_TYPE (op0), TREE_TYPE (TREE_TYPE (expr))))
*expr_p = build_fold_addr_expr (op0);
/* Make sure TREE_CONSTANT and TREE_SIDE_EFFECTS are set properly. */
recompute_tree_invariant_for_addr_expr (*expr_p);
/* If we re-built the ADDR_EXPR add a conversion to the original type
if required. */
if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
*expr_p = fold_convert (TREE_TYPE (expr), *expr_p);
break;
}
return ret;
}
/* Gimplify the operands of an ASM_EXPR. Input operands should be gimple
values; output operands should be gimple lvalues. */
static enum gimplify_status
gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree expr;
int noutputs;
const char **oconstraints;
int i;
tree link;
const char *constraint;
bool allows_mem, allows_reg, is_inout;
enum gimplify_status ret, tret;
gasm *stmt;
vec<tree, va_gc> *inputs;
vec<tree, va_gc> *outputs;
vec<tree, va_gc> *clobbers;
vec<tree, va_gc> *labels;
tree link_next;
expr = *expr_p;
noutputs = list_length (ASM_OUTPUTS (expr));
oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));
inputs = NULL;
outputs = NULL;
clobbers = NULL;
labels = NULL;
ret = GS_ALL_DONE;
link_next = NULL_TREE;
for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = link_next)
{
bool ok;
size_t constraint_len;
link_next = TREE_CHAIN (link);
oconstraints[i]
= constraint
= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
constraint_len = strlen (constraint);
if (constraint_len == 0)
continue;
ok = parse_output_constraint (&constraint, i, 0, 0,
&allows_mem, &allows_reg, &is_inout);
if (!ok)
{
ret = GS_ERROR;
is_inout = false;
}
if (!allows_reg && allows_mem)
mark_addressable (TREE_VALUE (link));
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_inout ? is_gimple_min_lval : is_gimple_lvalue,
fb_lvalue | fb_mayfail);
if (tret == GS_ERROR)
{
error ("invalid lvalue in asm output %d", i);
ret = tret;
}
vec_safe_push (outputs, link);
TREE_CHAIN (link) = NULL_TREE;
if (is_inout)
{
/* An input/output operand. To give the optimizers more
flexibility, split it into separate input and output
operands. */
tree input;
char buf[10];
/* Turn the in/out constraint into an output constraint. */
char *p = xstrdup (constraint);
p[0] = '=';
TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);
/* And add a matching input constraint. */
if (allows_reg)
{
sprintf (buf, "%d", i);
/* If there are multiple alternatives in the constraint,
handle each of them individually. Those that allow register
will be replaced with operand number, the others will stay
unchanged. */
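/* For illustration (editorial sketch): an in/out constraint "+r,m" on
operand 2 becomes the output constraint "=r,m" plus the matching
input constraint "2,m": register alternatives are replaced by the
operand number while the other alternatives are copied unchanged. */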
if (strchr (p, ',') != NULL)
{
size_t len = 0, buflen = strlen (buf);
char *beg, *end, *str, *dst;
for (beg = p + 1;;)
{
end = strchr (beg, ',');
if (end == NULL)
end = strchr (beg, '\0');
if ((size_t) (end - beg) < buflen)
len += buflen + 1;
else
len += end - beg + 1;
if (*end)
beg = end + 1;
else
break;
}
str = (char *) alloca (len);
for (beg = p + 1, dst = str;;)
{
const char *tem;
bool mem_p, reg_p, inout_p;
end = strchr (beg, ',');
if (end)
*end = '\0';
beg[-1] = '=';
tem = beg - 1;
parse_output_constraint (&tem, i, 0, 0,
&mem_p, &reg_p, &inout_p);
if (dst != str)
*dst++ = ',';
if (reg_p)
{
memcpy (dst, buf, buflen);
dst += buflen;
}
else
{
if (end)
len = end - beg;
else
len = strlen (beg);
memcpy (dst, beg, len);
dst += len;
}
if (end)
beg = end + 1;
else
break;
}
*dst = '\0';
input = build_string (dst - str, str);
}
else
input = build_string (strlen (buf), buf);
}
else
input = build_string (constraint_len - 1, constraint + 1);
free (p);
input = build_tree_list (build_tree_list (NULL_TREE, input),
unshare_expr (TREE_VALUE (link)));
ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
}
}
link_next = NULL_TREE;
for (link = ASM_INPUTS (expr); link; ++i, link = link_next)
{
link_next = TREE_CHAIN (link);
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
parse_input_constraint (&constraint, 0, 0, noutputs, 0,
oconstraints, &allows_mem, &allows_reg);
/* If we can't make copies, we can only accept memory. */
if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link))))
{
if (allows_mem)
allows_reg = 0;
else
{
error ("impossible constraint in %<asm%>");
error ("non-memory input %d must stay in memory", i);
return GS_ERROR;
}
}
/* If the operand is a memory input, it should be an lvalue. */
if (!allows_reg && allows_mem)
{
tree inputv = TREE_VALUE (link);
STRIP_NOPS (inputv);
if (TREE_CODE (inputv) == PREDECREMENT_EXPR
|| TREE_CODE (inputv) == PREINCREMENT_EXPR
|| TREE_CODE (inputv) == POSTDECREMENT_EXPR
|| TREE_CODE (inputv) == POSTINCREMENT_EXPR)
TREE_VALUE (link) = error_mark_node;
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_gimple_lvalue, fb_lvalue | fb_mayfail);
if (tret != GS_ERROR)
{
/* Unlike output operands, memory inputs are not guaranteed
to be lvalues by the FE, and while the expressions are
marked addressable there, if the input is e.g. a statement
expression, temporaries in it might not end up being
addressable. They might already be used in the IL and thus
it is too late to make them addressable now. */
tree x = TREE_VALUE (link);
while (handled_component_p (x))
x = TREE_OPERAND (x, 0);
if (TREE_CODE (x) == MEM_REF
&& TREE_CODE (TREE_OPERAND (x, 0)) == ADDR_EXPR)
x = TREE_OPERAND (TREE_OPERAND (x, 0), 0);
if ((TREE_CODE (x) == VAR_DECL
|| TREE_CODE (x) == PARM_DECL
|| TREE_CODE (x) == RESULT_DECL)
&& !TREE_ADDRESSABLE (x)
&& is_gimple_reg (x))
{
warning_at (EXPR_LOC_OR_LOC (TREE_VALUE (link),
input_location), 0,
"memory input %d is not directly addressable",
i);
prepare_gimple_addressable (&TREE_VALUE (link), pre_p);
}
}
mark_addressable (TREE_VALUE (link));
if (tret == GS_ERROR)
{
error_at (EXPR_LOC_OR_LOC (TREE_VALUE (link), input_location),
"memory input %d is not directly addressable", i);
ret = tret;
}
}
else
{
tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
is_gimple_asm_val, fb_rvalue);
if (tret == GS_ERROR)
ret = tret;
}
TREE_CHAIN (link) = NULL_TREE;
vec_safe_push (inputs, link);
}
link_next = NULL_TREE;
for (link = ASM_CLOBBERS (expr); link; ++i, link = link_next)
{
link_next = TREE_CHAIN (link);
TREE_CHAIN (link) = NULL_TREE;
vec_safe_push (clobbers, link);
}
link_next = NULL_TREE;
for (link = ASM_LABELS (expr); link; ++i, link = link_next)
{
link_next = TREE_CHAIN (link);
TREE_CHAIN (link) = NULL_TREE;
vec_safe_push (labels, link);
}
/* Do not add ASMs with errors to the gimple IL stream. */
if (ret != GS_ERROR)
{
stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)),
inputs, outputs, clobbers, labels);
gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr));
gimple_asm_set_input (stmt, ASM_INPUT_P (expr));
gimplify_seq_add_stmt (pre_p, stmt);
}
return ret;
}
/* Gimplify a CLEANUP_POINT_EXPR. Currently this works by adding
GIMPLE_WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
return to this function.
FIXME should we complexify the prequeue handling instead? Or use flags
for all the cleanups and let the optimizer tighten them up? The current
code seems pretty fragile; it will break on a cleanup within any
non-conditional nesting. But any such nesting would be broken, anyway;
we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
and continues out of it. We can do that at the RTL level, though, so
having an optimizer to tighten up try/finally regions would be a Good
Thing. */
static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
{
gimple_stmt_iterator iter;
gimple_seq body_sequence = NULL;
tree temp = voidify_wrapper_expr (*expr_p, NULL);
/* We only care about the number of conditions between the innermost
CLEANUP_POINT_EXPR and the cleanup. So save and reset the count and
any cleanups collected outside the CLEANUP_POINT_EXPR. */
int old_conds = gimplify_ctxp->conditions;
gimple_seq old_cleanups = gimplify_ctxp->conditional_cleanups;
bool old_in_cleanup_point_expr = gimplify_ctxp->in_cleanup_point_expr;
gimplify_ctxp->conditions = 0;
gimplify_ctxp->conditional_cleanups = NULL;
gimplify_ctxp->in_cleanup_point_expr = true;
gimplify_stmt (&TREE_OPERAND (*expr_p, 0), &body_sequence);
gimplify_ctxp->conditions = old_conds;
gimplify_ctxp->conditional_cleanups = old_cleanups;
gimplify_ctxp->in_cleanup_point_expr = old_in_cleanup_point_expr;
for (iter = gsi_start (body_sequence); !gsi_end_p (iter); )
{
gimple wce = gsi_stmt (iter);
if (gimple_code (wce) == GIMPLE_WITH_CLEANUP_EXPR)
{
if (gsi_one_before_end_p (iter))
{
/* Note that gsi_insert_seq_before and gsi_remove do not
scan operands, unlike some other sequence mutators. */
if (!gimple_wce_cleanup_eh_only (wce))
gsi_insert_seq_before_without_update (&iter,
gimple_wce_cleanup (wce),
GSI_SAME_STMT);
gsi_remove (&iter, true);
break;
}
else
{
gtry *gtry;
gimple_seq seq;
enum gimple_try_flags kind;
if (gimple_wce_cleanup_eh_only (wce))
kind = GIMPLE_TRY_CATCH;
else
kind = GIMPLE_TRY_FINALLY;
seq = gsi_split_seq_after (iter);
gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
/* Do not use gsi_replace here, as it may scan operands.
We want to do a simple structural modification only. */
gsi_set_stmt (&iter, gtry);
iter = gsi_start (gtry->eval);
}
}
else
gsi_next (&iter);
}
gimplify_seq_add_seq (pre_p, body_sequence);
if (temp)
{
*expr_p = temp;
return GS_OK;
}
else
{
*expr_p = NULL;
return GS_ALL_DONE;
}
}
/* Insert a cleanup marker for gimplify_cleanup_point_expr. CLEANUP
is the cleanup action required. EH_ONLY is true if the cleanup should
only be executed if an exception is thrown, not on normal exit. */
static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, gimple_seq *pre_p)
{
gimple wce;
gimple_seq cleanup_stmts = NULL;
/* Errors can result in improperly nested cleanups, which result in
confusion when trying to resolve the GIMPLE_WITH_CLEANUP_EXPR. */
if (seen_error ())
return;
if (gimple_conditional_context ())
{
/* If we're in a conditional context, this is more complex. We only
want to run the cleanup if we actually ran the initialization that
necessitates it, but we want to run it after the end of the
conditional context. So we wrap the try/finally around the
condition and use a flag to determine whether or not to actually
run the destructor. Thus
test ? f(A()) : 0
becomes (approximately)
flag = 0;
try {
if (test) { A::A(temp); flag = 1; val = f(temp); }
else { val = 0; }
} finally {
if (flag) A::~A(temp);
}
val
*/
tree flag = create_tmp_var (boolean_type_node, "cleanup");
gassign *ffalse = gimple_build_assign (flag, boolean_false_node);
gassign *ftrue = gimple_build_assign (flag, boolean_true_node);
cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
gimplify_stmt (&cleanup, &cleanup_stmts);
wce = gimple_build_wce (cleanup_stmts);
gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, ffalse);
gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce);
gimplify_seq_add_stmt (pre_p, ftrue);
/* Because of this manipulation, and the EH edges that jump
threading cannot redirect, the temporary (VAR) will appear
to be used uninitialized. Don't warn. */
TREE_NO_WARNING (var) = 1;
}
else
{
gimplify_stmt (&cleanup, &cleanup_stmts);
wce = gimple_build_wce (cleanup_stmts);
gimple_wce_set_cleanup_eh_only (wce, eh_only);
gimplify_seq_add_stmt (pre_p, wce);
}
}
/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR. */
static enum gimplify_status
gimplify_target_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree targ = *expr_p;
tree temp = TARGET_EXPR_SLOT (targ);
tree init = TARGET_EXPR_INITIAL (targ);
enum gimplify_status ret;
if (init)
{
tree cleanup = NULL_TREE;
/* TARGET_EXPR temps aren't part of the enclosing block, so add the
temp to the temps list. Also handle variable-length TARGET_EXPRs. */
if (TREE_CODE (DECL_SIZE (temp)) != INTEGER_CST)
{
if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp)))
gimplify_type_sizes (TREE_TYPE (temp), pre_p);
gimplify_vla_decl (temp, pre_p);
}
else
gimple_add_tmp_var (temp);
/* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
expression is supposed to initialize the slot. */
if (VOID_TYPE_P (TREE_TYPE (init)))
ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
else
{
tree init_expr = build2 (INIT_EXPR, void_type_node, temp, init);
init = init_expr;
ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
init = NULL;
ggc_free (init_expr);
}
if (ret == GS_ERROR)
{
/* PR c++/28266 Make sure this is expanded only once. */
TARGET_EXPR_INITIAL (targ) = NULL_TREE;
return GS_ERROR;
}
if (init)
gimplify_and_add (init, pre_p);
/* If needed, push the cleanup for the temp. */
if (TARGET_EXPR_CLEANUP (targ))
{
if (CLEANUP_EH_ONLY (targ))
gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
CLEANUP_EH_ONLY (targ), pre_p);
else
cleanup = TARGET_EXPR_CLEANUP (targ);
}
/* Add a clobber for the temporary going out of scope, like
gimplify_bind_expr. */
if (gimplify_ctxp->in_cleanup_point_expr
&& needs_to_live_in_memory (temp)
&& flag_stack_reuse == SR_ALL)
{
tree clobber = build_constructor (TREE_TYPE (temp),
NULL);
TREE_THIS_VOLATILE (clobber) = true;
clobber = build2 (MODIFY_EXPR, TREE_TYPE (temp), temp, clobber);
if (cleanup)
cleanup = build2 (COMPOUND_EXPR, void_type_node, cleanup,
clobber);
else
cleanup = clobber;
}
if (cleanup)
gimple_push_cleanup (temp, cleanup, false, pre_p);
/* Only expand this once. */
TREE_OPERAND (targ, 3) = init;
TARGET_EXPR_INITIAL (targ) = NULL_TREE;
}
else
/* We should have expanded this before. */
gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));
*expr_p = temp;
return GS_OK;
}
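/* For illustration (editorial sketch): a statement-level
TARGET_EXPR <D.1, f ()>
adds D.1 to the temps list, emits "D.1 = f ();", pushes any
TARGET_EXPR_CLEANUP (plus, under -fstack-reuse=all, a clobber of D.1
going out of scope) as a cleanup, and rewrites the expression to
D.1. */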
/* Gimplification of expression trees. */
/* Gimplify an expression which appears at statement context. The
corresponding GIMPLE statements are added to *SEQ_P. If *SEQ_P is
NULL, a new sequence is allocated.
Return true if we actually added a statement to the queue. */
bool
gimplify_stmt (tree *stmt_p, gimple_seq *seq_p)
{
gimple_seq_node last;
last = gimple_seq_last (*seq_p);
gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none);
return last != gimple_seq_last (*seq_p);
}
/* Add FIRSTPRIVATE entries for DECL in CTX and the surrounding OpenMP
parallels. If entries already exist, force them to be some flavor of
private. If there is no enclosing parallel, do nothing. */
void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
splay_tree_node n;
if (decl == NULL || !DECL_P (decl))
return;
do
{
n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
if (n != NULL)
{
if (n->value & GOVD_SHARED)
n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN);
else if (n->value & GOVD_MAP)
n->value |= GOVD_MAP_TO_ONLY;
else
return;
}
else if (ctx->region_type == ORT_TARGET)
omp_add_variable (ctx, decl, GOVD_MAP | GOVD_MAP_TO_ONLY);
else if (ctx->region_type != ORT_WORKSHARE
&& ctx->region_type != ORT_SIMD
&& ctx->region_type != ORT_TARGET_DATA)
omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);
ctx = ctx->outer_context;
}
while (ctx);
}
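/* For illustration (editorial sketch): for a VLA "int a[n]" used inside
a parallel, the gimplified size temporaries referenced from DECL_SIZE
are entered as GOVD_FIRSTPRIVATE in each enclosing context walked
above (or as a to-only map in a target region), so every context gets
its own copy of the size. */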
/* Similarly for each of the type sizes of TYPE. */
static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
if (type == NULL || type == error_mark_node)
return;
type = TYPE_MAIN_VARIANT (type);
if (ctx->privatized_types->add (type))
return;
switch (TREE_CODE (type))
{
case INTEGER_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
break;
case ARRAY_TYPE:
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
break;
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
{
tree field;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL)
{
omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
}
}
break;
case POINTER_TYPE:
case REFERENCE_TYPE:
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
break;
default:
break;
}
omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));
lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}
/* Add an entry for DECL in the OMP context CTX with FLAGS. */
static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
splay_tree_node n;
unsigned int nflags;
tree t;
if (error_operand_p (decl))
return;
/* Never elide decls whose type has TREE_ADDRESSABLE set. This means
there are constructors involved somewhere. */
if (TREE_ADDRESSABLE (TREE_TYPE (decl))
|| TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
flags |= GOVD_SEEN;
n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
if (n != NULL && n->value != GOVD_ALIGNED)
{
/* We shouldn't be re-adding the decl with the same data
sharing class. */
gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
/* The only combination of data sharing classes we should see is
FIRSTPRIVATE and LASTPRIVATE. */
nflags = n->value | flags;
gcc_assert ((nflags & GOVD_DATA_SHARE_CLASS)
== (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE)
|| (flags & GOVD_DATA_SHARE_CLASS) == 0);
n->value = nflags;
return;
}
/* When adding a variable-sized variable, we have to handle all sorts
of additional bits of data: the pointer replacement variable, and
the parameters of the type. */
if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
/* Add the pointer replacement variable as PRIVATE if the variable
replacement is private, else FIRSTPRIVATE since we'll need the
address of the original variable either for SHARED, or for the
copy into or out of the context. */
if (!(flags & GOVD_LOCAL))
{
if (flags & GOVD_MAP)
nflags = GOVD_MAP | GOVD_MAP_TO_ONLY | GOVD_EXPLICIT;
else if (flags & GOVD_PRIVATE)
nflags = GOVD_PRIVATE;
else
nflags = GOVD_FIRSTPRIVATE;
nflags |= flags & GOVD_SEEN;
t = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (t) == INDIRECT_REF);
t = TREE_OPERAND (t, 0);
gcc_assert (DECL_P (t));
omp_add_variable (ctx, t, nflags);
}
/* Add all of the variable and type parameters (which should have
been gimplified to a formal temporary) as FIRSTPRIVATE. */
omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));
/* The variable-sized variable itself is never SHARED, only some form
of PRIVATE. The sharing would take place via the pointer variable
which we remapped above. */
if (flags & GOVD_SHARED)
flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
| (flags & (GOVD_SEEN | GOVD_EXPLICIT));
/* We're going to make use of the TYPE_SIZE_UNIT at least in the
   alloca statement we generate for the variable, so make sure it
   is available.  This isn't automatically needed for the SHARED
   case, since we won't be allocating local storage then.
   For local variables TYPE_SIZE_UNIT might not be gimplified yet;
   in that case omp_notice_variable will be called later on,
   when it is gimplified.  */
else if (! (flags & (GOVD_LOCAL | GOVD_MAP))
&& DECL_P (TYPE_SIZE_UNIT (TREE_TYPE (decl))))
omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
}
else if ((flags & (GOVD_MAP | GOVD_LOCAL)) == 0
&& lang_hooks.decls.omp_privatize_by_reference (decl))
{
omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));
/* Similar to the direct variable sized case above, we'll need the
size of references being privatized. */
if ((flags & GOVD_SHARED) == 0)
{
t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
if (TREE_CODE (t) != INTEGER_CST)
omp_notice_variable (ctx, t, true);
}
}
if (n != NULL)
n->value |= flags;
else
splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
}
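/* As the assertion in omp_add_variable encodes, the only data-sharing
   classes accepted twice for one DECL are FIRSTPRIVATE together with
   LASTPRIVATE, e.g. (sketch):

       #pragma omp for firstprivate (x) lastprivate (x)
       for (i = 0; i < n; i++)
         x += a[i];

   which OpenMP permits on worksharing loops.  */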
/* Notice a threadprivate variable DECL used in OMP context CTX.
   This emits diagnostics about threadprivate variable uses in untied
   tasks and in target regions.  If DECL2 is non-NULL, suppress the
   diagnostic for that variable as well.  */
static bool
omp_notice_threadprivate_variable (struct gimplify_omp_ctx *ctx, tree decl,
tree decl2)
{
splay_tree_node n;
struct gimplify_omp_ctx *octx;
for (octx = ctx; octx; octx = octx->outer_context)
if (octx->region_type == ORT_TARGET)
{
n = splay_tree_lookup (octx->variables, (splay_tree_key)decl);
if (n == NULL)
{
error ("threadprivate variable %qE used in target region",
DECL_NAME (decl));
error_at (octx->location, "enclosing target region");
splay_tree_insert (octx->variables, (splay_tree_key)decl, 0);
}
if (decl2)
splay_tree_insert (octx->variables, (splay_tree_key)decl2, 0);
}
if (ctx->region_type != ORT_UNTIED_TASK)
return false;
n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
if (n == NULL)
{
error ("threadprivate variable %qE used in untied task",
DECL_NAME (decl));
error_at (ctx->location, "enclosing task");
splay_tree_insert (ctx->variables, (splay_tree_key)decl, 0);
}
if (decl2)
splay_tree_insert (ctx->variables, (splay_tree_key)decl2, 0);
return false;
}
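/* A sketch of the diagnostics above ('tp' is a placeholder):

       int tp;
       #pragma omp threadprivate (tp)

       #pragma omp task untied
       tp++;	// error: threadprivate variable 'tp' used in untied task

   The splay-tree insertions merely ensure each diagnostic is emitted at
   most once per variable.  */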
/* Record the fact that DECL was used within the OMP context CTX.
IN_CODE is true when real code uses DECL, and false when we should
merely emit default(none) errors. Return true if DECL is going to
be remapped and thus DECL shouldn't be gimplified into its
DECL_VALUE_EXPR (if any). */
static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
splay_tree_node n;
unsigned flags = in_code ? GOVD_SEEN : 0;
bool ret = false, shared;
if (error_operand_p (decl))
return false;
/* Threadprivate variables are predetermined. */
if (is_global_var (decl))
{
if (DECL_THREAD_LOCAL_P (decl))
return omp_notice_threadprivate_variable (ctx, decl, NULL_TREE);
if (DECL_HAS_VALUE_EXPR_P (decl))
{
tree value = get_base_address (DECL_VALUE_EXPR (decl));
if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
return omp_notice_threadprivate_variable (ctx, decl, value);
}
}
n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
if (ctx->region_type == ORT_TARGET)
{
ret = lang_hooks.decls.omp_disregard_value_expr (decl, true);
if (n == NULL)
{
if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (decl)))
{
error ("%qD referenced in target region does not have "
"a mappable type", decl);
omp_add_variable (ctx, decl, GOVD_MAP | GOVD_EXPLICIT | flags);
}
else
omp_add_variable (ctx, decl, GOVD_MAP | flags);
}
else
{
/* If nothing changed, there's nothing left to do. */
if ((n->value & flags) == flags)
return ret;
n->value |= flags;
}
goto do_outer;
}
if (n == NULL)
{
enum omp_clause_default_kind default_kind, kind;
struct gimplify_omp_ctx *octx;
if (ctx->region_type == ORT_WORKSHARE
|| ctx->region_type == ORT_SIMD
|| ctx->region_type == ORT_TARGET_DATA)
goto do_outer;
/* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
remapped firstprivate instead of shared. To some extent this is
addressed in omp_firstprivatize_type_sizes, but not effectively. */
default_kind = ctx->default_kind;
kind = lang_hooks.decls.omp_predetermined_sharing (decl);
if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
default_kind = kind;
switch (default_kind)
{
case OMP_CLAUSE_DEFAULT_NONE:
if ((ctx->region_type & ORT_PARALLEL) != 0)
{
error ("%qE not specified in enclosing parallel",
DECL_NAME (lang_hooks.decls.omp_report_decl (decl)));
error_at (ctx->location, "enclosing parallel");
}
else if ((ctx->region_type & ORT_TASK) != 0)
{
error ("%qE not specified in enclosing task",
DECL_NAME (lang_hooks.decls.omp_report_decl (decl)));
error_at (ctx->location, "enclosing task");
}
else if (ctx->region_type & ORT_TEAMS)
{
error ("%qE not specified in enclosing teams construct",
DECL_NAME (lang_hooks.decls.omp_report_decl (decl)));
error_at (ctx->location, "enclosing teams construct");
}
else
gcc_unreachable ();
/* FALLTHRU */
case OMP_CLAUSE_DEFAULT_SHARED:
flags |= GOVD_SHARED;
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
flags |= GOVD_PRIVATE;
break;
case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
flags |= GOVD_FIRSTPRIVATE;
break;
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
/* DECL will end up either GOVD_FIRSTPRIVATE or GOVD_SHARED.  */
gcc_assert ((ctx->region_type & ORT_TASK) != 0);
if (ctx->outer_context)
omp_notice_variable (ctx->outer_context, decl, in_code);
for (octx = ctx->outer_context; octx; octx = octx->outer_context)
{
splay_tree_node n2;
if ((octx->region_type & (ORT_TARGET_DATA | ORT_TARGET)) != 0)
continue;
n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
{
flags |= GOVD_FIRSTPRIVATE;
break;
}
if ((octx->region_type & (ORT_PARALLEL | ORT_TEAMS)) != 0)
break;
}
if (flags & GOVD_FIRSTPRIVATE)
break;
if (octx == NULL
&& (TREE_CODE (decl) == PARM_DECL
|| (!is_global_var (decl)
&& DECL_CONTEXT (decl) == current_function_decl)))
{
flags |= GOVD_FIRSTPRIVATE;
break;
}
flags |= GOVD_SHARED;
break;
default:
gcc_unreachable ();
}
if ((flags & GOVD_PRIVATE)
&& lang_hooks.decls.omp_private_outer_ref (decl))
flags |= GOVD_PRIVATE_OUTER_REF;
omp_add_variable (ctx, decl, flags);
shared = (flags & GOVD_SHARED) != 0;
ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
goto do_outer;
}
if ((n->value & (GOVD_SEEN | GOVD_LOCAL)) == 0
&& (flags & (GOVD_SEEN | GOVD_LOCAL)) == GOVD_SEEN
&& DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
splay_tree_node n2;
tree t = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (t) == INDIRECT_REF);
t = TREE_OPERAND (t, 0);
gcc_assert (DECL_P (t));
n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
n2->value |= GOVD_SEEN;
}
shared = ((flags | n->value) & GOVD_SHARED) != 0;
ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
/* If nothing changed, there's nothing left to do. */
if ((n->value & flags) == flags)
return ret;
flags |= n->value;
n->value = flags;
do_outer:
/* If the variable is private in the current context, then we don't
need to propagate anything to an outer context. */
if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
return ret;
if ((flags & (GOVD_LINEAR | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
== (GOVD_LINEAR | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
return ret;
if ((flags & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
| GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
== (GOVD_LASTPRIVATE | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
return ret;
if (ctx->outer_context
&& omp_notice_variable (ctx->outer_context, decl, in_code))
return true;
return ret;
}
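/* A sketch of the default(none) path above ('x' is a placeholder):

       int x;
       #pragma omp parallel default(none)
       x = 1;	// error: 'x' not specified in enclosing parallel

   After reporting the missing clause, the OMP_CLAUSE_DEFAULT_NONE case
   falls through to treat 'x' as shared so compilation can continue.  */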
/* Verify that DECL is private within CTX. If there's specific information
to the contrary in the innermost scope, generate an error. */
static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl, int simd)
{
splay_tree_node n;
n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
if (n != NULL)
{
if (n->value & GOVD_SHARED)
{
if (ctx == gimplify_omp_ctxp)
{
if (simd)
error ("iteration variable %qE is predetermined linear",
DECL_NAME (decl));
else
error ("iteration variable %qE should be private",
DECL_NAME (decl));
n->value = GOVD_PRIVATE;
return true;
}
else
return false;
}
else if ((n->value & GOVD_EXPLICIT) != 0
&& (ctx == gimplify_omp_ctxp
|| (ctx->region_type == ORT_COMBINED_PARALLEL
&& gimplify_omp_ctxp->outer_context == ctx)))
{
if ((n->value & GOVD_FIRSTPRIVATE) != 0)
error ("iteration variable %qE should not be firstprivate",
DECL_NAME (decl));
else if ((n->value & GOVD_REDUCTION) != 0)
error ("iteration variable %qE should not be reduction",
DECL_NAME (decl));
else if (simd == 1 && (n->value & GOVD_LASTPRIVATE) != 0)
error ("iteration variable %qE should not be lastprivate",
DECL_NAME (decl));
else if (simd && (n->value & GOVD_PRIVATE) != 0)
error ("iteration variable %qE should not be private",
DECL_NAME (decl));
else if (simd == 2 && (n->value & GOVD_LINEAR) != 0)
error ("iteration variable %qE is predetermined linear",
DECL_NAME (decl));
}
return (ctx == gimplify_omp_ctxp
|| (ctx->region_type == ORT_COMBINED_PARALLEL
&& gimplify_omp_ctxp->outer_context == ctx));
}
if (ctx->region_type != ORT_WORKSHARE
&& ctx->region_type != ORT_SIMD)
return false;
else if (ctx->outer_context)
return omp_is_private (ctx->outer_context, decl, simd);
return false;
}
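/* A sketch of one diagnostic above: listing the iteration variable in a
   reduction clause, as in

       #pragma omp for reduction (+:i)
       for (i = 0; i < n; i++)
         ...

   trips the GOVD_REDUCTION check and yields "iteration variable 'i'
   should not be reduction".  */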
/* Return true if DECL is private within a parallel region that binds
   to the current construct's context, or appears in that parallel
   region's REDUCTION clause.  */
static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl, bool copyprivate)
{
splay_tree_node n;
do
{
ctx = ctx->outer_context;
if (ctx == NULL)
return !(is_global_var (decl)
/* References might be private, but might be shared too.  When
   checking for copyprivate, assume they might be private;
   otherwise assume they might be shared.  */
|| (!copyprivate
&& lang_hooks.decls.omp_privatize_by_reference (decl)));
if ((ctx->region_type & (ORT_TARGET | ORT_TARGET_DATA)) != 0)
continue;
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if (n != NULL)
return (n->value & GOVD_SHARED) == 0;
}
while (ctx->region_type == ORT_WORKSHARE
|| ctx->region_type == ORT_SIMD);
return false;
}
/* Return true if the CTX is combined with distribute and thus
lastprivate can't be supported. */
static bool
omp_no_lastprivate (struct gimplify_omp_ctx *ctx)
{
do
{
if (ctx->outer_context == NULL)
return false;
ctx = ctx->outer_context;
switch (ctx->region_type)
{
case ORT_WORKSHARE:
if (!ctx->combined_loop)
return false;
if (ctx->distribute)
return true;
break;
case ORT_COMBINED_PARALLEL:
break;
case ORT_COMBINED_TEAMS:
return true;
default:
return false;
}
}
while (1);
}
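/* E.g. (sketch) for the combined construct

       #pragma omp teams distribute parallel for simd

   the loop contexts are combined with distribute, so this returns true
   and lastprivate handling is suppressed; gimplify_adjust_omp_clauses
   later turns such clauses into private or removes them.  */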
/* Scan the OMP clauses in *LIST_P, installing mappings into a new
   omp context and, where required, into the enclosing omp contexts.  */
static void
gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
enum omp_region_type region_type)
{
struct gimplify_omp_ctx *ctx, *outer_ctx;
tree c;
ctx = new_omp_context (region_type);
outer_ctx = ctx->outer_context;
while ((c = *list_p) != NULL)
{
bool remove = false;
bool notice_outer = true;
const char *check_non_private = NULL;
unsigned int flags;
tree decl;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
flags = GOVD_PRIVATE | GOVD_EXPLICIT;
if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c)))
{
flags |= GOVD_PRIVATE_OUTER_REF;
OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1;
}
else
notice_outer = false;
goto do_add;
case OMP_CLAUSE_SHARED:
flags = GOVD_SHARED | GOVD_EXPLICIT;
goto do_add;
case OMP_CLAUSE_FIRSTPRIVATE:
flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
check_non_private = "firstprivate";
goto do_add;
case OMP_CLAUSE_LASTPRIVATE:
flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
check_non_private = "lastprivate";
decl = OMP_CLAUSE_DECL (c);
if (omp_no_lastprivate (ctx))
{
notice_outer = false;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
else if (error_operand_p (decl))
goto do_add;
else if (outer_ctx
&& outer_ctx->region_type == ORT_COMBINED_PARALLEL
&& splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl) == NULL)
omp_add_variable (outer_ctx, decl, GOVD_SHARED | GOVD_SEEN);
else if (outer_ctx
&& outer_ctx->region_type == ORT_WORKSHARE
&& outer_ctx->combined_loop
&& splay_tree_lookup (outer_ctx->variables,
(splay_tree_key) decl) == NULL
&& !omp_check_private (outer_ctx, decl, false))
{
omp_add_variable (outer_ctx, decl, GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer_ctx->outer_context
&& (outer_ctx->outer_context->region_type
== ORT_COMBINED_PARALLEL)
&& splay_tree_lookup (outer_ctx->outer_context->variables,
(splay_tree_key) decl) == NULL)
omp_add_variable (outer_ctx->outer_context, decl,
GOVD_SHARED | GOVD_SEEN);
}
goto do_add;
case OMP_CLAUSE_REDUCTION:
flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
check_non_private = "reduction";
goto do_add;
case OMP_CLAUSE_LINEAR:
if (gimplify_expr (&OMP_CLAUSE_LINEAR_STEP (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
else
{
/* For a combined #pragma omp parallel for simd we need to put
   lastprivate, and perhaps firstprivate too, on the parallel.
   Similarly for #pragma omp for simd.  */
struct gimplify_omp_ctx *octx = outer_ctx;
decl = NULL_TREE;
if (omp_no_lastprivate (ctx))
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
do
{
if (OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
break;
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
decl = NULL_TREE;
break;
}
if (octx
&& octx->region_type == ORT_WORKSHARE
&& octx->combined_loop)
{
if (octx->outer_context
&& (octx->outer_context->region_type
== ORT_COMBINED_PARALLEL
|| (octx->outer_context->region_type
== ORT_COMBINED_TEAMS)))
octx = octx->outer_context;
else if (omp_check_private (octx, decl, false))
break;
}
else
break;
if (splay_tree_lookup (octx->variables,
(splay_tree_key) decl) != NULL)
{
octx = NULL;
break;
}
flags = GOVD_SEEN;
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
flags |= GOVD_FIRSTPRIVATE;
if (!OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
flags |= GOVD_LASTPRIVATE;
omp_add_variable (octx, decl, flags);
if (octx->outer_context == NULL)
break;
octx = octx->outer_context;
}
while (1);
if (octx
&& decl
&& (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
|| !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
omp_notice_variable (octx, decl, true);
}
flags = GOVD_LINEAR | GOVD_EXPLICIT;
if (OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
notice_outer = false;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
goto do_add;
case OMP_CLAUSE_MAP:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_P (decl) ? DECL_SIZE_UNIT (decl)
: TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p,
NULL, is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
if (!DECL_P (decl))
{
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p,
NULL, is_gimple_lvalue, fb_lvalue)
== GS_ERROR)
{
remove = true;
break;
}
break;
}
flags = GOVD_MAP | GOVD_EXPLICIT;
goto do_add;
case OMP_CLAUSE_DEPEND:
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPOUND_EXPR)
{
gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (c), 0), pre_p,
NULL, is_gimple_val, fb_rvalue);
OMP_CLAUSE_DECL (c) = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
}
if (error_operand_p (OMP_CLAUSE_DECL (c)))
{
remove = true;
break;
}
OMP_CLAUSE_DECL (c) = build_fold_addr_expr (OMP_CLAUSE_DECL (c));
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE__CACHE_:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_P (decl) ? DECL_SIZE_UNIT (decl)
: TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p,
NULL, is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
if (!DECL_P (decl))
{
if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p,
NULL, is_gimple_lvalue, fb_lvalue)
== GS_ERROR)
{
remove = true;
break;
}
break;
}
goto do_notice;
do_add:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
omp_add_variable (ctx, decl, flags);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
GOVD_LOCAL | GOVD_SEEN);
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c)));
push_gimplify_context ();
gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c),
&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c)));
OMP_CLAUSE_REDUCTION_INIT (c) = NULL_TREE;
OMP_CLAUSE_REDUCTION_MERGE (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_STMT (c))
{
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR)
{
tree bind = build3 (BIND_EXPR, void_type_node, NULL,
NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c);
OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind;
}
gimplify_and_add (OMP_CLAUSE_LASTPRIVATE_STMT (c),
&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)));
OMP_CLAUSE_LASTPRIVATE_STMT (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_STMT (c))
{
gimplify_omp_ctxp = ctx;
push_gimplify_context ();
if (TREE_CODE (OMP_CLAUSE_LINEAR_STMT (c)) != BIND_EXPR)
{
tree bind = build3 (BIND_EXPR, void_type_node, NULL,
NULL, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
BIND_EXPR_BODY (bind) = OMP_CLAUSE_LINEAR_STMT (c);
OMP_CLAUSE_LINEAR_STMT (c) = bind;
}
gimplify_and_add (OMP_CLAUSE_LINEAR_STMT (c),
&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
pop_gimplify_context
(gimple_seq_first_stmt (OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c)));
OMP_CLAUSE_LINEAR_STMT (c) = NULL_TREE;
gimplify_omp_ctxp = outer_ctx;
}
if (notice_outer)
goto do_notice;
break;
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_COPYPRIVATE
&& !remove
&& !omp_check_private (ctx, decl, true))
{
remove = true;
if (is_global_var (decl))
{
if (DECL_THREAD_LOCAL_P (decl))
remove = false;
else if (DECL_HAS_VALUE_EXPR_P (decl))
{
tree value = get_base_address (DECL_VALUE_EXPR (decl));
if (value
&& DECL_P (value)
&& DECL_THREAD_LOCAL_P (value))
remove = false;
}
}
if (remove)
error_at (OMP_CLAUSE_LOCATION (c),
"copyprivate variable %qE is not threadprivate"
" or private in outer context", DECL_NAME (decl));
}
do_notice:
if (outer_ctx)
omp_notice_variable (outer_ctx, decl, true);
if (check_non_private
&& region_type == ORT_WORKSHARE
&& omp_check_private (ctx, decl, false))
{
error ("%s variable %qE is private in outer context",
check_non_private, DECL_NAME (decl));
remove = true;
}
break;
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_IF:
OMP_CLAUSE_OPERAND (c, 0)
= gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
/* Fall through. */
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE__CILK_FOR_COUNT_:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_GANG
&& gimplify_expr (&OMP_CLAUSE_OPERAND (c, 1), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
remove = true;
break;
case OMP_CLAUSE_DEVICE_RESIDENT:
case OMP_CLAUSE_USE_DEVICE:
case OMP_CLAUSE_INDEPENDENT:
remove = true;
break;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
{
remove = true;
break;
}
if (gimplify_expr (&OMP_CLAUSE_ALIGNED_ALIGNMENT (c), pre_p, NULL,
is_gimple_val, fb_rvalue) == GS_ERROR)
{
remove = true;
break;
}
if (!is_global_var (decl)
&& TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
omp_add_variable (ctx, decl, GOVD_ALIGNED);
break;
case OMP_CLAUSE_DEFAULT:
ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
break;
default:
gcc_unreachable ();
}
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
gimplify_omp_ctxp = ctx;
}
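/* A sketch of the check_non_private diagnostic in the scan above
   ('x' is a placeholder):

       #pragma omp parallel private (x)
       #pragma omp for reduction (+:x)
       ...

   is rejected with "reduction variable 'x' is private in outer
   context", since the worksharing region would have no shared copy
   to reduce into.  */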
struct gimplify_adjust_omp_clauses_data
{
tree *list_p;
gimple_seq *pre_p;
};
/* Splay-tree callback: add an implicit data-sharing clause for the
   variable recorded in node N, unless it was explicitly listed, is
   purely local, or was never actually seen within the context.  */
static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
tree *list_p = ((struct gimplify_adjust_omp_clauses_data *) data)->list_p;
gimple_seq *pre_p
= ((struct gimplify_adjust_omp_clauses_data *) data)->pre_p;
tree decl = (tree) n->key;
unsigned flags = n->value;
enum omp_clause_code code;
tree clause;
bool private_debug;
if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
return 0;
if ((flags & GOVD_SEEN) == 0)
return 0;
if (flags & GOVD_DEBUG_PRIVATE)
{
gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_PRIVATE);
private_debug = true;
}
else if (flags & GOVD_MAP)
private_debug = false;
else
private_debug
= lang_hooks.decls.omp_private_debug_clause (decl,
!!(flags & GOVD_SHARED));
if (private_debug)
code = OMP_CLAUSE_PRIVATE;
else if (flags & GOVD_MAP)
code = OMP_CLAUSE_MAP;
else if (flags & GOVD_SHARED)
{
if (is_global_var (decl))
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
while (ctx != NULL)
{
splay_tree_node on
= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
| GOVD_PRIVATE | GOVD_REDUCTION
| GOVD_LINEAR | GOVD_MAP)) != 0)
break;
ctx = ctx->outer_context;
}
if (ctx == NULL)
return 0;
}
code = OMP_CLAUSE_SHARED;
}
else if (flags & GOVD_PRIVATE)
code = OMP_CLAUSE_PRIVATE;
else if (flags & GOVD_FIRSTPRIVATE)
code = OMP_CLAUSE_FIRSTPRIVATE;
else if (flags & GOVD_LASTPRIVATE)
code = OMP_CLAUSE_LASTPRIVATE;
else if (flags & GOVD_ALIGNED)
return 0;
else
gcc_unreachable ();
clause = build_omp_clause (input_location, code);
OMP_CLAUSE_DECL (clause) = decl;
OMP_CLAUSE_CHAIN (clause) = *list_p;
if (private_debug)
OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
else if (code == OMP_CLAUSE_MAP)
{
OMP_CLAUSE_SET_MAP_KIND (clause,
flags & GOVD_MAP_TO_ONLY
? GOMP_MAP_TO
: GOMP_MAP_TOFROM);
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
tree mem = build_simple_mem_ref (decl2);
OMP_CLAUSE_DECL (clause) = mem;
OMP_CLAUSE_SIZE (clause) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (gimplify_omp_ctxp->outer_context)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
omp_notice_variable (ctx, decl2, true);
omp_notice_variable (ctx, OMP_CLAUSE_SIZE (clause), true);
}
tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
OMP_CLAUSE_MAP);
OMP_CLAUSE_DECL (nc) = decl;
OMP_CLAUSE_SIZE (nc) = size_zero_node;
OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER);
OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (clause);
OMP_CLAUSE_CHAIN (clause) = nc;
}
else
OMP_CLAUSE_SIZE (clause) = DECL_SIZE_UNIT (decl);
}
if (code == OMP_CLAUSE_FIRSTPRIVATE && (flags & GOVD_LASTPRIVATE) != 0)
{
tree nc = build_omp_clause (input_location, OMP_CLAUSE_LASTPRIVATE);
OMP_CLAUSE_DECL (nc) = decl;
OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (nc) = 1;
OMP_CLAUSE_CHAIN (nc) = *list_p;
OMP_CLAUSE_CHAIN (clause) = nc;
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
gimplify_omp_ctxp = ctx->outer_context;
lang_hooks.decls.omp_finish_clause (nc, pre_p);
gimplify_omp_ctxp = ctx;
}
*list_p = clause;
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
gimplify_omp_ctxp = ctx->outer_context;
lang_hooks.decls.omp_finish_clause (clause, pre_p);
gimplify_omp_ctxp = ctx;
return 0;
}
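/* A sketch of the variable-sized mapping built above: an implicitly
   mapped VLA conceptually expands into two clauses, roughly

       map(tofrom: *ptr [data size])   map(GOMP_MAP_POINTER: vla [0])

   i.e. the data accessed through the pointer replacement decl, plus a
   zero-sized GOMP_MAP_POINTER clause for the original decl itself.  */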
static void
gimplify_adjust_omp_clauses (gimple_seq *pre_p, tree *list_p)
{
struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
tree c, decl;
while ((c = *list_p) != NULL)
{
splay_tree_node n;
bool remove = false;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_SHARED:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LINEAR:
decl = OMP_CLAUSE_DECL (c);
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
remove = !(n->value & GOVD_SEEN);
if (! remove)
{
bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
if ((n->value & GOVD_DEBUG_PRIVATE)
|| lang_hooks.decls.omp_private_debug_clause (decl, shared))
{
gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
|| ((n->value & GOVD_DATA_SHARE_CLASS)
== GOVD_PRIVATE));
OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
}
}
break;
case OMP_CLAUSE_LASTPRIVATE:
/* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
accurately reflect the presence of a FIRSTPRIVATE clause. */
decl = OMP_CLAUSE_DECL (c);
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
= (n->value & GOVD_FIRSTPRIVATE) != 0;
if (omp_no_lastprivate (ctx))
{
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
remove = true;
else
OMP_CLAUSE_CODE (c) = OMP_CLAUSE_PRIVATE;
}
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (!is_global_var (decl))
{
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
remove = n == NULL || !(n->value & GOVD_SEEN);
if (!remove && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
struct gimplify_omp_ctx *octx;
if (n != NULL
&& (n->value & (GOVD_DATA_SHARE_CLASS
& ~GOVD_FIRSTPRIVATE)))
remove = true;
else
for (octx = ctx->outer_context; octx;
octx = octx->outer_context)
{
n = splay_tree_lookup (octx->variables,
(splay_tree_key) decl);
if (n == NULL)
continue;
if (n->value & GOVD_LOCAL)
break;
/* We have to avoid assigning a shared variable
to itself when trying to add
__builtin_assume_aligned. */
if (n->value & GOVD_SHARED)
{
remove = true;
break;
}
}
}
}
else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
{
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
remove = true;
}
break;
case OMP_CLAUSE_MAP:
decl = OMP_CLAUSE_DECL (c);
if (!DECL_P (decl))
break;
n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
if (ctx->region_type == ORT_TARGET && !(n->value & GOVD_SEEN))
remove = true;
else if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_POINTER)
{
/* For GOMP_MAP_FORCE_DEVICEPTR, we'll never enter here, because
for these, TREE_CODE (DECL_SIZE (decl)) will always be
INTEGER_CST. */
gcc_assert (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FORCE_DEVICEPTR);
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
tree mem = build_simple_mem_ref (decl2);
OMP_CLAUSE_DECL (c) = mem;
OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (ctx->outer_context)
{
omp_notice_variable (ctx->outer_context, decl2, true);
omp_notice_variable (ctx->outer_context,
OMP_CLAUSE_SIZE (c), true);
}
tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
OMP_CLAUSE_MAP);
OMP_CLAUSE_DECL (nc) = decl;
OMP_CLAUSE_SIZE (nc) = size_zero_node;
OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER);
OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (c);
OMP_CLAUSE_CHAIN (c) = nc;
c = nc;
}
else if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl);
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE__CACHE_:
decl = OMP_CLAUSE_DECL (c);
if (!DECL_P (decl))
break;
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
tree mem = build_simple_mem_ref (decl2);
OMP_CLAUSE_DECL (c) = mem;
OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
if (ctx->outer_context)
{
omp_notice_variable (ctx->outer_context, decl2, true);
omp_notice_variable (ctx->outer_context,
OMP_CLAUSE_SIZE (c), true);
}
}
else if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl);
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE__CILK_FOR_COUNT_:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_DEVICE_RESIDENT:
case OMP_CLAUSE_USE_DEVICE:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
break;
default:
gcc_unreachable ();
}
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
/* Add in any implicit data sharing. */
struct gimplify_adjust_omp_clauses_data data;
data.list_p = list_p;
data.pre_p = pre_p;
splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, &data);
gimplify_omp_ctxp = ctx->outer_context;
delete_omp_context (ctx);
}
/* Gimplify OACC_CACHE. */
static void
gimplify_oacc_cache (tree *expr_p, gimple_seq *pre_p)
{
tree expr = *expr_p;
gimplify_scan_omp_clauses (&OACC_CACHE_CLAUSES (expr), pre_p, ORT_WORKSHARE);
gimplify_adjust_omp_clauses (pre_p, &OACC_CACHE_CLAUSES (expr));
/* TODO: Do something sensible with this information. */
*expr_p = NULL_TREE;
}
/* Gimplify the contents of an OMP_PARALLEL statement. This involves
gimplification of the body, as well as scanning the body for used
variables. We need to do this scan now, because variable-sized
decls will be decomposed during gimplification. */
static void
gimplify_omp_parallel (tree *expr_p, gimple_seq *pre_p)
{
tree expr = *expr_p;
gimple g;
gimple_seq body = NULL;
gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p,
OMP_PARALLEL_COMBINED (expr)
? ORT_COMBINED_PARALLEL
: ORT_PARALLEL);
push_gimplify_context ();
g = gimplify_and_return_first (OMP_PARALLEL_BODY (expr), &body);
if (gimple_code (g) == GIMPLE_BIND)
pop_gimplify_context (g);
else
pop_gimplify_context (NULL);
gimplify_adjust_omp_clauses (pre_p, &OMP_PARALLEL_CLAUSES (expr));
g = gimple_build_omp_parallel (body,
OMP_PARALLEL_CLAUSES (expr),
NULL_TREE, NULL_TREE);
if (OMP_PARALLEL_COMBINED (expr))
gimple_omp_set_subcode (g, GF_OMP_PARALLEL_COMBINED);
gimplify_seq_add_stmt (pre_p, g);
*expr_p = NULL_TREE;
}
/* Gimplify the contents of an OMP_TASK statement. This involves
gimplification of the body, as well as scanning the body for used
variables. We need to do this scan now, because variable-sized
decls will be decomposed during gimplification. */
static void
gimplify_omp_task (tree *expr_p, gimple_seq *pre_p)
{
tree expr = *expr_p;
gimple g;
gimple_seq body = NULL;
gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (expr), pre_p,
find_omp_clause (OMP_TASK_CLAUSES (expr),
OMP_CLAUSE_UNTIED)
? ORT_UNTIED_TASK : ORT_TASK);
push_gimplify_context ();
g = gimplify_and_return_first (OMP_TASK_BODY (expr), &body);
if (gimple_code (g) == GIMPLE_BIND)
pop_gimplify_context (g);
else
pop_gimplify_context (NULL);
gimplify_adjust_omp_clauses (pre_p, &OMP_TASK_CLAUSES (expr));
g = gimple_build_omp_task (body,
OMP_TASK_CLAUSES (expr),
NULL_TREE, NULL_TREE,
NULL_TREE, NULL_TREE, NULL_TREE);
gimplify_seq_add_stmt (pre_p, g);
*expr_p = NULL_TREE;
}
/* Helper function of gimplify_omp_for: find an OMP_FOR or OMP_SIMD
   with non-NULL OMP_FOR_INIT.  */
static tree
find_combined_omp_for (tree *tp, int *walk_subtrees, void *)
{
*walk_subtrees = 0;
switch (TREE_CODE (*tp))
{
case OMP_FOR:
*walk_subtrees = 1;
/* FALLTHRU */
case OMP_SIMD:
if (OMP_FOR_INIT (*tp) != NULL_TREE)
return *tp;
break;
case BIND_EXPR:
case STATEMENT_LIST:
case OMP_PARALLEL:
*walk_subtrees = 1;
break;
default:
break;
}
return NULL_TREE;
}
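/* For a combined construct such as

       #pragma omp for simd
       for (i = 0; i < n; i++)
         ...

   the outer OMP_FOR carries a NULL OMP_FOR_INIT and the loop description
   lives on the inner OMP_SIMD; this walker digs through BIND_EXPRs,
   STATEMENT_LISTs and (for e.g. distribute parallel for) OMP_PARALLEL
   nodes to find it.  */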
/* Gimplify the gross structure of an OMP_FOR statement. */
static enum gimplify_status
gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
{
tree for_stmt, orig_for_stmt, inner_for_stmt = NULL_TREE, decl, var, t;
enum gimplify_status ret = GS_ALL_DONE;
enum gimplify_status tret;
gomp_for *gfor;
gimple_seq for_body, for_pre_body;
int i;
bool simd;
bitmap has_decl_expr = NULL;
orig_for_stmt = for_stmt = *expr_p;
switch (TREE_CODE (for_stmt))
{
case OMP_FOR:
case CILK_FOR:
case OMP_DISTRIBUTE:
case OACC_LOOP:
simd = false;
break;
case OMP_SIMD:
case CILK_SIMD:
simd = true;
break;
default:
gcc_unreachable ();
}
/* Set the OMP_CLAUSE_LINEAR_NO_COPYIN flag on an explicit linear
   clause for the IV.  */
if (simd && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), 0);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
decl = TREE_OPERAND (t, 0);
for (tree c = OMP_FOR_CLAUSES (for_stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_DECL (c) == decl)
{
OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1;
break;
}
}
if (OMP_FOR_INIT (for_stmt) == NULL_TREE)
{
gcc_assert (TREE_CODE (for_stmt) != OACC_LOOP);
inner_for_stmt = walk_tree (&OMP_FOR_BODY (for_stmt),
find_combined_omp_for, NULL, NULL);
if (inner_for_stmt == NULL_TREE)
{
gcc_assert (seen_error ());
*expr_p = NULL_TREE;
return GS_ERROR;
}
}
gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p,
simd ? ORT_SIMD : ORT_WORKSHARE);
if (TREE_CODE (for_stmt) == OMP_DISTRIBUTE)
gimplify_omp_ctxp->distribute = true;
/* Handle OMP_FOR_INIT. */
for_pre_body = NULL;
if (simd && OMP_FOR_PRE_BODY (for_stmt))
{
has_decl_expr = BITMAP_ALLOC (NULL);
if (TREE_CODE (OMP_FOR_PRE_BODY (for_stmt)) == DECL_EXPR
&& TREE_CODE (DECL_EXPR_DECL (OMP_FOR_PRE_BODY (for_stmt)))
== VAR_DECL)
{
t = OMP_FOR_PRE_BODY (for_stmt);
bitmap_set_bit (has_decl_expr, DECL_UID (DECL_EXPR_DECL (t)));
}
else if (TREE_CODE (OMP_FOR_PRE_BODY (for_stmt)) == STATEMENT_LIST)
{
tree_stmt_iterator si;
for (si = tsi_start (OMP_FOR_PRE_BODY (for_stmt)); !tsi_end_p (si);
tsi_next (&si))
{
t = tsi_stmt (si);
if (TREE_CODE (t) == DECL_EXPR
&& TREE_CODE (DECL_EXPR_DECL (t)) == VAR_DECL)
bitmap_set_bit (has_decl_expr, DECL_UID (DECL_EXPR_DECL (t)));
}
}
}
gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body);
OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;
if (OMP_FOR_INIT (for_stmt) == NULL_TREE)
{
for_stmt = inner_for_stmt;
gimplify_omp_ctxp->combined_loop = true;
}
for_body = NULL;
gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
== TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt)));
gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
== TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt)));
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
decl = TREE_OPERAND (t, 0);
gcc_assert (DECL_P (decl));
gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl))
|| POINTER_TYPE_P (TREE_TYPE (decl)));
/* Make sure the iteration variable is private. */
tree c = NULL_TREE;
tree c2 = NULL_TREE;
if (orig_for_stmt != for_stmt)
/* Do this only on innermost construct for combined ones. */;
else if (simd)
{
splay_tree_node n = splay_tree_lookup (gimplify_omp_ctxp->variables,
(splay_tree_key)decl);
omp_is_private (gimplify_omp_ctxp, decl,
1 + (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt))
!= 1));
if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
omp_notice_variable (gimplify_omp_ctxp, decl, true);
else if (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
{
c = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1;
unsigned int flags = GOVD_LINEAR | GOVD_EXPLICIT | GOVD_SEEN;
if ((has_decl_expr
&& bitmap_bit_p (has_decl_expr, DECL_UID (decl)))
|| omp_no_lastprivate (gimplify_omp_ctxp))
{
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
struct gimplify_omp_ctx *outer
= gimplify_omp_ctxp->outer_context;
if (outer && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n != NULL && (n->value & GOVD_LOCAL) != 0)
{
OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1;
flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER;
}
}
}
OMP_CLAUSE_DECL (c) = decl;
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c;
omp_add_variable (gimplify_omp_ctxp, decl, flags);
if (outer && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
if (outer->outer_context
&& (outer->outer_context->region_type
== ORT_COMBINED_PARALLEL))
outer = outer->outer_context;
else if (omp_check_private (outer, decl, false))
outer = NULL;
}
else if (outer->region_type != ORT_COMBINED_PARALLEL)
outer = NULL;
if (outer)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0)
{
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer->outer_context)
omp_notice_variable (outer->outer_context, decl,
true);
}
}
}
}
else
{
bool lastprivate
= (!has_decl_expr
|| !bitmap_bit_p (has_decl_expr, DECL_UID (decl)))
&& !omp_no_lastprivate (gimplify_omp_ctxp);
struct gimplify_omp_ctx *outer
= gimplify_omp_ctxp->outer_context;
if (outer && lastprivate)
{
if (outer->region_type == ORT_WORKSHARE
&& outer->combined_loop)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n != NULL && (n->value & GOVD_LOCAL) != 0)
{
lastprivate = false;
outer = NULL;
}
else if (outer->outer_context
&& (outer->outer_context->region_type
== ORT_COMBINED_PARALLEL))
outer = outer->outer_context;
else if (omp_check_private (outer, decl, false))
outer = NULL;
}
else if (outer->region_type != ORT_COMBINED_PARALLEL)
outer = NULL;
if (outer)
{
n = splay_tree_lookup (outer->variables,
(splay_tree_key)decl);
if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0)
{
omp_add_variable (outer, decl,
GOVD_LASTPRIVATE | GOVD_SEEN);
if (outer->outer_context)
omp_notice_variable (outer->outer_context, decl,
true);
}
}
}
c = build_omp_clause (input_location,
lastprivate ? OMP_CLAUSE_LASTPRIVATE
: OMP_CLAUSE_PRIVATE);
OMP_CLAUSE_DECL (c) = decl;
OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c;
omp_add_variable (gimplify_omp_ctxp, decl,
(lastprivate ? GOVD_LASTPRIVATE : GOVD_PRIVATE)
| GOVD_EXPLICIT | GOVD_SEEN);
c = NULL_TREE;
}
}
else if (omp_is_private (gimplify_omp_ctxp, decl, 0))
omp_notice_variable (gimplify_omp_ctxp, decl, true);
else
omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);
/* If DECL is not a gimple register, create a temporary variable to act
as an iteration counter. This is valid, since DECL cannot be
modified in the body of the loop. Similarly for any iteration vars
in simd with collapse > 1 where the iterator vars must be
lastprivate. */
if (orig_for_stmt != for_stmt)
var = decl;
else if (!is_gimple_reg (decl)
|| (simd && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1))
{
var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
TREE_OPERAND (t, 0) = var;
gimplify_seq_add_stmt (&for_body, gimple_build_assign (decl, var));
if (simd && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
{
c2 = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
OMP_CLAUSE_LINEAR_NO_COPYIN (c2) = 1;
OMP_CLAUSE_LINEAR_NO_COPYOUT (c2) = 1;
OMP_CLAUSE_DECL (c2) = var;
OMP_CLAUSE_CHAIN (c2) = OMP_FOR_CLAUSES (for_stmt);
OMP_FOR_CLAUSES (for_stmt) = c2;
omp_add_variable (gimplify_omp_ctxp, var,
GOVD_LINEAR | GOVD_EXPLICIT | GOVD_SEEN);
if (c == NULL_TREE)
{
c = c2;
c2 = NULL_TREE;
}
}
else
omp_add_variable (gimplify_omp_ctxp, var,
GOVD_PRIVATE | GOVD_SEEN);
}
else
var = decl;
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue);
ret = MIN (ret, tret);
if (ret == GS_ERROR)
return ret;
/* Handle OMP_FOR_COND. */
t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
gcc_assert (COMPARISON_CLASS_P (t));
gcc_assert (TREE_OPERAND (t, 0) == decl);
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue);
ret = MIN (ret, tret);
/* Handle OMP_FOR_INCR. */
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
switch (TREE_CODE (t))
{
case PREINCREMENT_EXPR:
case POSTINCREMENT_EXPR:
{
tree decl = TREE_OPERAND (t, 0);
/* c_omp_for_incr_canonicalize_ptr() should have been
called to massage things appropriately. */
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
if (orig_for_stmt != for_stmt)
break;
t = build_int_cst (TREE_TYPE (decl), 1);
if (c)
OMP_CLAUSE_LINEAR_STEP (c) = t;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
break;
}
case PREDECREMENT_EXPR:
case POSTDECREMENT_EXPR:
/* c_omp_for_incr_canonicalize_ptr() should have been
called to massage things appropriately. */
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
if (orig_for_stmt != for_stmt)
break;
t = build_int_cst (TREE_TYPE (decl), -1);
if (c)
OMP_CLAUSE_LINEAR_STEP (c) = t;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t);
TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t;
break;
case MODIFY_EXPR:
gcc_assert (TREE_OPERAND (t, 0) == decl);
TREE_OPERAND (t, 0) = var;
t = TREE_OPERAND (t, 1);
switch (TREE_CODE (t))
{
case PLUS_EXPR:
if (TREE_OPERAND (t, 1) == decl)
{
TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
TREE_OPERAND (t, 0) = var;
break;
}
/* Fallthru. */
case MINUS_EXPR:
case POINTER_PLUS_EXPR:
gcc_assert (TREE_OPERAND (t, 0) == decl);
TREE_OPERAND (t, 0) = var;
break;
default:
gcc_unreachable ();
}
tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL,
is_gimple_val, fb_rvalue);
ret = MIN (ret, tret);
if (c)
{
tree step = TREE_OPERAND (t, 1);
tree stept = TREE_TYPE (decl);
if (POINTER_TYPE_P (stept))
stept = sizetype;
step = fold_convert (stept, step);
if (TREE_CODE (t) == MINUS_EXPR)
step = fold_build1 (NEGATE_EXPR, stept, step);
OMP_CLAUSE_LINEAR_STEP (c) = step;
if (step != TREE_OPERAND (t, 1))
{
tret = gimplify_expr (&OMP_CLAUSE_LINEAR_STEP (c),
&for_pre_body, NULL,
is_gimple_val, fb_rvalue);
ret = MIN (ret, tret);
}
}
break;
default:
gcc_unreachable ();
}
if (c2)
{
gcc_assert (c);
OMP_CLAUSE_LINEAR_STEP (c2) = OMP_CLAUSE_LINEAR_STEP (c);
}
if ((var != decl || TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1)
&& orig_for_stmt == for_stmt)
{
for (c = OMP_FOR_CLAUSES (for_stmt); c ; c = OMP_CLAUSE_CHAIN (c))
if (((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) == NULL)
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) == NULL))
&& OMP_CLAUSE_DECL (c) == decl)
{
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
gcc_assert (TREE_OPERAND (t, 0) == var);
t = TREE_OPERAND (t, 1);
gcc_assert (TREE_CODE (t) == PLUS_EXPR
|| TREE_CODE (t) == MINUS_EXPR
|| TREE_CODE (t) == POINTER_PLUS_EXPR);
gcc_assert (TREE_OPERAND (t, 0) == var);
t = build2 (TREE_CODE (t), TREE_TYPE (decl), decl,
TREE_OPERAND (t, 1));
gimple_seq *seq;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
seq = &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c);
else
seq = &OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c);
gimplify_assign (decl, t, seq);
}
}
}
BITMAP_FREE (has_decl_expr);
gimplify_and_add (OMP_FOR_BODY (orig_for_stmt), &for_body);
if (orig_for_stmt != for_stmt)
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
decl = TREE_OPERAND (t, 0);
var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
TREE_OPERAND (t, 0) = var;
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
TREE_OPERAND (t, 1) = copy_node (TREE_OPERAND (t, 1));
TREE_OPERAND (TREE_OPERAND (t, 1), 0) = var;
}
gimplify_adjust_omp_clauses (pre_p, &OMP_FOR_CLAUSES (orig_for_stmt));
int kind;
switch (TREE_CODE (orig_for_stmt))
{
case OMP_FOR: kind = GF_OMP_FOR_KIND_FOR; break;
case OMP_SIMD: kind = GF_OMP_FOR_KIND_SIMD; break;
case CILK_SIMD: kind = GF_OMP_FOR_KIND_CILKSIMD; break;
case CILK_FOR: kind = GF_OMP_FOR_KIND_CILKFOR; break;
case OMP_DISTRIBUTE: kind = GF_OMP_FOR_KIND_DISTRIBUTE; break;
case OACC_LOOP: kind = GF_OMP_FOR_KIND_OACC_LOOP; break;
default:
gcc_unreachable ();
}
gfor = gimple_build_omp_for (for_body, kind, OMP_FOR_CLAUSES (orig_for_stmt),
TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)),
for_pre_body);
if (orig_for_stmt != for_stmt)
gimple_omp_for_set_combined_p (gfor, true);
if (gimplify_omp_ctxp
&& (gimplify_omp_ctxp->combined_loop
|| (gimplify_omp_ctxp->region_type == ORT_COMBINED_PARALLEL
&& gimplify_omp_ctxp->outer_context
&& gimplify_omp_ctxp->outer_context->combined_loop)))
{
gimple_omp_for_set_combined_into_p (gfor, true);
if (gimplify_omp_ctxp->combined_loop)
gcc_assert (TREE_CODE (orig_for_stmt) == OMP_SIMD);
else
gcc_assert (TREE_CODE (orig_for_stmt) == OMP_FOR);
}
for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
{
t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
gimple_omp_for_set_index (gfor, i, TREE_OPERAND (t, 0));
gimple_omp_for_set_initial (gfor, i, TREE_OPERAND (t, 1));
t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i);
gimple_omp_for_set_cond (gfor, i, TREE_CODE (t));
gimple_omp_for_set_final (gfor, i, TREE_OPERAND (t, 1));
t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i);
gimple_omp_for_set_incr (gfor, i, TREE_OPERAND (t, 1));
}
gimplify_seq_add_stmt (pre_p, gfor);
if (ret != GS_ALL_DONE)
return GS_ERROR;
*expr_p = NULL_TREE;
return GS_ALL_DONE;
}
/* Gimplify the gross structure of several OMP constructs. */
static void
gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
{
tree expr = *expr_p;
gimple stmt;
gimple_seq body = NULL;
enum omp_region_type ort;
switch (TREE_CODE (expr))
{
case OMP_SECTIONS:
case OMP_SINGLE:
ort = ORT_WORKSHARE;
break;
case OACC_KERNELS:
case OACC_PARALLEL:
case OMP_TARGET:
ort = ORT_TARGET;
break;
case OACC_DATA:
case OMP_TARGET_DATA:
ort = ORT_TARGET_DATA;
break;
case OMP_TEAMS:
ort = OMP_TEAMS_COMBINED (expr) ? ORT_COMBINED_TEAMS : ORT_TEAMS;
break;
default:
gcc_unreachable ();
}
gimplify_scan_omp_clauses (&OMP_CLAUSES (expr), pre_p, ort);
if (ort == ORT_TARGET || ort == ORT_TARGET_DATA)
{
push_gimplify_context ();
gimple g = gimplify_and_return_first (OMP_BODY (expr), &body);
if (gimple_code (g) == GIMPLE_BIND)
pop_gimplify_context (g);
else
pop_gimplify_context (NULL);
if (ort == ORT_TARGET_DATA)
{
enum built_in_function end_ix;
switch (TREE_CODE (expr))
{
case OACC_DATA:
end_ix = BUILT_IN_GOACC_DATA_END;
break;
case OMP_TARGET_DATA:
end_ix = BUILT_IN_GOMP_TARGET_END_DATA;
break;
default:
gcc_unreachable ();
}
tree fn = builtin_decl_explicit (end_ix);
g = gimple_build_call (fn, 0);
gimple_seq cleanup = NULL;
gimple_seq_add_stmt (&cleanup, g);
g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
body = NULL;
gimple_seq_add_stmt (&body, g);
}
}
else
gimplify_and_add (OMP_BODY (expr), &body);
gimplify_adjust_omp_clauses (pre_p, &OMP_CLAUSES (expr));
switch (TREE_CODE (expr))
{
case OACC_DATA:
stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_DATA,
OMP_CLAUSES (expr));
break;
case OACC_KERNELS:
stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_KERNELS,
OMP_CLAUSES (expr));
break;
case OACC_PARALLEL:
stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_PARALLEL,
OMP_CLAUSES (expr));
break;
case OMP_SECTIONS:
stmt = gimple_build_omp_sections (body, OMP_CLAUSES (expr));
break;
case OMP_SINGLE:
stmt = gimple_build_omp_single (body, OMP_CLAUSES (expr));
break;
case OMP_TARGET:
stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_REGION,
OMP_CLAUSES (expr));
break;
case OMP_TARGET_DATA:
stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_DATA,
OMP_CLAUSES (expr));
break;
case OMP_TEAMS:
stmt = gimple_build_omp_teams (body, OMP_CLAUSES (expr));
break;
default:
gcc_unreachable ();
}
gimplify_seq_add_stmt (pre_p, stmt);
*expr_p = NULL_TREE;
}
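/* E.g. (sketch) for

       #pragma omp target data map (tofrom: a)
       { ... }

   the gimplified body is wrapped in a GIMPLE_TRY_FINALLY whose cleanup
   calls BUILT_IN_GOMP_TARGET_END_DATA, so the mapping is undone even on
   abnormal exit from the body; OACC_DATA gets BUILT_IN_GOACC_DATA_END
   the same way.  */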
/* Gimplify the gross structure of OpenACC enter/exit data, update, and OpenMP
target update constructs. */
static void
gimplify_omp_target_update (tree *expr_p, gimple_seq *pre_p)
{
tree expr = *expr_p, clauses;
int kind;
gomp_target *stmt;
switch (TREE_CODE (expr))
{
case OACC_ENTER_DATA:
clauses = OACC_ENTER_DATA_CLAUSES (expr);
kind = GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA;
break;
case OACC_EXIT_DATA:
clauses = OACC_EXIT_DATA_CLAUSES (expr);
kind = GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA;
break;
case OACC_UPDATE:
clauses = OACC_UPDATE_CLAUSES (expr);
kind = GF_OMP_TARGET_KIND_OACC_UPDATE;
break;
case OMP_TARGET_UPDATE:
clauses = OMP_TARGET_UPDATE_CLAUSES (expr);
kind = GF_OMP_TARGET_KIND_UPDATE;
break;
default:
gcc_unreachable ();
}
gimplify_scan_omp_clauses (&clauses, pre_p, ORT_WORKSHARE);
gimplify_adjust_omp_clauses (pre_p, &clauses);
stmt = gimple_build_omp_target (NULL, kind, clauses);
gimplify_seq_add_stmt (pre_p, stmt);
*expr_p = NULL_TREE;
}
/* A subroutine of gimplify_omp_atomic. The front end is supposed to have
stabilized the lhs of the atomic operation as *ADDR. Return true if
EXPR is this stabilized form. */
static bool
goa_lhs_expr_p (tree expr, tree addr)
{
/* Also include casts to other type variants. The C front end is fond
of adding these for e.g. volatile variables. This is like
STRIP_TYPE_NOPS but includes the main variant lookup. */
STRIP_USELESS_TYPE_CONVERSION (expr);
if (TREE_CODE (expr) == INDIRECT_REF)
{
expr = TREE_OPERAND (expr, 0);
while (expr != addr
&& (CONVERT_EXPR_P (expr)
|| TREE_CODE (expr) == NON_LVALUE_EXPR)
&& TREE_CODE (expr) == TREE_CODE (addr)
&& types_compatible_p (TREE_TYPE (expr), TREE_TYPE (addr)))
{
expr = TREE_OPERAND (expr, 0);
addr = TREE_OPERAND (addr, 0);
}
if (expr == addr)
return true;
return (TREE_CODE (addr) == ADDR_EXPR
&& TREE_CODE (expr) == ADDR_EXPR
&& TREE_OPERAND (addr, 0) == TREE_OPERAND (expr, 0));
}
if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
return true;
return false;
}
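/* E.g. with 'volatile int x' and '#pragma omp atomic  x += 1', the
   front end presents the lhs as '*&x', possibly wrapped in casts to
   type variants; this predicate matches such forms against ADDR.  */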
/* Walk *EXPR_P and replace appearances of *LHS_ADDR with LHS_VAR. If an
expression does not involve the lhs, evaluate it into a temporary.
Return 1 if the lhs appeared as a subexpression, 0 if it did not,
or -1 if an error was encountered. */
static int
goa_stabilize_expr (tree *expr_p, gimple_seq *pre_p, tree lhs_addr,
tree lhs_var)
{
tree expr = *expr_p;
int saw_lhs;
if (goa_lhs_expr_p (expr, lhs_addr))
{
*expr_p = lhs_var;
return 1;
}
if (is_gimple_val (expr))
return 0;
saw_lhs = 0;
switch (TREE_CODE_CLASS (TREE_CODE (expr)))
{
case tcc_binary:
case tcc_comparison:
saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p, lhs_addr,
lhs_var);
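      /* FALLTHRU */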
case tcc_unary:
saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p, lhs_addr,
lhs_var);
break;
case tcc_expression:
switch (TREE_CODE (expr))
{
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
lhs_addr, lhs_var);
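	  /* FALLTHRU */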
case TRUTH_NOT_EXPR:
saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
lhs_addr, lhs_var);
break;
case COMPOUND_EXPR:
/* Break out any preevaluations from cp_build_modify_expr. */
for (; TREE_CODE (expr) == COMPOUND_EXPR;
expr = TREE_OPERAND (expr, 1))
gimplify_stmt (&TREE_OPERAND (expr, 0), pre_p);
*expr_p = expr;
return goa_stabilize_expr (expr_p, pre_p, lhs_addr, lhs_var);
default:
break;
}
break;
default:
break;
}
if (saw_lhs == 0)
{
enum gimplify_status gs;
gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
if (gs != GS_ALL_DONE)
saw_lhs = -1;
}
return saw_lhs;
}
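/* A sketch ('foo' is a placeholder): for

       #pragma omp atomic
       x = x + foo ();

   the call foo () does not contain the lhs, so it is gimplified into a
   temporary in PRE_P and the rhs becomes 'LHS_VAR + t.1', leaving only
   lhs appearances inside the atomic operation itself.  */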
/* Gimplify an OMP_ATOMIC statement. */
static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, gimple_seq *pre_p)
{
tree addr = TREE_OPERAND (*expr_p, 0);
tree rhs = TREE_CODE (*expr_p) == OMP_ATOMIC_READ
? NULL : TREE_OPERAND (*expr_p, 1);
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
tree tmp_load;
gomp_atomic_load *loadstmt;
gomp_atomic_store *storestmt;
tmp_load = create_tmp_reg (type);
if (rhs && goa_stabilize_expr (&rhs, pre_p, addr, tmp_load) < 0)
return GS_ERROR;
if (gimplify_expr (&addr, pre_p, NULL, is_gimple_val, fb_rvalue)
!= GS_ALL_DONE)
return GS_ERROR;
loadstmt = gimple_build_omp_atomic_load (tmp_load, addr);
gimplify_seq_add_stmt (pre_p, loadstmt);
if (rhs && gimplify_expr (&rhs, pre_p, NULL, is_gimple_val, fb_rvalue)
!= GS_ALL_DONE)
return GS_ERROR;
if (TREE_CODE (*expr_p) == OMP_ATOMIC_READ)
rhs = tmp_load;
storestmt = gimple_build_omp_atomic_store (rhs);
gimplify_seq_add_stmt (pre_p, storestmt);
if (OMP_ATOMIC_SEQ_CST (*expr_p))
{
gimple_omp_atomic_set_seq_cst (loadstmt);
gimple_omp_atomic_set_seq_cst (storestmt);
}
switch (TREE_CODE (*expr_p))
{
case OMP_ATOMIC_READ:
case OMP_ATOMIC_CAPTURE_OLD:
*expr_p = tmp_load;
gimple_omp_atomic_set_need_value (loadstmt);
break;
case OMP_ATOMIC_CAPTURE_NEW:
*expr_p = rhs;
gimple_omp_atomic_set_need_value (storestmt);
break;
default:
*expr_p = NULL;
break;
}
return GS_ALL_DONE;
}
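/* A sketch of the capture forms above: for

       #pragma omp atomic capture
       v = x++;

   the old value is needed, so OMP_ATOMIC_CAPTURE_OLD sets need_value on
   the load statement; for 'v = ++x' (OMP_ATOMIC_CAPTURE_NEW) need_value
   goes on the store instead, so the updated value is produced.  */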
/* Gimplify a TRANSACTION_EXPR. This involves gimplification of the
body, and adding some EH bits. */
static enum gimplify_status
gimplify_transaction (tree *expr_p, gimple_seq *pre_p)
{
tree expr = *expr_p, temp, tbody = TRANSACTION_EXPR_BODY (expr);
gimple body_stmt;
gtransaction *trans_stmt;
gimple_seq body = NULL;
int subcode = 0;
/* Wrap the transaction body in a BIND_EXPR so we have a context
   in which to put decls for OMP.  */
if (TREE_CODE (tbody) != BIND_EXPR)
{
tree bind = build3 (BIND_EXPR, void_type_node, NULL, tbody, NULL);
TREE_SIDE_EFFECTS (bind) = 1;
SET_EXPR_LOCATION (bind, EXPR_LOCATION (tbody));
TRANSACTION_EXPR_BODY (expr) = bind;
}
push_gimplify_context ();
temp = voidify_wrapper_expr (*expr_p, NULL);
body_stmt = gimplify_and_return_first (TRANSACTION_EXPR_BODY (expr), &body);
pop_gimplify_context (body_stmt);
trans_stmt = gimple_build_transaction (body, NULL);
if (TRANSACTION_EXPR_OUTER (expr))
subcode = GTMA_IS_OUTER;
else if (TRANSACTION_EXPR_RELAXED (expr))
subcode = GTMA_IS_RELAXED;
gimple_transaction_set_subcode (trans_stmt, subcode);
gimplify_seq_add_stmt (pre_p, trans_stmt);
if (temp)
{
*expr_p = temp;
return GS_OK;
}
*expr_p = NULL_TREE;
return GS_ALL_DONE;
}
/* Convert the GENERIC expression tree *EXPR_P to GIMPLE. If the
expression produces a value to be used as an operand inside a GIMPLE
statement, the value will be stored back in *EXPR_P. This value will
be a tree of class tcc_declaration, tcc_constant, tcc_reference or
an SSA_NAME. The corresponding sequence of GIMPLE statements is
emitted in PRE_P and POST_P.
Additionally, this process may overwrite parts of the input
expression during gimplification. Ideally, it should be
possible to do non-destructive gimplification.
EXPR_P points to the GENERIC expression to convert to GIMPLE. If
the expression needs to evaluate to a value to be used as
an operand in a GIMPLE statement, this value will be stored in
*EXPR_P on exit. This happens when the caller specifies one
of fb_lvalue or fb_rvalue fallback flags.
PRE_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of EXPR and all the side-effects that must
be executed before the main expression. On exit, the last
statement of PRE_P is the core statement being gimplified. For
instance, when gimplifying 'if (++a)' the last statement in
PRE_P will be 'if (t.1)' where t.1 is the result of
pre-incrementing 'a'.
POST_P will contain the sequence of GIMPLE statements corresponding
to the evaluation of all the side-effects that must be executed
after the main expression. If this is NULL, the post
side-effects are stored at the end of PRE_P.
The reason why the output is split in two is to handle post
side-effects explicitly. In some cases, an expression may have
inner and outer post side-effects which need to be emitted in
an order different from the one given by the recursive
traversal. For instance, for the expression (*p--)++ the post
side-effects of '--' must actually occur *after* the post
side-effects of '++'. However, gimplification will first visit
the inner expression, so if a separate POST sequence was not
used, the resulting sequence would be:
1 t.1 = *p
2 p = p - 1
3 t.2 = t.1 + 1
4 *p = t.2
However, the post-decrement operation in line #2 must not be
evaluated until after the store to *p at line #4, so the
correct sequence should be:
1 t.1 = *p
2 t.2 = t.1 + 1
3 *p = t.2
4 p = p - 1
So, by specifying a separate post queue, it is possible
to emit the post side-effects in the correct order.
If POST_P is NULL, an internal queue will be used. Before
returning to the caller, the sequence POST_P is appended to
the main output sequence PRE_P.
GIMPLE_TEST_F points to a function that takes a tree T and
returns nonzero if T is in the GIMPLE form requested by the
caller. The GIMPLE predicates are in gimple.c.
FALLBACK tells the function what sort of a temporary we want if
gimplification cannot produce an expression that complies with
GIMPLE_TEST_F.
fb_none means that no temporary should be generated
fb_rvalue means that an rvalue is OK to generate
fb_lvalue means that an lvalue is OK to generate
fb_either means that either is OK, but an lvalue is preferable.
fb_mayfail means that gimplification may fail (in which case
GS_ERROR will be returned)
The return value is either GS_ERROR or GS_ALL_DONE, since this
function iterates until EXPR is completely gimplified or an error
occurs. */
enum gimplify_status
gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
bool (*gimple_test_f) (tree), fallback_t fallback)
{
tree tmp;
gimple_seq internal_pre = NULL;
gimple_seq internal_post = NULL;
tree save_expr;
bool is_statement;
location_t saved_location;
enum gimplify_status ret;
gimple_stmt_iterator pre_last_gsi, post_last_gsi;
save_expr = *expr_p;
if (save_expr == NULL_TREE)
return GS_ALL_DONE;
/* If we are gimplifying a top-level statement, PRE_P must be valid. */
is_statement = gimple_test_f == is_gimple_stmt;
if (is_statement)
gcc_assert (pre_p);
/* Consistency checks. */
if (gimple_test_f == is_gimple_reg)
gcc_assert (fallback & (fb_rvalue | fb_lvalue));
else if (gimple_test_f == is_gimple_val
|| gimple_test_f == is_gimple_call_addr
|| gimple_test_f == is_gimple_condexpr
|| gimple_test_f == is_gimple_mem_rhs
|| gimple_test_f == is_gimple_mem_rhs_or_call
|| gimple_test_f == is_gimple_reg_rhs
|| gimple_test_f == is_gimple_reg_rhs_or_call
|| gimple_test_f == is_gimple_asm_val
|| gimple_test_f == is_gimple_mem_ref_addr)
gcc_assert (fallback & fb_rvalue);
else if (gimple_test_f == is_gimple_min_lval
|| gimple_test_f == is_gimple_lvalue)
gcc_assert (fallback & fb_lvalue);
else if (gimple_test_f == is_gimple_addressable)
gcc_assert (fallback & fb_either);
else if (gimple_test_f == is_gimple_stmt)
gcc_assert (fallback == fb_none);
else
{
/* We should have recognized the GIMPLE_TEST_F predicate to
know what kind of fallback to use in case a temporary is
needed to hold the value or address of *EXPR_P. */
gcc_unreachable ();
}
/* We used to check the predicate here and return immediately if it
succeeds. This is wrong; the design is for gimplification to be
idempotent, and for the predicates to only test for valid forms, not
whether they are fully simplified. */
if (pre_p == NULL)
pre_p = &internal_pre;
if (post_p == NULL)
post_p = &internal_post;
/* Remember the last statements added to PRE_P and POST_P. Every
new statement added by the gimplification helpers needs to be
annotated with location information. To centralize the
responsibility, we remember the last statement that had been
added to both queues before gimplifying *EXPR_P. If
gimplification produces new statements in PRE_P and POST_P, those
statements will be annotated with the same location information
as *EXPR_P. */
pre_last_gsi = gsi_last (*pre_p);
post_last_gsi = gsi_last (*post_p);
saved_location = input_location;
if (save_expr != error_mark_node
&& EXPR_HAS_LOCATION (*expr_p))
input_location = EXPR_LOCATION (*expr_p);
/* Loop over the specific gimplifiers until the toplevel node
remains the same. */
do
{
/* Strip away as many useless type conversions as possible
at the toplevel. */
STRIP_USELESS_TYPE_CONVERSION (*expr_p);
/* Remember the expr. */
save_expr = *expr_p;
/* Die, die, die, my darling. */
if (save_expr == error_mark_node
|| (TREE_TYPE (save_expr)
&& TREE_TYPE (save_expr) == error_mark_node))
{
ret = GS_ERROR;
break;
}
/* Do any language-specific gimplification. */
ret = ((enum gimplify_status)
lang_hooks.gimplify_expr (expr_p, pre_p, post_p));
if (ret == GS_OK)
{
if (*expr_p == NULL_TREE)
break;
if (*expr_p != save_expr)
continue;
}
else if (ret != GS_UNHANDLED)
break;
/* Make sure that all the cases set 'ret' appropriately. */
ret = GS_UNHANDLED;
switch (TREE_CODE (*expr_p))
{
/* First deal with the special cases. */
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
ret = gimplify_self_mod_expr (expr_p, pre_p, post_p,
fallback != fb_none,
TREE_TYPE (*expr_p));
break;
case VIEW_CONVERT_EXPR:
if (is_gimple_reg_type (TREE_TYPE (*expr_p))
&& is_gimple_reg_type (TREE_TYPE (TREE_OPERAND (*expr_p, 0))))
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
}
/* Fallthru. */
case ARRAY_REF:
case ARRAY_RANGE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
ret = gimplify_compound_lval (expr_p, pre_p, post_p,
fallback ? fallback : fb_rvalue);
break;
case COND_EXPR:
ret = gimplify_cond_expr (expr_p, pre_p, fallback);
/* C99 code may assign to an array in a structure value of a
conditional expression, and this has undefined behavior
only on execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
ret = GS_OK;
}
break;
case CALL_EXPR:
ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none);
/* C99 code may assign to an array in a structure returned
from a function, and this has undefined behavior only on
execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
ret = GS_OK;
}
break;
case TREE_LIST:
gcc_unreachable ();
case COMPOUND_EXPR:
ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
break;
case COMPOUND_LITERAL_EXPR:
ret = gimplify_compound_literal_expr (expr_p, pre_p,
gimple_test_f, fallback);
break;
case MODIFY_EXPR:
case INIT_EXPR:
ret = gimplify_modify_expr (expr_p, pre_p, post_p,
fallback != fb_none);
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
{
/* Preserve the original type of the expression and the
source location of the outer expression. */
tree org_type = TREE_TYPE (*expr_p);
*expr_p = gimple_boolify (*expr_p);
*expr_p = build3_loc (input_location, COND_EXPR,
org_type, *expr_p,
fold_convert_loc
(input_location,
org_type, boolean_true_node),
fold_convert_loc
(input_location,
org_type, boolean_false_node));
ret = GS_OK;
break;
}
case TRUTH_NOT_EXPR:
{
tree type = TREE_TYPE (*expr_p);
/* The parsers are careful to generate TRUTH_NOT_EXPR
only with operands that are always zero or one.
We do not fold here but handle the only interesting case
manually, as fold may re-introduce the TRUTH_NOT_EXPR. */
*expr_p = gimple_boolify (*expr_p);
if (TYPE_PRECISION (TREE_TYPE (*expr_p)) == 1)
*expr_p = build1_loc (input_location, BIT_NOT_EXPR,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0));
else
*expr_p = build2_loc (input_location, BIT_XOR_EXPR,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
build_int_cst (TREE_TYPE (*expr_p), 1));
if (!useless_type_conversion_p (type, TREE_TYPE (*expr_p)))
*expr_p = fold_convert_loc (input_location, type, *expr_p);
ret = GS_OK;
break;
}
case ADDR_EXPR:
ret = gimplify_addr_expr (expr_p, pre_p, post_p);
break;
case ANNOTATE_EXPR:
{
tree cond = TREE_OPERAND (*expr_p, 0);
tree kind = TREE_OPERAND (*expr_p, 1);
tree type = TREE_TYPE (cond);
if (!INTEGRAL_TYPE_P (type))
{
*expr_p = cond;
ret = GS_OK;
break;
}
tree tmp = create_tmp_var (type);
gimplify_arg (&cond, pre_p, EXPR_LOCATION (*expr_p));
gcall *call
= gimple_build_call_internal (IFN_ANNOTATE, 2, cond, kind);
gimple_call_set_lhs (call, tmp);
gimplify_seq_add_stmt (pre_p, call);
*expr_p = tmp;
ret = GS_ALL_DONE;
break;
}
case VA_ARG_EXPR:
ret = gimplify_va_arg_expr (expr_p, pre_p, post_p);
break;
CASE_CONVERT:
if (IS_EMPTY_STMT (*expr_p))
{
ret = GS_ALL_DONE;
break;
}
if (VOID_TYPE_P (TREE_TYPE (*expr_p))
|| fallback == fb_none)
{
/* Just strip a conversion to void (or in void context) and
try again. */
*expr_p = TREE_OPERAND (*expr_p, 0);
ret = GS_OK;
break;
}
ret = gimplify_conversion (expr_p);
if (ret == GS_ERROR)
break;
if (*expr_p != save_expr)
break;
/* FALLTHRU */
case FIX_TRUNC_EXPR:
/* unary_expr: ... | '(' cast ')' val | ... */
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
case INDIRECT_REF:
{
bool volatilep = TREE_THIS_VOLATILE (*expr_p);
bool notrap = TREE_THIS_NOTRAP (*expr_p);
tree saved_ptr_type = TREE_TYPE (TREE_OPERAND (*expr_p, 0));
*expr_p = fold_indirect_ref_loc (input_location, *expr_p);
if (*expr_p != save_expr)
{
ret = GS_OK;
break;
}
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_reg, fb_rvalue);
if (ret == GS_ERROR)
break;
recalculate_side_effects (*expr_p);
*expr_p = fold_build2_loc (input_location, MEM_REF,
TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
build_int_cst (saved_ptr_type, 0));
TREE_THIS_VOLATILE (*expr_p) = volatilep;
TREE_THIS_NOTRAP (*expr_p) = notrap;
ret = GS_OK;
break;
}
/* We arrive here through the various re-gimplification paths. */
case MEM_REF:
/* First try re-folding the whole thing. */
tmp = fold_binary (MEM_REF, TREE_TYPE (*expr_p),
TREE_OPERAND (*expr_p, 0),
TREE_OPERAND (*expr_p, 1));
if (tmp)
{
*expr_p = tmp;
recalculate_side_effects (*expr_p);
ret = GS_OK;
break;
}
/* Avoid re-gimplifying the address operand if it is already
in suitable form. Re-gimplifying would mark the address
operand addressable. Always gimplify when not in SSA form
as we still may have to gimplify decls with value-exprs. */
if (!gimplify_ctxp || !gimplify_ctxp->into_ssa
|| !is_gimple_mem_ref_addr (TREE_OPERAND (*expr_p, 0)))
{
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_mem_ref_addr, fb_rvalue);
if (ret == GS_ERROR)
break;
}
recalculate_side_effects (*expr_p);
ret = GS_ALL_DONE;
break;
/* Constants need not be gimplified. */
case INTEGER_CST:
case REAL_CST:
case FIXED_CST:
case STRING_CST:
case COMPLEX_CST:
case VECTOR_CST:
/* Drop the overflow flag on constants, we do not want
that in the GIMPLE IL. */
if (TREE_OVERFLOW_P (*expr_p))
*expr_p = drop_tree_overflow (*expr_p);
ret = GS_ALL_DONE;
break;
case CONST_DECL:
/* If we require an lvalue, such as for ADDR_EXPR, retain the
CONST_DECL node. Otherwise the decl is replaceable by its
value. */
/* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */
if (fallback & fb_lvalue)
ret = GS_ALL_DONE;
else
{
*expr_p = DECL_INITIAL (*expr_p);
ret = GS_OK;
}
break;
case DECL_EXPR:
ret = gimplify_decl_expr (expr_p, pre_p);
break;
case BIND_EXPR:
ret = gimplify_bind_expr (expr_p, pre_p);
break;
case LOOP_EXPR:
ret = gimplify_loop_expr (expr_p, pre_p);
break;
case SWITCH_EXPR:
ret = gimplify_switch_expr (expr_p, pre_p);
break;
case EXIT_EXPR:
ret = gimplify_exit_expr (expr_p);
break;
case GOTO_EXPR:
/* If the target is not a LABEL_DECL, then it is a computed jump
and the target needs to be gimplified. */
if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL)
{
ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p,
NULL, is_gimple_val, fb_rvalue);
if (ret == GS_ERROR)
break;
}
gimplify_seq_add_stmt (pre_p,
gimple_build_goto (GOTO_DESTINATION (*expr_p)));
ret = GS_ALL_DONE;
break;
case PREDICT_EXPR:
gimplify_seq_add_stmt (pre_p,
gimple_build_predict (PREDICT_EXPR_PREDICTOR (*expr_p),
PREDICT_EXPR_OUTCOME (*expr_p)));
ret = GS_ALL_DONE;
break;
case LABEL_EXPR:
ret = GS_ALL_DONE;
gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p))
== current_function_decl);
gimplify_seq_add_stmt (pre_p,
gimple_build_label (LABEL_EXPR_LABEL (*expr_p)));
break;
case CASE_LABEL_EXPR:
ret = gimplify_case_label_expr (expr_p, pre_p);
break;
case RETURN_EXPR:
ret = gimplify_return_expr (*expr_p, pre_p);
break;
case CONSTRUCTOR:
/* Don't reduce this in place; let gimplify_init_constructor work its
magic. But if we're just elaborating this for side effects, just
gimplify any element that has side-effects. */
if (fallback == fb_none)
{
unsigned HOST_WIDE_INT ix;
tree val;
tree temp = NULL_TREE;
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (*expr_p), ix, val)
if (TREE_SIDE_EFFECTS (val))
append_to_statement_list (val, &temp);
*expr_p = temp;
ret = temp ? GS_OK : GS_ALL_DONE;
}
/* C99 code may assign to an array in a constructed
structure or union, and this has undefined behavior only
on execution, so create a temporary if an lvalue is
required. */
else if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
ret = GS_OK;
}
else
ret = GS_ALL_DONE;
break;
/* The following are special cases that are not handled by the
original GIMPLE grammar. */
/* SAVE_EXPR nodes are converted into a GIMPLE identifier and
eliminated. */
case SAVE_EXPR:
ret = gimplify_save_expr (expr_p, pre_p, post_p);
break;
case BIT_FIELD_REF:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_lvalue, fb_either);
recalculate_side_effects (*expr_p);
break;
case TARGET_MEM_REF:
{
enum gimplify_status r0 = GS_ALL_DONE, r1 = GS_ALL_DONE;
if (TMR_BASE (*expr_p))
r0 = gimplify_expr (&TMR_BASE (*expr_p), pre_p,
post_p, is_gimple_mem_ref_addr, fb_either);
if (TMR_INDEX (*expr_p))
r1 = gimplify_expr (&TMR_INDEX (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
if (TMR_INDEX2 (*expr_p))
r1 = gimplify_expr (&TMR_INDEX2 (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
/* TMR_STEP and TMR_OFFSET are always integer constants. */
ret = MIN (r0, r1);
}
break;
case NON_LVALUE_EXPR:
/* This should have been stripped above. */
gcc_unreachable ();
case ASM_EXPR:
ret = gimplify_asm_expr (expr_p, pre_p, post_p);
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
{
gimple_seq eval, cleanup;
gtry *try_;
/* Calls to destructors are generated automatically in FINALLY/CATCH
block. They should have location as UNKNOWN_LOCATION. However,
gimplify_call_expr will reset these call stmts to input_location
if it finds stmt's location is unknown. To prevent resetting for
destructors, we set the input_location to unknown.
Note that this only affects the destructor calls in FINALLY/CATCH
block, and will automatically reset to its original value by the
end of gimplify_expr. */
input_location = UNKNOWN_LOCATION;
eval = cleanup = NULL;
gimplify_and_add (TREE_OPERAND (*expr_p, 0), &eval);
gimplify_and_add (TREE_OPERAND (*expr_p, 1), &cleanup);
/* Don't create bogus GIMPLE_TRY with empty cleanup. */
if (gimple_seq_empty_p (cleanup))
{
gimple_seq_add_seq (pre_p, eval);
ret = GS_ALL_DONE;
break;
}
try_ = gimple_build_try (eval, cleanup,
TREE_CODE (*expr_p) == TRY_FINALLY_EXPR
? GIMPLE_TRY_FINALLY
: GIMPLE_TRY_CATCH);
if (EXPR_HAS_LOCATION (save_expr))
gimple_set_location (try_, EXPR_LOCATION (save_expr));
else if (LOCATION_LOCUS (saved_location) != UNKNOWN_LOCATION)
gimple_set_location (try_, saved_location);
if (TREE_CODE (*expr_p) == TRY_CATCH_EXPR)
gimple_try_set_catch_is_cleanup (try_,
TRY_CATCH_IS_CLEANUP (*expr_p));
gimplify_seq_add_stmt (pre_p, try_);
ret = GS_ALL_DONE;
break;
}
case CLEANUP_POINT_EXPR:
ret = gimplify_cleanup_point_expr (expr_p, pre_p);
break;
case TARGET_EXPR:
ret = gimplify_target_expr (expr_p, pre_p, post_p);
break;
case CATCH_EXPR:
{
gimple c;
gimple_seq handler = NULL;
gimplify_and_add (CATCH_BODY (*expr_p), &handler);
c = gimple_build_catch (CATCH_TYPES (*expr_p), handler);
gimplify_seq_add_stmt (pre_p, c);
ret = GS_ALL_DONE;
break;
}
case EH_FILTER_EXPR:
{
gimple ehf;
gimple_seq failure = NULL;
gimplify_and_add (EH_FILTER_FAILURE (*expr_p), &failure);
ehf = gimple_build_eh_filter (EH_FILTER_TYPES (*expr_p), failure);
gimple_set_no_warning (ehf, TREE_NO_WARNING (*expr_p));
gimplify_seq_add_stmt (pre_p, ehf);
ret = GS_ALL_DONE;
break;
}
case OBJ_TYPE_REF:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p,
post_p, is_gimple_val, fb_rvalue);
TREE_SIDE_EFFECTS (*expr_p) = 0;
ret = MIN (r0, r1);
}
break;
case LABEL_DECL:
/* We get here when taking the address of a label. We mark
the label as "forced", meaning it can never be removed and
it is a potential target for any computed goto. */
FORCED_LABEL (*expr_p) = 1;
ret = GS_ALL_DONE;
break;
case STATEMENT_LIST:
ret = gimplify_statement_list (expr_p, pre_p);
break;
case WITH_SIZE_EXPR:
{
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p == &internal_post ? NULL : post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
is_gimple_val, fb_rvalue);
ret = GS_ALL_DONE;
}
break;
case VAR_DECL:
case PARM_DECL:
ret = gimplify_var_or_parm_decl (expr_p);
break;
case RESULT_DECL:
/* When within an OMP context, notice uses of variables. */
if (gimplify_omp_ctxp)
omp_notice_variable (gimplify_omp_ctxp, *expr_p, true);
ret = GS_ALL_DONE;
break;
case SSA_NAME:
/* Allow callbacks into the gimplifier during optimization. */
ret = GS_ALL_DONE;
break;
case OMP_PARALLEL:
gimplify_omp_parallel (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_TASK:
gimplify_omp_task (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_FOR:
case OMP_SIMD:
case CILK_SIMD:
case CILK_FOR:
case OMP_DISTRIBUTE:
case OACC_LOOP:
ret = gimplify_omp_for (expr_p, pre_p);
break;
case OACC_CACHE:
gimplify_oacc_cache (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_HOST_DATA:
case OACC_DECLARE:
sorry ("directive not yet implemented");
ret = GS_ALL_DONE;
break;
case OACC_KERNELS:
if (OACC_KERNELS_COMBINED (*expr_p))
sorry ("directive not yet implemented");
else
gimplify_omp_workshare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_PARALLEL:
if (OACC_PARALLEL_COMBINED (*expr_p))
sorry ("directive not yet implemented");
else
gimplify_omp_workshare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_DATA:
case OMP_SECTIONS:
case OMP_SINGLE:
case OMP_TARGET:
case OMP_TARGET_DATA:
case OMP_TEAMS:
gimplify_omp_workshare (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OACC_ENTER_DATA:
case OACC_EXIT_DATA:
case OACC_UPDATE:
case OMP_TARGET_UPDATE:
gimplify_omp_target_update (expr_p, pre_p);
ret = GS_ALL_DONE;
break;
case OMP_SECTION:
case OMP_MASTER:
case OMP_TASKGROUP:
case OMP_ORDERED:
case OMP_CRITICAL:
{
gimple_seq body = NULL;
gimple g;
gimplify_and_add (OMP_BODY (*expr_p), &body);
switch (TREE_CODE (*expr_p))
{
case OMP_SECTION:
g = gimple_build_omp_section (body);
break;
case OMP_MASTER:
g = gimple_build_omp_master (body);
break;
case OMP_TASKGROUP:
{
gimple_seq cleanup = NULL;
tree fn
= builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_END);
g = gimple_build_call (fn, 0);
gimple_seq_add_stmt (&cleanup, g);
g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
body = NULL;
gimple_seq_add_stmt (&body, g);
g = gimple_build_omp_taskgroup (body);
}
break;
case OMP_ORDERED:
g = gimple_build_omp_ordered (body);
break;
case OMP_CRITICAL:
g = gimple_build_omp_critical (body,
OMP_CRITICAL_NAME (*expr_p));
break;
default:
gcc_unreachable ();
}
gimplify_seq_add_stmt (pre_p, g);
ret = GS_ALL_DONE;
break;
}
case OMP_ATOMIC:
case OMP_ATOMIC_READ:
case OMP_ATOMIC_CAPTURE_OLD:
case OMP_ATOMIC_CAPTURE_NEW:
ret = gimplify_omp_atomic (expr_p, pre_p);
break;
case TRANSACTION_EXPR:
ret = gimplify_transaction (expr_p, pre_p);
break;
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
{
tree orig_type = TREE_TYPE (*expr_p);
tree new_type, xop0, xop1;
*expr_p = gimple_boolify (*expr_p);
new_type = TREE_TYPE (*expr_p);
if (!useless_type_conversion_p (orig_type, new_type))
{
*expr_p = fold_convert_loc (input_location, orig_type, *expr_p);
ret = GS_OK;
break;
}
/* Boolified binary truth expressions are semantically equivalent
to bitwise binary expressions. Canonicalize them to the
bitwise variant. */
switch (TREE_CODE (*expr_p))
{
case TRUTH_AND_EXPR:
TREE_SET_CODE (*expr_p, BIT_AND_EXPR);
break;
case TRUTH_OR_EXPR:
TREE_SET_CODE (*expr_p, BIT_IOR_EXPR);
break;
case TRUTH_XOR_EXPR:
TREE_SET_CODE (*expr_p, BIT_XOR_EXPR);
break;
default:
break;
}
/* Now make sure that operands have compatible type to
expression's new_type. */
xop0 = TREE_OPERAND (*expr_p, 0);
xop1 = TREE_OPERAND (*expr_p, 1);
if (!useless_type_conversion_p (new_type, TREE_TYPE (xop0)))
TREE_OPERAND (*expr_p, 0) = fold_convert_loc (input_location,
new_type,
xop0);
if (!useless_type_conversion_p (new_type, TREE_TYPE (xop1)))
TREE_OPERAND (*expr_p, 1) = fold_convert_loc (input_location,
new_type,
xop1);
/* Continue classified as tcc_binary. */
goto expr_2;
}
case FMA_EXPR:
case VEC_COND_EXPR:
case VEC_PERM_EXPR:
/* Classified as tcc_expression. */
goto expr_3;
case POINTER_PLUS_EXPR:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
ret = MIN (r0, r1);
break;
}
case CILK_SYNC_STMT:
{
if (!fn_contains_cilk_spawn_p (cfun))
{
error_at (EXPR_LOCATION (*expr_p),
"expected %<_Cilk_spawn%> before %<_Cilk_sync%>");
ret = GS_ERROR;
}
else
{
gimplify_cilk_sync (expr_p, pre_p);
ret = GS_ALL_DONE;
}
break;
}
default:
switch (TREE_CODE_CLASS (TREE_CODE (*expr_p)))
{
case tcc_comparison:
/* Handle comparison of objects of non-scalar mode aggregates
with a call to memcmp. It would be nice to only have to do
this for variable-sized objects, but then we'd have to allow
the same nest of reference nodes we allow for MODIFY_EXPR and
that's too complex.
Compare scalar mode aggregates as scalar mode values. Using
memcmp for them would be very inefficient at best, and is
plain wrong if bitfields are involved. */
{
tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1));
/* Vector comparisons need no boolification. */
if (TREE_CODE (type) == VECTOR_TYPE)
goto expr_2;
else if (!AGGREGATE_TYPE_P (type))
{
tree org_type = TREE_TYPE (*expr_p);
*expr_p = gimple_boolify (*expr_p);
if (!useless_type_conversion_p (org_type,
TREE_TYPE (*expr_p)))
{
*expr_p = fold_convert_loc (input_location,
org_type, *expr_p);
ret = GS_OK;
}
else
goto expr_2;
}
else if (TYPE_MODE (type) != BLKmode)
ret = gimplify_scalar_mode_aggregate_compare (expr_p);
else
ret = gimplify_variable_sized_compare (expr_p);
break;
}
/* If *EXPR_P does not need to be special-cased, handle it
according to its class. */
case tcc_unary:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
break;
case tcc_binary:
expr_2:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (r0, r1);
break;
}
expr_3:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (MIN (r0, r1), r2);
break;
}
case tcc_declaration:
case tcc_constant:
ret = GS_ALL_DONE;
goto dont_recalculate;
default:
gcc_unreachable ();
}
recalculate_side_effects (*expr_p);
dont_recalculate:
break;
}
gcc_assert (*expr_p || ret != GS_OK);
}
while (ret == GS_OK);
/* If we encountered an error_mark somewhere nested inside, either
stub out the statement or propagate the error back out. */
if (ret == GS_ERROR)
{
if (is_statement)
*expr_p = NULL;
goto out;
}
/* This was only valid as a return value from the langhook, which
we handled. Make sure it doesn't escape from any other context. */
gcc_assert (ret != GS_UNHANDLED);
if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p))
{
/* We aren't looking for a value, and we don't have a valid
statement. If it doesn't have side-effects, throw it away. */
if (!TREE_SIDE_EFFECTS (*expr_p))
*expr_p = NULL;
else if (!TREE_THIS_VOLATILE (*expr_p))
{
/* This is probably a _REF that contains something nested that
has side effects. Recurse through the operands to find it. */
enum tree_code code = TREE_CODE (*expr_p);
switch (code)
{
case COMPONENT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case VIEW_CONVERT_EXPR:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
gimple_test_f, fallback);
break;
default:
/* Anything else with side-effects must be converted to
a valid statement before we get here. */
gcc_unreachable ();
}
*expr_p = NULL;
}
else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p))
&& TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode)
{
/* Historically, the compiler has treated a bare reference
to a non-BLKmode volatile lvalue as forcing a load. */
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p));
/* Normally, we do not want to create a temporary for a
TREE_ADDRESSABLE type because such a type should not be
copied by bitwise-assignment. However, we make an
exception here, as all we are doing here is ensuring that
we read the bytes that make up the type. We use
create_tmp_var_raw because create_tmp_var will abort when
given a TREE_ADDRESSABLE type. */
tree tmp = create_tmp_var_raw (type, "vol");
gimple_add_tmp_var (tmp);
gimplify_assign (tmp, *expr_p, pre_p);
*expr_p = NULL;
}
else
/* We can't do anything useful with a volatile reference to
an incomplete type, so just throw it away. Likewise for
a BLKmode type, since any implicit inner load should
already have been turned into an explicit one by the
gimplification process. */
*expr_p = NULL;
}
/* If we are gimplifying at the statement level, we're done. Tack
everything together and return. */
if (fallback == fb_none || is_statement)
{
/* Since *EXPR_P has been converted into a GIMPLE tuple, clear
it out for GC to reclaim it. */
*expr_p = NULL_TREE;
if (!gimple_seq_empty_p (internal_pre)
|| !gimple_seq_empty_p (internal_post))
{
gimplify_seq_add_seq (&internal_pre, internal_post);
gimplify_seq_add_seq (pre_p, internal_pre);
}
/* The result of gimplifying *EXPR_P is going to be the last few
statements in *PRE_P and *POST_P. Add location information
to all the statements that were added by the gimplification
helpers. */
if (!gimple_seq_empty_p (*pre_p))
annotate_all_with_location_after (*pre_p, pre_last_gsi, input_location);
if (!gimple_seq_empty_p (*post_p))
annotate_all_with_location_after (*post_p, post_last_gsi,
input_location);
goto out;
}
#ifdef ENABLE_GIMPLE_CHECKING
if (*expr_p)
{
enum tree_code code = TREE_CODE (*expr_p);
/* These expressions should already be in gimple IR form. */
gcc_assert (code != MODIFY_EXPR
&& code != ASM_EXPR
&& code != BIND_EXPR
&& code != CATCH_EXPR
&& (code != COND_EXPR || gimplify_ctxp->allow_rhs_cond_expr)
&& code != EH_FILTER_EXPR
&& code != GOTO_EXPR
&& code != LABEL_EXPR
&& code != LOOP_EXPR
&& code != SWITCH_EXPR
&& code != TRY_FINALLY_EXPR
&& code != OACC_PARALLEL
&& code != OACC_KERNELS
&& code != OACC_DATA
&& code != OACC_HOST_DATA
&& code != OACC_DECLARE
&& code != OACC_UPDATE
&& code != OACC_ENTER_DATA
&& code != OACC_EXIT_DATA
&& code != OACC_CACHE
&& code != OMP_CRITICAL
&& code != OMP_FOR
&& code != OACC_LOOP
&& code != OMP_MASTER
&& code != OMP_TASKGROUP
&& code != OMP_ORDERED
&& code != OMP_PARALLEL
&& code != OMP_SECTIONS
&& code != OMP_SECTION
&& code != OMP_SINGLE);
}
#endif
/* Otherwise we're gimplifying a subexpression, so the resulting
value is interesting. If it's a valid operand that matches
GIMPLE_TEST_F, we're done. Unless we are handling some
post-effects internally; if that's the case, we need to copy into
a temporary before adding the post-effects to POST_P. */
if (gimple_seq_empty_p (internal_post) && (*gimple_test_f) (*expr_p))
goto out;
/* Otherwise, we need to create a new temporary for the gimplified
expression. */
/* We can't return an lvalue if we have an internal postqueue. The
object the lvalue refers to would (probably) be modified by the
postqueue; we need to copy the value out first, which means an
rvalue. */
if ((fallback & fb_lvalue)
&& gimple_seq_empty_p (internal_post)
&& is_gimple_addressable (*expr_p))
{
/* An lvalue will do. Take the address of the expression, store it
in a temporary, and replace the expression with an INDIRECT_REF of
that temporary. */
tmp = build_fold_addr_expr_loc (input_location, *expr_p);
gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue);
*expr_p = build_simple_mem_ref (tmp);
}
else if ((fallback & fb_rvalue) && is_gimple_reg_rhs_or_call (*expr_p))
{
/* An rvalue will do. Assign the gimplified expression into a
new temporary TMP and replace the original expression with
TMP. First, make sure that the expression has a type so that
it can be assigned into a temporary. */
gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p)));
*expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
else
{
#ifdef ENABLE_GIMPLE_CHECKING
if (!(fallback & fb_mayfail))
{
fprintf (stderr, "gimplification failed:\n");
print_generic_expr (stderr, *expr_p, 0);
debug_tree (*expr_p);
internal_error ("gimplification failed");
}
#endif
gcc_assert (fallback & fb_mayfail);
/* If this is an asm statement, and the user asked for the
impossible, don't die. Fail and let gimplify_asm_expr
issue an error. */
ret = GS_ERROR;
goto out;
}
/* Make sure the temporary matches our predicate. */
gcc_assert ((*gimple_test_f) (*expr_p));
if (!gimple_seq_empty_p (internal_post))
{
annotate_all_with_location (internal_post, input_location);
gimplify_seq_add_seq (pre_p, internal_post);
}
out:
input_location = saved_location;
return ret;
}
/* Look through TYPE for variable-sized objects and gimplify each such
size that we find. Add to LIST_P any statements generated. */
void
gimplify_type_sizes (tree type, gimple_seq *list_p)
{
tree field, t;
if (type == NULL || type == error_mark_node)
return;
/* We first do the main variant, then copy into any other variants. */
type = TYPE_MAIN_VARIANT (type);
/* Avoid infinite recursion. */
if (TYPE_SIZES_GIMPLIFIED (type))
return;
TYPE_SIZES_GIMPLIFIED (type) = 1;
switch (TREE_CODE (type))
{
case INTEGER_TYPE:
case ENUMERAL_TYPE:
case BOOLEAN_TYPE:
case REAL_TYPE:
case FIXED_POINT_TYPE:
gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);
for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
{
TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
}
break;
case ARRAY_TYPE:
/* These types may not have declarations, so handle them here. */
gimplify_type_sizes (TREE_TYPE (type), list_p);
gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
/* Ensure VLA bounds aren't removed, for -O0 they should be variables
with assigned stack slots, for -O1+ -g they should be tracked
by VTA. */
if (!(TYPE_NAME (type)
&& TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
&& DECL_IGNORED_P (TYPE_NAME (type)))
&& TYPE_DOMAIN (type)
&& INTEGRAL_TYPE_P (TYPE_DOMAIN (type)))
{
t = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t))
DECL_IGNORED_P (t) = 0;
t = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
if (t && TREE_CODE (t) == VAR_DECL && DECL_ARTIFICIAL (t))
DECL_IGNORED_P (t) = 0;
}
break;
case RECORD_TYPE:
case UNION_TYPE:
case QUAL_UNION_TYPE:
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL)
{
gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
gimplify_one_sizepos (&DECL_SIZE (field), list_p);
gimplify_one_sizepos (&DECL_SIZE_UNIT (field), list_p);
gimplify_type_sizes (TREE_TYPE (field), list_p);
}
break;
case POINTER_TYPE:
case REFERENCE_TYPE:
/* We used to recurse on the pointed-to type here, which turned out to
be incorrect because its definition might refer to variables not
yet initialized at this point if a forward declaration is involved.
It was actually useful for anonymous pointed-to types to ensure
that the sizes evaluation dominates every possible later use of the
values. Restricting to such types here would be safe since there
is no possible forward declaration around, but would introduce an
undesirable middle-end semantic to anonymity. We then defer to
front-ends the responsibility of ensuring that the sizes are
evaluated both early and late enough, e.g. by attaching artificial
type declarations to the tree. */
break;
default:
break;
}
gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);
for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
{
TYPE_SIZE (t) = TYPE_SIZE (type);
TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
TYPE_SIZES_GIMPLIFIED (t) = 1;
}
}
/* A subroutine of gimplify_type_sizes to make sure that *EXPR_P,
a size or position, has had all of its SAVE_EXPRs evaluated.
We add any required statements to *STMT_P. */
void
gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
{
tree expr = *expr_p;
/* We don't do anything if the value isn't there, is constant, or contains
a PLACEHOLDER_EXPR. We also don't want to do anything if it's already
a VAR_DECL. If it's a VAR_DECL from another function, the gimplifier
will want to replace it with a new variable, but that will cause problems
if this type is from outside the function. It's OK to have that here. */
if (is_gimple_sizepos (expr))
return;
*expr_p = unshare_expr (expr);
gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue);
}
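/* An illustrative sketch (not from the original source): for a VLA such as

	void f (int n) { int a[n * 2]; }

   the non-constant size expression reaches here, is unshared, and is
   gimplified to a gimple value, so *STMT_P gains something like

	D.1234 = n * 2;

   and the size or position now refers to that temporary.  */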
/* Gimplify the body of statements of FNDECL and return a GIMPLE_BIND node
containing the sequence of corresponding GIMPLE statements. If DO_PARMS
is true, also gimplify the parameters. */
gbind *
gimplify_body (tree fndecl, bool do_parms)
{
location_t saved_location = input_location;
gimple_seq parm_stmts, seq;
gimple outer_stmt;
gbind *outer_bind;
struct cgraph_node *cgn;
timevar_push (TV_TREE_GIMPLIFY);
/* Initialize for optimize_insn_for_s{ize,peed}_p possibly called during
gimplification. */
default_rtl_profile ();
gcc_assert (gimplify_ctxp == NULL);
push_gimplify_context ();
if (flag_openacc || flag_openmp)
{
gcc_assert (gimplify_omp_ctxp == NULL);
if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl)))
gimplify_omp_ctxp = new_omp_context (ORT_TARGET);
}
/* Unshare most shared trees in the body and in that of any nested functions.
It would seem we don't have to do this for nested functions because
they are supposed to be output and then the outer function gimplified
first, but the g++ front end doesn't always do it that way. */
unshare_body (fndecl);
unvisit_body (fndecl);
cgn = cgraph_node::get (fndecl);
if (cgn && cgn->origin)
nonlocal_vlas = new hash_set<tree>;
/* Make sure input_location isn't set to something weird. */
input_location = DECL_SOURCE_LOCATION (fndecl);
/* Resolve callee-copies. This has to be done before processing
the body so that DECL_VALUE_EXPR gets processed correctly. */
parm_stmts = do_parms ? gimplify_parameters () : NULL;
/* Gimplify the function's body. */
seq = NULL;
gimplify_stmt (&DECL_SAVED_TREE (fndecl), &seq);
outer_stmt = gimple_seq_first_stmt (seq);
if (!outer_stmt)
{
outer_stmt = gimple_build_nop ();
gimplify_seq_add_stmt (&seq, outer_stmt);
}
/* The body must contain exactly one statement, a GIMPLE_BIND. If this is
not the case, wrap everything in a GIMPLE_BIND to make it so. */
if (gimple_code (outer_stmt) == GIMPLE_BIND
&& gimple_seq_first (seq) == gimple_seq_last (seq))
outer_bind = as_a <gbind *> (outer_stmt);
else
outer_bind = gimple_build_bind (NULL_TREE, seq, NULL);
DECL_SAVED_TREE (fndecl) = NULL_TREE;
/* If we had callee-copies statements, insert them at the beginning
of the function and clear DECL_VALUE_EXPR_P on the parameters. */
if (!gimple_seq_empty_p (parm_stmts))
{
tree parm;
gimplify_seq_add_seq (&parm_stmts, gimple_bind_body (outer_bind));
gimple_bind_set_body (outer_bind, parm_stmts);
for (parm = DECL_ARGUMENTS (current_function_decl);
parm; parm = DECL_CHAIN (parm))
if (DECL_HAS_VALUE_EXPR_P (parm))
{
DECL_HAS_VALUE_EXPR_P (parm) = 0;
DECL_IGNORED_P (parm) = 0;
}
}
if (nonlocal_vlas)
{
if (nonlocal_vla_vars)
{
/* tree-nested.c may later on call declare_vars (..., true);
which relies on BLOCK_VARS chain to be the tail of the
gimple_bind_vars chain. Ensure we don't violate that
assumption. */
if (gimple_bind_block (outer_bind)
== DECL_INITIAL (current_function_decl))
declare_vars (nonlocal_vla_vars, outer_bind, true);
else
BLOCK_VARS (DECL_INITIAL (current_function_decl))
= chainon (BLOCK_VARS (DECL_INITIAL (current_function_decl)),
nonlocal_vla_vars);
nonlocal_vla_vars = NULL_TREE;
}
delete nonlocal_vlas;
nonlocal_vlas = NULL;
}
if ((flag_openacc || flag_openmp || flag_openmp_simd)
&& gimplify_omp_ctxp)
{
delete_omp_context (gimplify_omp_ctxp);
gimplify_omp_ctxp = NULL;
}
pop_gimplify_context (outer_bind);
gcc_assert (gimplify_ctxp == NULL);
#ifdef ENABLE_CHECKING
if (!seen_error ())
verify_gimple_in_seq (gimple_bind_body (outer_bind));
#endif
timevar_pop (TV_TREE_GIMPLIFY);
input_location = saved_location;
return outer_bind;
}
typedef char *char_p; /* For DEF_VEC_P. */
/* Return whether we should exclude FNDECL from instrumentation. */
static bool
flag_instrument_functions_exclude_p (tree fndecl)
{
vec<char_p> *v;
v = (vec<char_p> *) flag_instrument_functions_exclude_functions;
if (v && v->length () > 0)
{
const char *name;
int i;
char *s;
name = lang_hooks.decl_printable_name (fndecl, 0);
FOR_EACH_VEC_ELT (*v, i, s)
if (strstr (name, s) != NULL)
return true;
}
v = (vec<char_p> *) flag_instrument_functions_exclude_files;
if (v && v->length () > 0)
{
const char *name;
int i;
char *s;
name = DECL_SOURCE_FILE (fndecl);
FOR_EACH_VEC_ELT (*v, i, s)
if (strstr (name, s) != NULL)
return true;
}
return false;
}
/* Entry point to the gimplification pass. FNDECL is the FUNCTION_DECL
node for the function we want to gimplify.
Return the sequence of GIMPLE statements corresponding to the body
of FNDECL. */
void
gimplify_function_tree (tree fndecl)
{
tree parm, ret;
gimple_seq seq;
gbind *bind;
gcc_assert (!gimple_body (fndecl));
if (DECL_STRUCT_FUNCTION (fndecl))
push_cfun (DECL_STRUCT_FUNCTION (fndecl));
else
push_struct_function (fndecl);
for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = DECL_CHAIN (parm))
{
/* Preliminarily mark non-addressed complex variables as eligible
for promotion to gimple registers. We'll transform their uses
as we find them. */
if ((TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (parm)) == VECTOR_TYPE)
&& !TREE_THIS_VOLATILE (parm)
&& !needs_to_live_in_memory (parm))
DECL_GIMPLE_REG_P (parm) = 1;
}
ret = DECL_RESULT (fndecl);
if ((TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE
|| TREE_CODE (TREE_TYPE (ret)) == VECTOR_TYPE)
&& !needs_to_live_in_memory (ret))
DECL_GIMPLE_REG_P (ret) = 1;
bind = gimplify_body (fndecl, true);
/* The tree body of the function is no longer needed, replace it
with the new GIMPLE body. */
seq = NULL;
gimple_seq_add_stmt (&seq, bind);
gimple_set_body (fndecl, seq);
/* If we're instrumenting function entry/exit, then prepend the call to
the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
catch the exit hook. */
/* ??? Add some way to ignore exceptions for this TFE. */
if (flag_instrument_function_entry_exit
&& !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)
&& !flag_instrument_functions_exclude_p (fndecl))
{
tree x;
gbind *new_bind;
gimple tf;
gimple_seq cleanup = NULL, body = NULL;
tree tmp_var;
gcall *call;
x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
call = gimple_build_call (x, 1, integer_zero_node);
tmp_var = create_tmp_var (ptr_type_node, "return_addr");
gimple_call_set_lhs (call, tmp_var);
gimplify_seq_add_stmt (&cleanup, call);
x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_EXIT);
call = gimple_build_call (x, 2,
build_fold_addr_expr (current_function_decl),
tmp_var);
gimplify_seq_add_stmt (&cleanup, call);
tf = gimple_build_try (seq, cleanup, GIMPLE_TRY_FINALLY);
x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
call = gimple_build_call (x, 1, integer_zero_node);
tmp_var = create_tmp_var (ptr_type_node, "return_addr");
gimple_call_set_lhs (call, tmp_var);
gimplify_seq_add_stmt (&body, call);
x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_ENTER);
call = gimple_build_call (x, 2,
build_fold_addr_expr (current_function_decl),
tmp_var);
gimplify_seq_add_stmt (&body, call);
gimplify_seq_add_stmt (&body, tf);
new_bind = gimple_build_bind (NULL, body, gimple_bind_block (bind));
/* Clear the block for BIND, since it is no longer directly inside
the function, but within a try block. */
gimple_bind_set_block (bind, NULL);
/* Replace the current function body with the body
wrapped in the try/finally TF. */
seq = NULL;
gimple_seq_add_stmt (&seq, new_bind);
gimple_set_body (fndecl, seq);
bind = new_bind;
}
if ((flag_sanitize & SANITIZE_THREAD) != 0
&& !lookup_attribute ("no_sanitize_thread", DECL_ATTRIBUTES (fndecl)))
{
gcall *call = gimple_build_call_internal (IFN_TSAN_FUNC_EXIT, 0);
gimple tf = gimple_build_try (seq, call, GIMPLE_TRY_FINALLY);
gbind *new_bind = gimple_build_bind (NULL, tf, gimple_bind_block (bind));
/* Clear the block for BIND, since it is no longer directly inside
the function, but within a try block. */
gimple_bind_set_block (bind, NULL);
/* Replace the current function body with the body
wrapped in the try/finally TF. */
seq = NULL;
gimple_seq_add_stmt (&seq, new_bind);
gimple_set_body (fndecl, seq);
}
DECL_SAVED_TREE (fndecl) = NULL_TREE;
cfun->curr_properties = PROP_gimple_any;
pop_cfun ();
}
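/* An illustrative sketch of the instrumentation above (not from the
   original source): with -finstrument-functions the gimplified body is
   morally

	return_addr = __builtin_return_address (0);
	__cyg_profile_func_enter (this_fn, return_addr);
	try
	  {
	    <original function body>
	  }
	finally
	  {
	    return_addr = __builtin_return_address (0);
	    __cyg_profile_func_exit (this_fn, return_addr);
	  }

   since BUILT_IN_PROFILE_FUNC_ENTER and BUILT_IN_PROFILE_FUNC_EXIT expand
   to the __cyg_profile hooks.  */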
/* Return a dummy expression of type TYPE in order to keep going after an
error. */
static tree
dummy_object (tree type)
{
tree t = build_int_cst (build_pointer_type (type), 0);
return build2 (MEM_REF, type, t, t);
}
/* Gimplify __builtin_va_arg, aka VA_ARG_EXPR, which is not really a
builtin function, but a very special sort of operator. */
enum gimplify_status
gimplify_va_arg_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
tree promoted_type, have_va_type;
tree valist = TREE_OPERAND (*expr_p, 0);
tree type = TREE_TYPE (*expr_p);
tree t;
location_t loc = EXPR_LOCATION (*expr_p);
/* Verify that valist is of the proper type. */
have_va_type = TREE_TYPE (valist);
if (have_va_type == error_mark_node)
return GS_ERROR;
have_va_type = targetm.canonical_va_list_type (have_va_type);
if (have_va_type == NULL_TREE)
{
error_at (loc, "first argument to %<va_arg%> not of type %<va_list%>");
return GS_ERROR;
}
/* Generate a diagnostic for requesting data of a type that cannot
be passed through `...' due to type promotion at the call site. */
if ((promoted_type = lang_hooks.types.type_promotes_to (type))
!= type)
{
static bool gave_help;
bool warned;
/* Unfortunately, this is merely undefined, rather than a constraint
violation, so we cannot make this an error. If this call is never
executed, the program is still strictly conforming. */
warned = warning_at (loc, 0,
"%qT is promoted to %qT when passed through %<...%>",
type, promoted_type);
if (!gave_help && warned)
{
gave_help = true;
inform (loc, "(so you should pass %qT not %qT to %<va_arg%>)",
promoted_type, type);
}
/* We can, however, treat "undefined" any way we please.
Call abort to encourage the user to fix the program. */
if (warned)
inform (loc, "if this code is reached, the program will abort");
/* Before the abort, allow the evaluation of the va_list
expression to exit or longjmp. */
gimplify_and_add (valist, pre_p);
t = build_call_expr_loc (loc,
builtin_decl_implicit (BUILT_IN_TRAP), 0);
gimplify_and_add (t, pre_p);
/* This is dead code, but go ahead and finish so that the
mode of the result comes out right. */
*expr_p = dummy_object (type);
return GS_ALL_DONE;
}
else
{
/* Make it easier for the backends by protecting the valist argument
from multiple evaluations. */
if (TREE_CODE (have_va_type) == ARRAY_TYPE)
{
/* For this case, the backends will be expecting a pointer to
TREE_TYPE (abi), but it's possible we've
actually been given an array (an actual TARGET_FN_ABI_VA_LIST).
So fix it. */
if (TREE_CODE (TREE_TYPE (valist)) == ARRAY_TYPE)
{
tree p1 = build_pointer_type (TREE_TYPE (have_va_type));
valist = fold_convert_loc (loc, p1,
build_fold_addr_expr_loc (loc, valist));
}
gimplify_expr (&valist, pre_p, post_p, is_gimple_val, fb_rvalue);
}
else
gimplify_expr (&valist, pre_p, post_p, is_gimple_min_lval, fb_lvalue);
if (!targetm.gimplify_va_arg_expr)
/* FIXME: Once most targets are converted we should merely
assert this is non-null. */
return GS_ALL_DONE;
*expr_p = targetm.gimplify_va_arg_expr (valist, type, pre_p, post_p);
return GS_OK;
}
}
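/* An illustrative sketch of the diagnostic above (not from the original
   source): reads such as

	va_arg (ap, float);
	va_arg (ap, char);

   trigger the promotion warning, because the default argument promotions
   pass a float through "..." as double and a char as int, so the
   unpromoted type can never actually be extracted; the expression is
   replaced by a trap followed by a dummy object of the requested type.  */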
/* Build a new GIMPLE_ASSIGN tuple and append it to the end of *SEQ_P.
DST/SRC are the destination and source respectively. You can pass
ungimplified trees in DST or SRC, in which case they will be
converted to a gimple operand if necessary.
This function returns the newly created GIMPLE_ASSIGN tuple. */
gimple
gimplify_assign (tree dst, tree src, gimple_seq *seq_p)
{
tree t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
gimplify_and_add (t, seq_p);
ggc_free (t);
return gimple_seq_last_stmt (*seq_p);
}
inline hashval_t
gimplify_hasher::hash (const value_type *p)
{
tree t = p->val;
return iterative_hash_expr (t, 0);
}
inline bool
gimplify_hasher::equal (const value_type *p1, const compare_type *p2)
{
tree t1 = p1->val;
tree t2 = p2->val;
enum tree_code code = TREE_CODE (t1);
if (TREE_CODE (t2) != code
|| TREE_TYPE (t1) != TREE_TYPE (t2))
return false;
if (!operand_equal_p (t1, t2, 0))
return false;
#ifdef ENABLE_CHECKING
/* Only allow them to compare equal if they also hash equal; otherwise
results are nondeterministic, and we fail bootstrap comparison. */
gcc_assert (hash (p1) == hash (p2));
#endif
return true;
}
|
sir_omp.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "timing.h"
void initialise(double ** S, double ** I, double ** R, int Xmax, int Ymax)
{
/*
Assume the epicenter of the spread is NY, which takes up roughly 15% of the horizontal width and 20% of the vertical length.
Hence assume that NY spans [0.8, 0.95] horizontally and [0.15, 0.35] vertically.
*/
int NYx_low = 0.8 * Xmax;
int NYx_high = 0.95 * Xmax;
int NYy_low = 0.15 * Ymax;
int NYy_high = 0.35 * Ymax;
for (int i = 0; i < Ymax; ++i)
{
for (int j = 0; j < Xmax; ++j)
{
if ((NYy_low <= i) && (i < NYy_high) && (NYx_low <= j) && (j < NYx_high))
{
I[i][j] = 0.01;
S[i][j] = 0.99;
}
else
{
S[i][j] = 1.0;
}
}
}
}
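/*
Discretization sketch for the sweeps below (an added note): each half-sweep
is one explicit Euler step of the spatial SIR model
S_t = -beta*S*I + dS*Lap(S)
I_t = beta*S*I - gamma*I + dI*Lap(I)
R_t = gamma*I + dR*Lap(R)
where Lap(u) is approximated by the 5-point stencil
u[i+1][j] + u[i-1][j] + u[i][j+1] + u[i][j-1] - 4*u[i][j]
and the time step and grid spacing are absorbed into beta, gamma, dS, dI, dR.
*/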
void simulate(int nsweeps, double ** S, double ** I, double ** R, double beta,
double gamma, double dS, double dI, double dR, int Xmax, int Ymax)
{
/* Initialize tmp arrays */
double ** Stmp = calloc(Ymax, sizeof(double *));
double ** Itmp = calloc(Ymax, sizeof(double *));
double ** Rtmp = calloc(Ymax, sizeof(double *));
for (int i = 0; i < Ymax; ++i)
{
Stmp[i] = calloc(Xmax, sizeof(double));
Itmp[i] = calloc(Xmax, sizeof(double));
Rtmp[i] = calloc(Xmax, sizeof(double));
}
/* Fill boundary conditions into tmp arrays */
for (int i = 0; i < Ymax; ++i)
{
Stmp[i][0] = S[i][0];
Stmp[i][Xmax - 1] = S[i][Xmax - 1];
Itmp[i][0] = I[i][0];
Itmp[i][Xmax - 1] = I[i][Xmax - 1];
Rtmp[i][0] = R[i][0];
Rtmp[i][Xmax - 1] = R[i][Xmax - 1];
}
for (int j = 0; j < Xmax; ++j)
{
Stmp[0][j] = S[0][j];
Stmp[Ymax - 1][j] = S[Ymax - 1][j];
Itmp[0][j] = I[0][j];
Itmp[Ymax - 1][j] = I[Ymax - 1][j];
Rtmp[0][j] = R[0][j];
Rtmp[Ymax - 1][j] = R[Ymax - 1][j];
}
int i, j, sweep;
/* The loop indices and the sweep counter are per-thread state, so they
must be private; a shared sweep counter would be a data race. Note that
an "omp for" directive must be followed directly by the for loop, not
by a block. */
#pragma omp parallel private(i, j, sweep)
{
for (sweep = 0; sweep < nsweeps; sweep += 2)
{
/* Old data in sir; new data in sirtmp */
#pragma omp for collapse(2)
for (i = 1; i < Ymax - 1; ++i)
{
for (j = 1; j < Xmax - 1; ++j)
{
Stmp[i][j] = S[i][j] - beta * S[i][j] * I[i][j]
+ dS * (S[i + 1][j] + S[i - 1][j] - 4 * S[i][j] + S[i][j + 1] + S[i][j - 1]);
Itmp[i][j] = I[i][j] + beta * S[i][j] * I[i][j] - gamma * I[i][j]
+ dI * (I[i + 1][j] + I[i - 1][j] - 4 * I[i][j] + I[i][j + 1] + I[i][j - 1]);
Rtmp[i][j] = R[i][j] + gamma * I[i][j]
+ dR * (R[i + 1][j] + R[i - 1][j] - 4 * R[i][j] + R[i][j + 1] + R[i][j - 1]);
}
}
/* Old data in sirtmp; new data in sir. The implicit barrier at the end
of the first "omp for" keeps the two half-sweeps ordered. */
#pragma omp for collapse(2)
for (i = 1; i < Ymax - 1; ++i)
{
for (j = 1; j < Xmax - 1; ++j)
{
S[i][j] = Stmp[i][j] - beta * Stmp[i][j] * Itmp[i][j]
+ dS * (Stmp[i + 1][j] + Stmp[i - 1][j] - 4 * Stmp[i][j] + Stmp[i][j + 1] + Stmp[i][j - 1]);
I[i][j] = Itmp[i][j] + beta * Stmp[i][j] * Itmp[i][j] - gamma * Itmp[i][j]
+ dI * (Itmp[i + 1][j] + Itmp[i - 1][j] - 4 * Itmp[i][j] + Itmp[i][j + 1] + Itmp[i][j - 1]);
R[i][j] = Rtmp[i][j] + gamma * Itmp[i][j]
+ dR * (Rtmp[i + 1][j] + Rtmp[i - 1][j] - 4 * Rtmp[i][j] + Rtmp[i][j + 1] + Rtmp[i][j - 1]);
}
}
}
}
/* Free the rows before the row-pointer arrays to avoid leaking them */
for (int i = 0; i < Ymax; ++i)
{
free(Stmp[i]);
free(Itmp[i]);
free(Rtmp[i]);
}
free(Stmp);
free(Itmp);
free(Rtmp);
}
int main(int argc, char ** argv)
{
int Xmax, Ymax, nsteps;
double beta, gamma, dS, dI, dR;
double ** S, ** I, ** R;
timing_t tstart, tend;
/* Process arguments */
Xmax = (argc > 1) ? atoi(argv[1]) : 100;
Ymax = (argc > 2) ? atoi(argv[2]) : 100;
nsteps = (argc > 3) ? atoi(argv[3]) : 100;
beta = (argc > 4) ? atof(argv[4]) : 0.2;
gamma = (argc > 5) ? atof(argv[5]) : 0.01;
dS = (argc > 6) ? atof(argv[6]) : 0.01;
dI = (argc > 7) ? atof(argv[7]) : 0.01;
dR = (argc > 8) ? atof(argv[8]) : 0.01;
/* Allocate and initialize arrays */
S = calloc(Ymax, sizeof(double *));
I = calloc(Ymax, sizeof(double *));
R = calloc(Ymax, sizeof(double *));
for (int i = 0; i < Ymax; ++i)
{
S[i] = calloc(Xmax, sizeof(double));
I[i] = calloc(Xmax, sizeof(double));
R[i] = calloc(Xmax, sizeof(double));
}
initialise(S, I, R, Xmax, Ymax);
/* Run the solver */
get_time(&tstart);
simulate(nsteps, S, I, R, beta, gamma, dS, dI, dR, Xmax, Ymax);
get_time(&tend);
printf("Xmax: %d\n"
"Ymax: %d\n"
"timesteps: %d\n"
"Elapsed time: %g s\n",
Xmax, Ymax, nsteps, timespec_diff(tstart, tend));
/* Free the rows before the row-pointer arrays to avoid leaking them */
for (int i = 0; i < Ymax; ++i)
{
free(S[i]);
free(I[i]);
free(R[i]);
}
free(S);
free(I);
free(R);
return 0;
} |
decryptionOMP.c | #include "decryptionOMP.h"
Session* bruteForceOMP(int keyStart, int keyStop, int keyLength, char* cipherText, size_t cipherLength, GHashTable *wordsGHash) {
int bestCount = -1, matchs, i;
char *key, *decrypted;
// Init Session
Session *session = (Session*) malloc(sizeof(Session));
session->key = NULL;
session->decryptedMessage = NULL;
// Fixed number of threads for each process (4)
omp_set_num_threads(NUMBER_OF_THREADS);
// key, decrypted and matchs are per-iteration scratch state, so they must be
// thread-private; sharing them across threads would be a data race
#pragma omp parallel for private(key, decrypted, matchs)
for (i = keyStart; i < keyStop; i++) {
// Get the binary string representation of the key
key = decToBinary(i, keyLength);
// Try to decrypt the cipher with the current key
decrypted = cipher(key, keyLength, cipherText, cipherLength);
// Debug mode - Uncomment the next line to save the encrypted file to use as an input
// encryptText(key, keyLength, cipherText, cipherLength);
// Check if the decrypted plaintext makes sense by matching it with the known words text
matchs = compare(decrypted, wordsGHash);
if (matchs > bestCount) {
// bestCount and session are shared, so the update must be serialized;
// re-check the count once inside the critical section
#pragma omp critical
if (matchs > bestCount) {
// (Re)allocate memory for the encryption key & plaintext, with room
// for the terminating '\0' of the key
free(session->key);
free(session->decryptedMessage);
session->key = (char*) malloc(sizeof(char) * (keyLength + 1));
session->decryptedMessage = (char*) malloc(MAX_TEXT_SIZE * sizeof(char));
// Save encryption key & plaintext
strcpy(session->key, key);
strcpy(session->decryptedMessage, decrypted);
session->match = matchs;
bestCount = matchs;
}
}
// Assuming decToBinary() and cipher() return heap-allocated buffers,
// release them so the search does not leak one pair per candidate key
free(key);
free(decrypted);
}
return session;
}
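// A hypothetical caller-side helper (not an existing API in this project):
// the returned Session owns two heap buffers plus the struct itself, so a
// caller should release all three once done with the result.
static void freeSession(Session *session) {
if (session == NULL)
return;
free(session->key); // NULL if no candidate key ever matched
free(session->decryptedMessage);
free(session);
}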
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _EdgeInfo
{
double
left,
right,
top,
bottom;
} EdgeInfo;
static double GetEdgeBackgroundFactor(const Image *image,
const CacheView *image_view,const GravityType gravity,const size_t width,
const size_t height,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
CacheView
*edge_view;
double
factor;
Image
*edge_image;
MagickPixelPacket
background,
pixel;
RectangleInfo
edge_geometry;
register const PixelPacket
*p;
ssize_t
y;
/*
Determine the percent of image background for this edge.
*/
switch (gravity)
{
case NorthWestGravity:
case NorthGravity:
default:
{
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
break;
}
case NorthEastGravity:
case EastGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
break;
}
case SouthEastGravity:
case SouthGravity:
{
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
(ssize_t) image->rows-1,1,1,exception);
break;
}
case SouthWestGravity:
case WestGravity:
{
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
break;
}
}
GetMagickPixelPacket(image,&background);
SetMagickPixelPacket(image,p,(IndexPacket *) NULL,&background);
edge_geometry.width=width;
edge_geometry.height=height;
edge_geometry.x=x_offset;
edge_geometry.y=y_offset;
GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
edge_image=CropImage(image,&edge_geometry,exception);
if (edge_image == (Image *) NULL)
return(0.0);
factor=0.0;
GetMagickPixelPacket(edge_image,&pixel);
edge_view=AcquireVirtualCacheView(edge_image,exception);
for (y=0; y < (ssize_t) edge_image->rows; y++)
{
register ssize_t
x;
p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) edge_image->columns; x++)
{
SetMagickPixelPacket(edge_image,p,(IndexPacket *) NULL,&pixel);
if (IsMagickColorSimilar(&pixel,&background) == MagickFalse)
factor++;
p++;
}
}
factor/=((double) edge_image->columns*edge_image->rows);
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
return(factor);
}
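/*
  Illustrative note (not part of MagickCore): the factor returned above is
  the fraction of pixels in the requested edge strip that differ from the
  corner background color.  For example, a 1x100 left-edge strip with 3
  non-matching pixels yields 0.03; a factor of 0.0 means the strip is pure
  background and is a candidate for trimming.
*/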
static inline double GetMinEdgeBackgroundFactor(const EdgeInfo *edge)
{
double
factor;
factor=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top),
edge->bottom);
return(factor);
}
static RectangleInfo GetEdgeBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*edge_view;
const char
*artifact;
double
background_factor,
percent_background;
EdgeInfo
edge,
vertex;
Image
*edge_image;
RectangleInfo
bounds;
/*
Get the image bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
SetGeometry(image,&bounds);
edge_image=CloneImage(image,0,0,MagickTrue,exception);
if (edge_image == (Image *) NULL)
return(bounds);
(void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
memset(&vertex,0,sizeof(vertex));
edge_view=AcquireVirtualCacheView(edge_image,exception);
edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,WestGravity,
1,0,0,0,exception);
edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,EastGravity,
1,0,0,0,exception);
edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,NorthGravity,
0,1,0,0,exception);
edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,SouthGravity,
0,1,0,0,exception);
percent_background=1.0;
artifact=GetImageArtifact(edge_image,"trim:percent-background");
if (artifact != (const char *) NULL)
percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon),
1.0);
background_factor=GetMinEdgeBackgroundFactor(&edge);
for ( ; background_factor < percent_background;
background_factor=GetMinEdgeBackgroundFactor(&edge))
{
if ((bounds.width == 0) || (bounds.height == 0))
break;
if (fabs(edge.left-background_factor) < MagickEpsilon)
{
/*
Trim left edge.
*/
vertex.left++;
bounds.width--;
edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.right-background_factor) < MagickEpsilon)
{
/*
Trim right edge.
*/
vertex.right++;
bounds.width--;
edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
if (fabs(edge.top-background_factor) < MagickEpsilon)
{
/*
Trim top edge.
*/
vertex.top++;
bounds.height--;
edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.top=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
continue;
}
if (fabs(edge.bottom-background_factor) < MagickEpsilon)
{
/*
Trim bottom edge.
*/
vertex.bottom++;
bounds.height--;
edge.left=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
vertex.top,exception);
edge.right=GetEdgeBackgroundFactor(edge_image,edge_view,
NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
vertex.top,exception);
edge.bottom=GetEdgeBackgroundFactor(edge_image,edge_view,
SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
vertex.bottom,exception);
continue;
}
}
edge_view=DestroyCacheView(edge_view);
edge_image=DestroyImage(edge_image);
bounds.x=(ssize_t) vertex.left;
bounds.y=(ssize_t) vertex.top;
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
return(bounds);
}
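/*
  Usage sketch (hypothetical, not part of this file): enable percentage-based
  trimming before computing the bounding box.  The "trim:percent-background"
  artifact is the documented trigger; the wrapper function itself is
  illustrative only.  Roughly, an artifact value of 20 keeps trimming edges
  until each remaining edge is at most 20% background.
*/
#if 0
static RectangleInfo ExamplePercentTrimBounds(Image *image,
  ExceptionInfo *exception)
{
  (void) SetImageArtifact(image,"trim:percent-background","20");
  return(GetImageBoundingBox(image,exception));
}
#endif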
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
status;
MagickPixelPacket
target[3],
zero;
RectangleInfo
bounds;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"trim:percent-background");
if (artifact != (const char *) NULL)
return(GetEdgeBoundingBox(image,exception));
bounds.width=0;
bounds.height=0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
GetMagickPixelPacket(image,&target[0]);
image_view=AcquireVirtualCacheView(image,exception);
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[0]);
GetMagickPixelPacket(image,&target[1]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
if (p != (const PixelPacket *) NULL)
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[1]);
GetMagickPixelPacket(image,&target[2]);
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
if (p != (const PixelPacket *) NULL)
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[2]);
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
RectangleInfo
bounding_box;
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((x < bounding_box.x) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
p++;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
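/*
  Usage sketch (hypothetical, not part of this file): auto-trim the border of
  an image, mirroring what the -trim option does.  Error handling is
  abbreviated; CropImage() is the real MagickCore cropping call.
*/
#if 0
static Image *ExampleTrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width == 0) || (bounds.height == 0))
    return((Image *) NULL);  /* nothing left but background */
  return(CropImage(image,&bounds,exception));
}
#endif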
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
const ChannelType channel,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((channel & RedChannel) != 0)
if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
DisableMSCWarning(4127)
if (1UL*QuantumRange <= MaxMap)
RestoreMSCWarning
{
size_t
*depth_map;
/*
Compute the channel depth (optimized with a depth map).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
pixel;
if ((channel & RedChannel) != 0)
{
pixel=GetPixelRed(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if ((channel & GreenChannel) != 0)
{
pixel=GetPixelGreen(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if ((channel & BlueChannel) != 0)
{
pixel=GetPixelBlue(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
pixel=GetPixelOpacity(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
pixel=GetPixelIndex(indexes+x);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0))
if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
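/*
  Usage sketch (hypothetical, not part of this file): compare the effective
  depth of the whole image with that of a single channel.
*/
#if 0
static void ExampleReportDepth(const Image *image,ExceptionInfo *exception)
{
  size_t
    depth,
    red_depth;
  depth=GetImageDepth(image,exception);
  red_depth=GetImageChannelDepth(image,RedChannel,exception);
  (void) fprintf(stdout,"depth: %lu (red: %lu)\n",(unsigned long) depth,
    (unsigned long) red_depth);
}
#endif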
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, or 32.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
const MagickBooleanType constrain)
{
size_t
depth;
depth=image->depth;
if (depth <= 8)
depth=8;
else
if (depth <= 16)
depth=16;
else
if (depth <= 32)
depth=32;
else
if (depth <= 64)
depth=64;
if (constrain != MagickFalse)
depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
return(depth);
}
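/*
  Worked example (illustrative): GetImageQuantumDepth() rounds upward, so an
  image->depth of 12 reports 16 and a depth of 33 reports 64.  With constrain
  set on a Q16 build (MAGICKCORE_QUANTUM_DEPTH of 16), both report 16.
*/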
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->matte == MagickFalse)
return(ColorSeparationType);
return(ColorSeparationMatteType);
}
if (IsMonochromeImage(image,exception) != MagickFalse)
return(BilevelType);
if (IsGrayImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(GrayscaleMatteType);
return(GrayscaleType);
}
if (IsPaletteImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(PaletteMatteType);
return(PaletteType);
}
if (image->matte != MagickFalse)
return(TrueColorMatteType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange. Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleMatteType))
return(image->type);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(UndefinedType);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelGray(p) == MagickFalse)
{
type=UndefinedType;
break;
}
if ((type == BilevelType) && (IsPixelMonochrome(p) == MagickFalse))
type=GrayscaleType;
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if ((type == GrayscaleType) && (image->matte != MagickFalse))
type=GrayscaleMatteType;
return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register ssize_t
x;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->type == BilevelType)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsPixelMonochrome(p) == MagickFalse)
{
type=UndefinedType;
break;
}
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == BilevelType)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->matte == MagickFalse)
return(ColorSeparationType);
return(ColorSeparationMatteType);
}
if (IdentifyImageMonochrome(image,exception) != MagickFalse)
return(BilevelType);
if (IdentifyImageGray(image,exception) != UndefinedType)
{
if (image->matte != MagickFalse)
return(GrayscaleMatteType);
return(GrayscaleType);
}
if (IdentifyPaletteImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(PaletteMatteType);
return(PaletteType);
}
if (image->matte != MagickFalse)
return(TrueColorMatteType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
magick_unreferenced(exception);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleMatteType))
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
magick_unreferenced(exception);
if (image->type == BilevelType)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->matte == MagickFalse)
return(MagickTrue);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
break;
p++;
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
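/*
  Usage sketch (hypothetical, not part of this file): release the alpha
  channel when it carries no information.  SetImageAlphaChannel() and
  DeactivateAlphaChannel are real MagickCore symbols.
*/
#if 0
static void ExampleDropUnusedAlpha(Image *image,ExceptionInfo *exception)
{
  if (IsOpaqueImage(image,exception) != MagickFalse)
    (void) SetImageAlphaChannel(image,DeactivateAlphaChannel);
}
#endif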
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth)
{
return(SetImageChannelDepth(image,CompositeChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
const ChannelType channel,const size_t depth)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (depth >= MAGICKCORE_QUANTUM_DEPTH)
{
image->depth=depth;
return(MagickTrue);
}
range=GetQuantumRange(depth);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel((MagickRealType) image->colormap[i].red),range),range);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel((MagickRealType) image->colormap[i].green),range),range);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel((MagickRealType) image->colormap[i].blue),range),range);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel((MagickRealType) image->colormap[i].opacity),range),
range);
}
}
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
DisableMSCWarning(4127)
if (1UL*QuantumRange <= MaxMap)
RestoreMSCWarning
{
Quantum
*depth_map;
register ssize_t
i;
/*
Scale pixels to the desired depth (optimized with a depth map).
*/
depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (Quantum *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
#endif
/*
Scale pixels to desired depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
(MagickRealType) GetPixelRed(q)),range),range));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
(MagickRealType) GetPixelGreen(q)),range),range));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
(MagickRealType) GetPixelBlue(q)),range),range));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
(MagickRealType) GetPixelOpacity(q)),range),range));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
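/*
  Usage sketch (hypothetical, not part of this file): posterize the red
  channel to 4 bits, then clamp every channel to 8 bits, using the two entry
  points defined above.
*/
#if 0
static MagickBooleanType ExampleReduceDepth(Image *image)
{
  if (SetImageChannelDepth(image,RedChannel,4) == MagickFalse)
    return(MagickFalse);
  return(SetImageDepth(image,8));
}
#endif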
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
% PaletteMatteType, PaletteBilevelMatteType, TrueColorType, TrueColorMatteType,
% ColorSeparationType, ColorSeparationMatteType, OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
const char
*artifact;
ImageInfo
*image_info;
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
status=MagickTrue;
image_info=AcquireImageInfo();
image_info->dither=image->dither;
artifact=GetImageArtifact(image,"dither");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"dither",artifact);
switch (type)
{
case BilevelType:
{
status=TransformImageColorspace(image,GRAYColorspace);
(void) NormalizeImage(image);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=2;
quantize_info->colorspace=GRAYColorspace;
quantize_info->dither_method=NoDitherMethod;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
image->matte=MagickFalse;
break;
}
case GrayscaleType:
{
status=TransformImageColorspace(image,GRAYColorspace);
image->matte=MagickFalse;
break;
}
case GrayscaleMatteType:
{
status=TransformImageColorspace(image,GRAYColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case PaletteType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if ((image->storage_class == DirectClass) || (image->colors > 256))
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=256;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->matte=MagickFalse;
break;
}
case PaletteBilevelMatteType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
(void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
quantize_info=AcquireQuantizeInfo(image_info);
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case PaletteMatteType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->colorspace=TransparentColorspace;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case TrueColorType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
image->matte=MagickFalse;
break;
}
case TrueColorMatteType:
{
status=TransformImageColorspace(image,sRGBColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case ColorSeparationType:
{
status=TransformImageColorspace(image,CMYKColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
image->matte=MagickFalse;
break;
}
case ColorSeparationMatteType:
{
status=TransformImageColorspace(image,CMYKColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case OptimizeType:
case UndefinedType:
break;
}
image_info=DestroyImageInfo(image_info);
if (status == MagickFalse)
return(MagickFalse);
image->type=type;
return(MagickTrue);
}
|
QuadNodePolarEuclid.h | /*
* QuadNodePolarEuclid.h
*
* Created on: 21.05.2014
* Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
*
* Note: This is similar enough to QuadNode.h that one could merge these two classes.
*/
#ifndef QUADNODEPOLAREUCLID_H_
#define QUADNODEPOLAREUCLID_H_
#include <vector>
#include <algorithm>
#include <functional>
#include <assert.h>
#include "../../auxiliary/Log.h"
#include "../../geometric/HyperbolicSpace.h"
using std::vector;
using std::min;
using std::max;
using std::cos;
namespace NetworKit {
template <class T>
class QuadNodePolarEuclid {
friend class QuadTreeGTest;
private:
double leftAngle;
double minR;
double rightAngle;
double maxR;
Point2D<double> a,b,c,d;
unsigned capacity;
static const unsigned coarsenLimit = 4;
count subTreeSize;
std::vector<T> content;
std::vector<Point2D<double> > positions;
std::vector<double> angles;
std::vector<double> radii;
bool isLeaf;
bool splitTheoretical;
double balance;
index ID;
double lowerBoundR;
public:
std::vector<QuadNodePolarEuclid> children;
QuadNodePolarEuclid() {
//This should never be called.
leftAngle = 0;
rightAngle = 0;
minR = 0;
maxR = 0;
capacity = 20;
isLeaf = true;
subTreeSize = 0;
balance = 0.5;
splitTheoretical = false;
lowerBoundR = maxR;
ID = 0;
}
/**
* Construct a QuadNode for polar coordinates.
*
*
* @param leftAngle Minimal angular coordinate of region, in radians from 0 to 2\pi
* @param rightAngle Maximal angular coordinate of region, in radians from 0 to 2\pi
* @param minR Minimal radial coordinate of region, between 0 and 1
* @param maxR Maximal radial coordinate of region, between 0 and 1
* @param capacity Number of points a leaf cell can store before splitting
* @param splitTheoretical Whether to split in a theoretically optimal way or in a way to decrease measured running times
* @param balance Weighting of the radial coordinate of the theoretical split point, strictly between 0 and 1
*
*/
QuadNodePolarEuclid(double leftAngle, double minR, double rightAngle, double maxR, unsigned capacity = 1000, bool splitTheoretical = false, double balance = 0.5) {
if (balance <= 0 || balance >= 1) throw std::runtime_error("Quadtree balance parameter must be between 0 and 1.");
this->leftAngle = leftAngle;
this->minR = minR;
this->maxR = maxR;
this->rightAngle = rightAngle;
this->a = HyperbolicSpace::polarToCartesian(leftAngle, minR);
this->b = HyperbolicSpace::polarToCartesian(rightAngle, minR);
this->c = HyperbolicSpace::polarToCartesian(rightAngle, maxR);
this->d = HyperbolicSpace::polarToCartesian(leftAngle, maxR);
this->capacity = capacity;
this->splitTheoretical = splitTheoretical;
this->balance = balance;
this->lowerBoundR = maxR;
this->ID = 0;
isLeaf = true;
subTreeSize = 0;
}
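/**
 * Usage sketch (illustrative, not part of this class): build a root node
 * covering the full disk and insert two points.  The stored type and the
 * coordinates are hypothetical.
 *
 *   QuadNodePolarEuclid<index> root(0, 0, 2*PI, 1);
 *   root.addContent(0, 0.3, 0.5); // element 0 at angle 0.3, radius 0.5
 *   root.addContent(1, 4.2, 0.9); // element 1 at angle 4.2, radius 0.9
 */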
void split() {
assert(isLeaf);
//heavy lifting: split up!
double middleAngle, middleR;
if (splitTheoretical) {
//Euclidean space is distributed equally
middleAngle = (rightAngle - leftAngle) / 2 + leftAngle;
middleR = pow(maxR*maxR*(1-balance)+minR*minR*balance, 0.5);
} else {
//median of points
vector<double> sortedAngles = angles;
std::sort(sortedAngles.begin(), sortedAngles.end());
middleAngle = sortedAngles[sortedAngles.size()/2];
vector<double> sortedRadii = radii;
std::sort(sortedRadii.begin(), sortedRadii.end());
middleR = sortedRadii[sortedRadii.size()/2];
}
assert(middleR < maxR);
assert(middleR > minR);
QuadNodePolarEuclid southwest(leftAngle, minR, middleAngle, middleR, capacity, splitTheoretical, balance);
QuadNodePolarEuclid southeast(middleAngle, minR, rightAngle, middleR, capacity, splitTheoretical, balance);
QuadNodePolarEuclid northwest(leftAngle, middleR, middleAngle, maxR, capacity, splitTheoretical, balance);
QuadNodePolarEuclid northeast(middleAngle, middleR, rightAngle, maxR, capacity, splitTheoretical, balance);
children = {southwest, southeast, northwest, northeast};
isLeaf = false;
}
/**
* Add a point at polar coordinates (angle, R) with content input. May split node if capacity is full
*
* @param input arbitrary content, in our case an index
* @param angle angular coordinate of point, between 0 and 2 pi.
* @param R radial coordinate of point, between 0 and 1.
*/
void addContent(T input, double angle, double R) {
assert(this->responsible(angle, R));
if (lowerBoundR > R) lowerBoundR = R;
if (isLeaf) {
if (content.size() + 1 < capacity) {
content.push_back(input);
angles.push_back(angle);
radii.push_back(R);
Point2D<double> pos = HyperbolicSpace::polarToCartesian(angle, R);
positions.push_back(pos);
} else {
split();
for (index i = 0; i < content.size(); i++) {
this->addContent(content[i], angles[i], radii[i]);
}
assert(subTreeSize == content.size());//each re-inserted element incremented subTreeSize once; the elements now exist both here and in the children
subTreeSize = content.size();
content.clear();
angles.clear();
radii.clear();
positions.clear();
this->addContent(input, angle, R);
}
}
else {
assert(children.size() > 0);
for (index i = 0; i < children.size(); i++) {
if (children[i].responsible(angle, R)) {
children[i].addContent(input, angle, R);
break;
}
}
subTreeSize++;
}
}
/**
* Remove content at polar coordinates (angle, R). May cause coarsening of the quadtree
*
* @param input Content to be removed
* @param angle Angular coordinate
* @param R Radial coordinate
*
* @return True if content was found and removed, false otherwise
*/
bool removeContent(T input, double angle, double R) {
if (!responsible(angle, R)) return false;
if (isLeaf) {
index i = 0;
for (; i < content.size(); i++) {
if (content[i] == input) break;
}
if (i < content.size()) {
assert(angles[i] == angle);
assert(radii[i] == R);
//remove element
content.erase(content.begin()+i);
positions.erase(positions.begin()+i);
angles.erase(angles.begin()+i);
radii.erase(radii.begin()+i);
return true;
} else {
return false;
}
}
else {
bool removed = false;
bool allLeaves = true;
assert(children.size() > 0);
for (index i = 0; i < children.size(); i++) {
if (!children[i].isLeaf) allLeaves = false;
if (children[i].removeContent(input, angle, R)) {
assert(!removed);
removed = true;
}
}
if (removed) subTreeSize--;
//coarsen?
if (removed && allLeaves && size() < coarsenLimit) {
//coarsen!!
//why not assert empty containers and then insert directly?
vector<T> allContent;
vector<Point2D<double> > allPositions;
vector<double> allAngles;
vector<double> allRadii;
for (index i = 0; i < children.size(); i++) {
allContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end());
allPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end());
allAngles.insert(allAngles.end(), children[i].angles.begin(), children[i].angles.end());
allRadii.insert(allRadii.end(), children[i].radii.begin(), children[i].radii.end());
}
assert(subTreeSize == allContent.size());
assert(subTreeSize == allPositions.size());
assert(subTreeSize == allAngles.size());
assert(subTreeSize == allRadii.size());
children.clear();
content.swap(allContent);
positions.swap(allPositions);
angles.swap(allAngles);
radii.swap(allRadii);
isLeaf = true;
}
return removed;
}
}
/**
* Check whether the region managed by this node lies outside of a Euclidean circle.
*
* @param query Center of the Euclidean query circle, given in Cartesian coordinates
* @param radius Radius of the Euclidean query circle
*
* @return True if the region managed by this node lies completely outside of the circle
*/
bool outOfReach(Point2D<double> query, double radius) const {
double phi, r;
HyperbolicSpace::cartesianToPolar(query, phi, r);
if (responsible(phi, r)) return false;
//get four edge points
double topDistance, bottomDistance, leftDistance, rightDistance;
if (phi < leftAngle || phi > rightAngle) {
topDistance = min(c.distance(query), d.distance(query));
} else {
topDistance = abs(r - maxR);
}
if (topDistance <= radius) return false;
if (phi < leftAngle || phi > rightAngle) {
bottomDistance = min(a.distance(query), b.distance(query));
} else {
bottomDistance = abs(r - minR);
}
if (bottomDistance <= radius) return false;
double minDistanceR = r*cos(abs(phi-leftAngle));
if (minDistanceR > minR && minDistanceR < maxR) {
leftDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR));
} else {
leftDistance = min(a.distance(query), d.distance(query));
}
if (leftDistance <= radius) return false;
minDistanceR = r*cos(abs(phi-rightAngle));
if (minDistanceR > minR && minDistanceR < maxR) {
rightDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR));
} else {
rightDistance = min(b.distance(query), c.distance(query));
}
if (rightDistance <= radius) return false;
return true;
}
/**
* Check whether the region managed by this node lies outside of a Euclidean circle.
* Functionality is the same as in the method above, but it takes polar coordinates instead of Cartesian ones
*
* @param angle_c Angular coordinate of the Euclidean query circle's center
* @param r_c Radial coordinate of the Euclidean query circle's center
* @param radius Radius of the Euclidean query circle
*
* @return True if the region managed by this node lies completely outside of the circle
*/
bool outOfReach(double angle_c, double r_c, double radius) const {
if (responsible(angle_c, r_c)) return false;
Point2D<double> query = HyperbolicSpace::polarToCartesian(angle_c, r_c);
return outOfReach(query, radius);
}
/**
* @param phi Angular coordinate of query point
* @param r Radial coordinate of query point
*/
std::pair<double, double> EuclideanDistances(double phi, double r) const {
/**
* If the query point is not within the quadnode, the distance minimum is on the border.
* Need to check whether extremum is between corners.
*/
double maxDistance = 0;
double minDistance = std::numeric_limits<double>::max();
if (responsible(phi, r)) minDistance = 0;
auto euclidDistancePolar = [](double phi_a, double r_a, double phi_b, double r_b){
return pow(r_a*r_a+r_b*r_b-2*r_a*r_b*cos(phi_a-phi_b), 0.5);
};
auto updateMinMax = [&minDistance, &maxDistance, phi, r, euclidDistancePolar](double phi_b, double r_b){
double extremalValue = euclidDistancePolar(phi, r, phi_b, r_b);
//assert(extremalValue <= r + r_b);
maxDistance = std::max(extremalValue, maxDistance);
minDistance = std::min(minDistance, extremalValue);
};
/**
* angular boundaries
*/
//left
double extremum = r*cos(this->leftAngle - phi);
if (extremum < maxR && extremum > minR) {
updateMinMax(this->leftAngle, extremum);
}
//right
extremum = r*cos(this->rightAngle - phi);
if (extremum < maxR && extremum > minR) {
updateMinMax(this->rightAngle, extremum);
}
/**
* radial boundaries.
*/
if (phi > leftAngle && phi < rightAngle) {
updateMinMax(phi, maxR);
updateMinMax(phi, minR);
}
if (phi + PI > leftAngle && phi + PI < rightAngle) {
updateMinMax(phi + PI, maxR);
updateMinMax(phi + PI, minR);
}
if (phi - PI > leftAngle && phi -PI < rightAngle) {
updateMinMax(phi - PI, maxR);
updateMinMax(phi - PI, minR);
}
/**
* corners
*/
updateMinMax(leftAngle, maxR);
updateMinMax(rightAngle, maxR);
updateMinMax(leftAngle, minR);
updateMinMax(rightAngle, minR);
//double shortCutGainMax = maxR + r - maxDistance;
//assert(minDistance <= minR + r);
//assert(maxDistance <= maxR + r);
assert(minDistance < maxDistance);
return std::pair<double, double>(minDistance, maxDistance);
}
/**
* Does the point at (angle, r) fall inside the region managed by this QuadNode?
*
* @param angle Angular coordinate of input point
* @param r Radial coordinate of input points
*
* @return True if input point lies within the region of this QuadNode
*/
bool responsible(double angle, double r) const {
return (angle >= leftAngle && angle < rightAngle && r >= minR && r < maxR);
}
/**
* Get all Elements in this QuadNode or a descendant of it
*
* @return vector of content type T
*/
std::vector<T> getElements() const {
if (isLeaf) {
return content;
} else {
assert(content.size() == 0);
assert(angles.size() == 0);
assert(radii.size() == 0);
vector<T> result;
for (index i = 0; i < children.size(); i++) {
std::vector<T> subresult = children[i].getElements();
result.insert(result.end(), subresult.begin(), subresult.end());
}
return result;
}
}
void getCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const {
assert(angles.size() == radii.size());
if (isLeaf) {
anglesContainer.insert(anglesContainer.end(), angles.begin(), angles.end());
radiiContainer.insert(radiiContainer.end(), radii.begin(), radii.end());
}
else {
assert(content.size() == 0);
assert(angles.size() == 0);
assert(radii.size() == 0);
for (index i = 0; i < children.size(); i++) {
children[i].getCoordinates(anglesContainer, radiiContainer);
}
}
}
/**
* Main query method, get points lying in a Euclidean circle around the center point.
* Optional limits can be given to get a different result or to reduce unnecessary comparisons
*
* Elements are pushed onto a vector which is a required argument. This is done to reduce copying
*
* Safe to call in parallel if diagnostics are disabled
*
* @param center Center of the query circle
* @param radius Radius of the query circle
* @param result Reference to the vector where the results will be stored
* @param minAngle Optional value for the minimum angular coordinate of the query region
* @param maxAngle Optional value for the maximum angular coordinate of the query region
* @param lowR Optional value for the minimum radial coordinate of the query region
* @param highR Optional value for the maximum radial coordinate of the query region
*/
void getElementsInEuclideanCircle(Point2D<double> center, double radius, vector<T> &result, double minAngle=0, double maxAngle=2*PI, double lowR=0, double highR = 1) const {
if (minAngle >= rightAngle || maxAngle <= leftAngle || lowR >= maxR || highR < lowerBoundR) return;
if (outOfReach(center, radius)) {
return;
}
if (isLeaf) {
const double rsq = radius*radius;
const double queryX = center[0];
const double queryY = center[1];
const count cSize = content.size();
for (index i = 0; i < cSize; i++) {
const double deltaX = positions[i].getX() - queryX;
const double deltaY = positions[i].getY() - queryY;
if (deltaX*deltaX + deltaY*deltaY < rsq) {
result.push_back(content[i]);
}
}
} else {
for (index i = 0; i < children.size(); i++) {
children[i].getElementsInEuclideanCircle(center, radius, result, minAngle, maxAngle, lowR, highR);
}
}
}
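/**
 * Usage sketch (illustrative, not part of this class): collect all elements
 * within Euclidean distance 0.1 of a polar query point.
 *
 *   vector<index> found;
 *   Point2D<double> q = HyperbolicSpace::polarToCartesian(1.0, 0.5);
 *   root.getElementsInEuclideanCircle(q, 0.1, found);
 */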
count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, bool suppressLeft, vector<T> &result) const {
double phi_q, r_q;
HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q);
if (suppressLeft && phi_q > rightAngle) return 0;
TRACE("Getting Euclidean distances");
auto distancePair = EuclideanDistances(phi_q, r_q);
double probUB = prob(distancePair.first);
double probLB = prob(distancePair.second);
assert(probLB <= probUB);
if (probUB > 0.5) probUB = 1;//if we are going to take every second element anyway, no use in calculating expensive jumps
if (probUB == 0) return 0;
//TODO: return whole if probLB == 1
double probdenom = std::log(1-probUB);
if (probdenom == 0) {
DEBUG(probUB, " not zero, but too small to process. Ignoring.");
return 0;
}
TRACE("probUB: ", probUB, ", probdenom: ", probdenom);
count expectedNeighbours = probUB*size();
count candidatesTested = 0;
if (isLeaf) {
const count lsize = content.size();
TRACE("Leaf of size ", lsize);
for (index i = 0; i < lsize; i++) {
//jump!
if (probUB < 1) {
double random = Aux::Random::real();
double delta = std::log(random) / probdenom;
assert(delta == delta);
assert(delta >= 0);
i += delta;
if (i >= lsize) break;
TRACE("Jumped with delta ", delta, " arrived at ", i);
}
assert(i >= 0);
//see where we've arrived
candidatesTested++;
double distance = positions[i].distance(euQuery);
double q = prob(distance);
q = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities
assert(q <= 1);
assert(q >= 0);
//accept?
double acc = Aux::Random::real();
if (acc < q) {
TRACE("Accepted node ", i, " with probability ", q, ".");
result.push_back(content[i]);
}
}
} else {
if (expectedNeighbours < 4 || probUB < 1.0/1000) {//select candidates directly instead of calling recursively
TRACE("probUB = ", probUB, ", switching to direct candidate selection.");
assert(probUB < 1);
const count stsize = size();
for (index i = 0; i < stsize; i++) {
double delta = std::log(Aux::Random::real()) / probdenom;
assert(delta >= 0);
i += delta;
TRACE("Jumped with delta ", delta, " arrived at ", i, ". Calling maybeGetKthElement.");
if (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. As of now, the offset is subtracted separately for each point
else break;
candidatesTested++;
}
} else {//carry on as normal
for (index i = 0; i < children.size(); i++) {
TRACE("Recursively calling child ", i);
candidatesTested += children[i].getElementsProbabilistically(euQuery, prob, suppressLeft, result);
}
}
}
//DEBUG("Expected at most ", expectedNeighbours, " neighbours, got ", result.size() - offset);
return candidatesTested;
}
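/**
 * Usage sketch (illustrative, not part of this class): sample neighbours with
 * an exponentially decaying connection probability; the decay constant is a
 * hypothetical choice.
 *
 *   vector<index> neighbours;
 *   auto prob = [](double distance) { return std::exp(-10.0*distance); };
 *   root.getElementsProbabilistically(query, prob, false, neighbours);
 */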
void maybeGetKthElement(double upperBound, Point2D<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const {
TRACE("Maybe get element ", k, " with upper Bound ", upperBound);
assert(k < size());
if (isLeaf) {
double acceptance = prob(euQuery.distance(positions[k]))/upperBound;
TRACE("Is leaf, accept with ", acceptance);
if (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]);
} else {
TRACE("Call recursively.");
index offset = 0;
for (index i = 0; i < children.size(); i++) {
count childsize = children[i].size();
if (k - offset < childsize) {
children[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens);
break;
}
offset += childsize;
}
}
}
/**
* Shrink all vectors in this subtree to fit the content.
* Call after quadtree construction is complete, causes better memory usage and cache efficiency
*/
void trim() {
content.shrink_to_fit();
positions.shrink_to_fit();
angles.shrink_to_fit();
radii.shrink_to_fit();
if (!isLeaf) {
for (index i = 0; i < children.size(); i++) {
children[i].trim();
}
}
}
/**
* Number of points lying in the region managed by this QuadNode
*/
count size() const {
return isLeaf ? content.size() : subTreeSize;
}
void recount() {
subTreeSize = 0;
for (index i = 0; i < children.size(); i++) {
children[i].recount();
subTreeSize += children[i].size();
}
}
/**
* Height of subtree hanging from this QuadNode
*/
count height() const {
count result = 1;//if leaf node, the children loop will not execute
for (auto child : children) result = std::max(result, child.height()+1);
return result;
}
/**
* Leaf cells in the subtree hanging from this QuadNode
*/
count countLeaves() const {
if (isLeaf) return 1;
count result = 0;
for (index i = 0; i < children.size(); i++) {
result += children[i].countLeaves();
}
return result;
}
double getLeftAngle() const {
return leftAngle;
}
double getRightAngle() const {
return rightAngle;
}
double getMinR() const {
return minR;
}
double getMaxR() const {
return maxR;
}
index getID() const {
return ID;
}
index indexSubtree(index nextID) {
index result = nextID;
assert(children.size() == 4 || children.size() == 0);
for (int i = 0; i < children.size(); i++) {
result = children[i].indexSubtree(result);
}
this->ID = result;
return result+1;
}
index getCellID(double phi, double r) const {
if (!responsible(phi, r)) return NetworKit::none;
if (isLeaf) return getID();
else {
for (int i = 0; i < children.size(); i++) {
index childresult = children[i].getCellID(phi, r);
if (childresult != NetworKit::none) return childresult;
}
throw std::runtime_error("No responsible child node found even though this node is responsible.");
}
}
index getMaxIDInSubtree() const {
if (isLeaf) return getID();
else {
index result = 0; //IDs are non-negative, so 0 is a safe lower bound even if index is unsigned
for (int i = 0; i < 4; i++) {
result = std::max(children[i].getMaxIDInSubtree(), result);
}
return std::max(result, getID());
}
}
count reindex(count offset) {
if (isLeaf)
{
#ifndef NETWORKIT_OMP2
#pragma omp task
#endif
{
index p = offset;
std::generate(content.begin(), content.end(), [&p](){return p++;});
}
offset += size();
} else {
for (int i = 0; i < 4; i++) {
offset = children[i].reindex(offset);
}
}
return offset;
}
};
}
#endif /* QUADNODE_H_ */
|
GB_binop__lxor_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int16)
// A*D function (colscale): GB (_AxD__lxor_int16)
// D*A function (rowscale): GB (_DxB__lxor_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int16)
// C=scalar+B GB (_bind1st__lxor_int16)
// C=scalar+B' GB (_bind1st_tran__lxor_int16)
// C=A+scalar GB (_bind2nd__lxor_int16)
// C=A'+scalar GB (_bind2nd_tran__lxor_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT16 || GxB_NO_LXOR_INT16)
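//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated kernel set)
//------------------------------------------------------------------------------
// The LXOR operator treats its int16_t inputs as booleans, so GB_BINOP above
// is a logical exclusive-or that yields 0 or 1. The helper name below is
// hypothetical and only spells out the semantics:
// e.g. (3,0) -> 1, (3,5) -> 0, (0,0) -> 0.
static inline int16_t GB_lxor_int16_sketch (int16_t aij, int16_t bij)
{
return (int16_t) ((aij != 0) != (bij != 0)) ;
}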
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lxor_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mypaint-tiled-surface.c | /* libmypaint - The MyPaint Brush Library
* Copyright (C) 2007-2014 Martin Renold <martinxyz@gmx.ch> et. al.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <config.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mypaint-config.h"
#include "mypaint-tiled-surface.h"
#include "tiled-surface-private.h"
#include "helpers.h"
#include "brushmodes.h"
#include "operationqueue.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void process_tile(MyPaintTiledSurface *self, int tx, int ty);
static void
begin_atomic_default(MyPaintSurface *surface)
{
mypaint_tiled_surface_begin_atomic((MyPaintTiledSurface *)surface);
}
static void
end_atomic_default(MyPaintSurface *surface, MyPaintRectangle *roi)
{
mypaint_tiled_surface_end_atomic((MyPaintTiledSurface *)surface, roi);
}
/**
* mypaint_tiled_surface_begin_atomic: (skip)
*
* Implementation of #MyPaintSurface::begin_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::begin_atomic vfunc.
* Application code should only use mypaint_surface_begin_atomic().
*/
void
mypaint_tiled_surface_begin_atomic(MyPaintTiledSurface *self)
{
self->dirty_bbox.height = 0;
self->dirty_bbox.width = 0;
self->dirty_bbox.y = 0;
self->dirty_bbox.x = 0;
}
/**
* mypaint_tiled_surface_end_atomic: (skip)
*
* Implementation of #MyPaintSurface::end_atomic vfunc
* Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
* if implementing their own #MyPaintSurface::end_atomic vfunc.
* Application code should only use mypaint_surface_end_atomic().
*/
void
mypaint_tiled_surface_end_atomic(MyPaintTiledSurface *self, MyPaintRectangle *roi)
{
// Process tiles
TileIndex *tiles;
int tiles_n = operation_queue_get_dirty_tiles(self->operation_queue, &tiles);
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
for (int i = 0; i < tiles_n; i++) {
process_tile(self, tiles[i].x, tiles[i].y);
}
operation_queue_clear_dirty_tiles(self->operation_queue);
if (roi) {
*roi = self->dirty_bbox;
}
}
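/**
 * Illustrative sketch, not part of libmypaint: a subclass that implements
 * its own begin_atomic vfunc is expected to chain up to
 * mypaint_tiled_surface_begin_atomic() as documented above. ExampleSurface
 * and its flag are hypothetical names used only for this sketch.
 */
typedef struct {
MyPaintTiledSurface parent;
gboolean in_atomic; /* hypothetical subclass state */
} ExampleSurface;
static void
example_surface_begin_atomic(MyPaintSurface *surface)
{
ExampleSurface *self = (ExampleSurface *)surface;
self->in_atomic = TRUE; /* subclass-specific setup */
/* chain up so the dirty bounding box is reset */
mypaint_tiled_surface_begin_atomic(&self->parent);
}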
/**
* mypaint_tiled_surface_tile_request_start:
*
* Fetch a tile out from the underlying tile store.
* When successful, request->data will be set to point to the fetched tile.
* Consumers must *always* call mypaint_tiled_surface_tile_request_end() with the same
* request to complete the transaction.
*/
void mypaint_tiled_surface_tile_request_start(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
assert(self->tile_request_start);
self->tile_request_start(self, request);
}
/**
* mypaint_tiled_surface_tile_request_end:
*
* Put a (potentially modified) tile back into the underlying tile store.
*
* Consumers must *always* call mypaint_tiled_surface_tile_request_start() with the same
* request to start the transaction before calling this function.
*/
void mypaint_tiled_surface_tile_request_end(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
assert(self->tile_request_end);
self->tile_request_end(self, request);
}
/* FIXME: either expose this through MyPaintSurface, or move it into the brush engine */
/**
* mypaint_tiled_surface_set_symmetry_state:
* @active: TRUE to enable, FALSE to disable.
* @center_x: X axis to mirror events across.
* @center_y: Y axis to mirror events across.
* @symmetry_type: Symmetry type to activate.
* @rot_symmetry_lines: Number of rotational symmetry lines.
*
* Enable/Disable symmetric brush painting across an X axis.
*/
void
mypaint_tiled_surface_set_symmetry_state(MyPaintTiledSurface *self, gboolean active,
float center_x, float center_y,
MyPaintSymmetryType symmetry_type,
int rot_symmetry_lines)
{
self->surface_do_symmetry = active;
self->surface_center_x = center_x;
self->surface_center_y = center_y;
self->symmetry_type = symmetry_type;
self->rot_symmetry_lines = MAX(2, rot_symmetry_lines);
}
/**
* mypaint_tile_request_init:
*
* Initialize a request for use with mypaint_tiled_surface_tile_request_start()
* and mypaint_tiled_surface_tile_request_end()
*/
void
mypaint_tile_request_init(MyPaintTileRequest *data, int level,
int tx, int ty, gboolean readonly)
{
data->tx = tx;
data->ty = ty;
data->readonly = readonly;
data->buffer = NULL;
data->context = NULL;
#ifdef _OPENMP
data->thread_id = omp_get_thread_num();
#else
data->thread_id = -1;
#endif
data->mipmap_level = level;
}
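/**
 * Illustrative sketch, not part of libmypaint: the full tile request
 * lifecycle is init -> start -> use buffer -> end, and end must be called
 * even when no buffer was returned. The function name is hypothetical.
 */
static void
example_clear_tile(MyPaintTiledSurface *self, int tx, int ty)
{
MyPaintTileRequest request;
mypaint_tile_request_init(&request, 0, tx, ty, FALSE); /* mipmap level 0, writable */
mypaint_tiled_surface_tile_request_start(self, &request);
if (request.buffer) {
/* tiles hold 15-bit premultiplied RGBA, 4 uint16_t values per pixel */
for (int i = 0; i < MYPAINT_TILE_SIZE * MYPAINT_TILE_SIZE * 4; i++) {
request.buffer[i] = 0;
}
}
/* always complete the transaction */
mypaint_tiled_surface_tile_request_end(self, &request);
}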
// Must be threadsafe
static inline float
calculate_r_sample(float x, float y, float aspect_ratio,
float sn, float cs)
{
const float yyr=(y*cs-x*sn)*aspect_ratio;
const float xxr=y*sn+x*cs;
const float r = (yyr*yyr + xxr*xxr);
return r;
}
static inline float
calculate_rr(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2)
{
// code duplication, see brush::count_dabs_to()
const float yy = (yp + 0.5f - y);
const float xx = (xp + 0.5f - x);
const float yyr=(yy*cs-xx*sn)*aspect_ratio;
const float xxr=yy*sn+xx*cs;
const float rr = (yyr*yyr + xxr*xxr) * one_over_radius2;
// rr is in range 0.0..1.0*sqrt(2)
return rr;
}
static inline float
sign_point_in_line( float px, float py, float vx, float vy )
{
return (px - vx) * (-vy) - (vx) * (py - vy);
}
static inline void
closest_point_to_line( float lx, float ly, float px, float py, float *ox, float *oy )
{
const float l2 = lx*lx + ly*ly;
const float ltp_dot = px*lx + py*ly;
const float t = ltp_dot / l2;
*ox = lx * t;
*oy = ly * t;
}
// Must be threadsafe
//
// This works by taking the visibility at the nearest point
// and dividing by 1.0 + delta.
//
// - nearest point: point where the dab has more influence
// - farthest point: point at a fixed distance away from
// the nearest point
// - delta: how much occluded is the farthest point relative
// to the nearest point
static inline float
calculate_rr_antialiased(int xp, int yp, float x, float y, float aspect_ratio,
float sn, float cs, float one_over_radius2,
float r_aa_start)
{
// calculate pixel position and borders in a way
// that the dab's center is always at zero
float pixel_right = x - (float)xp;
float pixel_bottom = y - (float)yp;
float pixel_center_x = pixel_right - 0.5f;
float pixel_center_y = pixel_bottom - 0.5f;
float pixel_left = pixel_right - 1.0f;
float pixel_top = pixel_bottom - 1.0f;
float nearest_x, nearest_y; // nearest to origin, but still inside pixel
float farthest_x, farthest_y; // farthest from origin, but still inside pixel
float r_near, r_far, rr_near, rr_far;
// Dab's center is inside pixel?
if( pixel_left<0 && pixel_right>0 &&
pixel_top<0 && pixel_bottom>0 )
{
nearest_x = 0;
nearest_y = 0;
r_near = rr_near = 0;
}
else
{
closest_point_to_line( cs, sn, pixel_center_x, pixel_center_y, &nearest_x, &nearest_y );
nearest_x = CLAMP( nearest_x, pixel_left, pixel_right );
nearest_y = CLAMP( nearest_y, pixel_top, pixel_bottom );
// XXX: precision of "nearest" values could be improved
// by intersecting the line that goes from nearest_x/Y to 0
// with the pixel's borders here, however the improvements
// would probably not justify the performance cost.
r_near = calculate_r_sample( nearest_x, nearest_y, aspect_ratio, sn, cs );
rr_near = r_near * one_over_radius2;
}
// out of dab's reach?
if( rr_near > 1.0f )
return rr_near;
// check on which side of the dab's line is the pixel center
float center_sign = sign_point_in_line( pixel_center_x, pixel_center_y, cs, -sn );
// radius of a circle with area=1
// A = pi * r * r
// r = sqrt(1/pi)
const float rad_area_1 = sqrtf( 1.0f / M_PI );
// center is below dab
if( center_sign < 0 )
{
farthest_x = nearest_x - sn*rad_area_1;
farthest_y = nearest_y + cs*rad_area_1;
}
// above dab
else
{
farthest_x = nearest_x + sn*rad_area_1;
farthest_y = nearest_y - cs*rad_area_1;
}
r_far = calculate_r_sample( farthest_x, farthest_y, aspect_ratio, sn, cs );
rr_far = r_far * one_over_radius2;
// check if we can skip heavier AA
if( r_far < r_aa_start )
return (rr_far+rr_near) * 0.5f;
// calculate AA approximate
float visibilityNear = 1.0f - rr_near;
float delta = rr_far - rr_near;
float delta2 = 1.0f + delta;
visibilityNear /= delta2;
return 1.0f - visibilityNear;
}
static inline float
calculate_opa(float rr, float hardness,
float segment1_offset, float segment1_slope,
float segment2_offset, float segment2_slope) {
const float fac = rr <= hardness ? segment1_slope : segment2_slope;
float opa = rr <= hardness ? segment1_offset : segment2_offset;
opa += rr*fac;
if (rr > 1.0f) {
opa = 0.0f;
}
#ifdef HEAVY_DEBUG
assert(isfinite(opa));
assert(opa >= 0.0f && opa <= 1.0f);
#endif
return opa;
}
// Must be threadsafe
void render_dab_mask (uint16_t * mask,
float x, float y,
float radius,
float hardness,
float aspect_ratio, float angle
)
{
hardness = CLAMP(hardness, 0.0, 1.0);
if (aspect_ratio<1.0) aspect_ratio=1.0;
assert(hardness != 0.0); // assured by caller
// For a graphical explanation, see:
// http://wiki.mypaint.info/Development/Documentation/Brushlib
//
// The hardness calculation is explained below:
//
// Dab opacity gradually fades out from the center (rr=0) to
// fringe (rr=1) of the dab. How exactly depends on the hardness.
// We use two linear segments, for which we pre-calculate slope
// and offset here.
//
// opa
// ^
// * .
// | *
// | .
// +-----------*> rr = (distance_from_center/radius)^2
// 0 1
//
float segment1_offset = 1.0f;
float segment1_slope = -(1.0f/hardness - 1.0f);
float segment2_offset = hardness/(1.0f-hardness);
float segment2_slope = -hardness/(1.0f-hardness);
// for hardness == 1.0, segment2 will never be used
float angle_rad=angle/360*2*M_PI;
float cs=cos(angle_rad);
float sn=sin(angle_rad);
const float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int x0 = floor (x - r_fringe);
int y0 = floor (y - r_fringe);
int x1 = floor (x + r_fringe);
int y1 = floor (y + r_fringe);
if (x0 < 0) x0 = 0;
if (y0 < 0) y0 = 0;
if (x1 > MYPAINT_TILE_SIZE-1) x1 = MYPAINT_TILE_SIZE-1;
if (y1 > MYPAINT_TILE_SIZE-1) y1 = MYPAINT_TILE_SIZE-1;
const float one_over_radius2 = 1.0f/(radius*radius);
// Pre-calculate rr and put it in the mask.
// This is an optimization that makes use of auto-vectorization
// OPTIMIZE: if using floats for the brush engine, store these directly in the mask
float rr_mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
if (radius < 3.0f)
{
const float aa_border = 1.0f;
float r_aa_start = ((radius>aa_border) ? (radius-aa_border) : 0);
r_aa_start *= r_aa_start / aspect_ratio;
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr_antialiased(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2,
r_aa_start);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
else
{
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
// we do run length encoding: if opacity is zero, the next
// value in the mask is the number of pixels that can be skipped,
// stored premultiplied by 4 (the uint16_t stride of an RGBA pixel).
uint16_t * mask_p = mask;
int skip=0;
skip += y0*MYPAINT_TILE_SIZE;
for (int yp = y0; yp <= y1; yp++) {
skip += x0;
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = rr_mask[(yp*MYPAINT_TILE_SIZE)+xp];
const float opa = calculate_opa(rr, hardness,
segment1_offset, segment1_slope,
segment2_offset, segment2_slope);
const uint16_t opa_ = opa * (1<<15);
if (!opa_) {
skip++;
} else {
if (skip) {
*mask_p++ = 0;
*mask_p++ = skip*4;
skip = 0;
}
*mask_p++ = opa_;
}
}
skip += MYPAINT_TILE_SIZE-xp;
}
*mask_p++ = 0;
*mask_p++ = 0;
}
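// Illustrative sketch, not part of the library: how a consumer walks the
// run-length encoded mask written above. A zero entry is followed by a
// skip count (already multiplied by 4, the uint16_t stride of an RGBA
// pixel), and a 0,0 pair terminates the mask. The name is hypothetical.
static int
example_count_mask_pixels(const uint16_t *mask)
{
int n = 0;
while (mask[0] || mask[1]) { // a 0,0 pair ends the mask
if (mask[0] == 0) {
mask += 2; // mask[1] held the skip count for the rgba pointer
} else {
n++; // one opacity entry per visible pixel
mask++;
}
}
return n;
}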
// Must be threadsafe
void
process_op(uint16_t *rgba_p, uint16_t *mask,
int tx, int ty, OperationDataDrawDab *op)
{
// first, we calculate the mask (opacity for each pixel)
render_dab_mask(mask,
op->x - tx*MYPAINT_TILE_SIZE,
op->y - ty*MYPAINT_TILE_SIZE,
op->radius,
op->hardness,
op->aspect_ratio, op->angle
);
// second, we use the mask to stamp a dab for each activated blend mode
if (op->paint < 1.0) {
if (op->normal) {
if (op->color_a == 1.0) {
draw_dab_pixels_BlendMode_Normal(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->normal*op->opaque*(1 - op->paint)*(1<<15));
} else {
// normal case for brushes that use smudging (eg. watercolor)
draw_dab_pixels_BlendMode_Normal_and_Eraser(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->color_a*(1<<15),
op->normal*op->opaque*(1 - op->paint)*(1<<15));
}
}
if (op->lock_alpha) {
draw_dab_pixels_BlendMode_LockAlpha(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->lock_alpha*op->opaque*(1 - op->colorize)*(1 - op->posterize)*(1 - op->paint)*(1<<15));
}
}
if (op->paint > 0.0) {
if (op->normal) {
if (op->color_a == 1.0) {
draw_dab_pixels_BlendMode_Normal_Paint(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->normal*op->opaque*op->paint*(1<<15));
} else {
// normal case for brushes that use smudging (eg. watercolor)
draw_dab_pixels_BlendMode_Normal_and_Eraser_Paint(mask, rgba_p,
op->color_r, op->color_g, op->color_b, op->color_a*(1<<15),
op->normal*op->opaque*op->paint*(1<<15));
}
}
if (op->lock_alpha) {
draw_dab_pixels_BlendMode_LockAlpha_Paint(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->lock_alpha*op->opaque*(1 - op->colorize)*(1 - op->posterize)*op->paint*(1<<15));
}
}
if (op->colorize) {
draw_dab_pixels_BlendMode_Color(mask, rgba_p,
op->color_r, op->color_g, op->color_b,
op->colorize*op->opaque*(1<<15));
}
if (op->posterize) {
draw_dab_pixels_BlendMode_Posterize(mask, rgba_p,
op->posterize*op->opaque*(1<<15),
op->posterize_num);
}
}
// Must be threadsafe
void
process_tile(MyPaintTiledSurface *self, int tx, int ty)
{
TileIndex tile_index = {tx, ty};
OperationDataDrawDab *op = operation_queue_pop(self->operation_queue, tile_index);
if (!op) {
return;
}
MyPaintTileRequest request_data;
const int mipmap_level = 0;
mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, FALSE);
mypaint_tiled_surface_tile_request_start(self, &request_data);
uint16_t * rgba_p = request_data.buffer;
if (!rgba_p) {
printf("Warning: Unable to get tile!\n");
// complete the transaction (the contract requires a matching end)
// and drop the pending operation so it does not leak
mypaint_tiled_surface_tile_request_end(self, &request_data);
free(op);
return;
}
uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
while (op) {
process_op(rgba_p, mask, tile_index.x, tile_index.y, op);
free(op);
op = operation_queue_pop(self->operation_queue, tile_index);
}
mypaint_tiled_surface_tile_request_end(self, &request_data);
}
// OPTIMIZE: send a list of the exact changed rects instead of a bounding box
// to minimize the area being composited? Profile to see the effect first.
void
update_dirty_bbox(MyPaintTiledSurface *self, OperationDataDrawDab *op)
{
int bb_x, bb_y, bb_w, bb_h;
float r_fringe = op->radius + 1.0f; // +1.0 should not be required, only to be sure
bb_x = floor (op->x - r_fringe);
bb_y = floor (op->y - r_fringe);
bb_w = floor (op->x + r_fringe) - bb_x + 1;
bb_h = floor (op->y + r_fringe) - bb_y + 1;
mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, bb_x, bb_y);
mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, bb_x+bb_w-1, bb_y+bb_h-1);
}
// returns TRUE if the surface was modified
gboolean draw_dab_internal (MyPaintTiledSurface *self, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize,
float posterize,
float posterize_num,
float paint
)
{
OperationDataDrawDab op_struct;
OperationDataDrawDab *op = &op_struct;
op->x = x;
op->y = y;
op->radius = radius;
op->aspect_ratio = aspect_ratio;
op->angle = angle;
op->opaque = CLAMP(opaque, 0.0f, 1.0f);
op->hardness = CLAMP(hardness, 0.0f, 1.0f);
op->lock_alpha = CLAMP(lock_alpha, 0.0f, 1.0f);
op->colorize = CLAMP(colorize, 0.0f, 1.0f);
op->posterize = CLAMP(posterize, 0.0f, 1.0f);
op->posterize_num= CLAMP(ROUND(posterize_num * 100.0), 1, 128);
op->paint = CLAMP(paint, 0.0f, 1.0f);
if (op->radius < 0.1f) return FALSE; // don't bother with dabs smaller than 0.1 pixel
if (op->hardness == 0.0f) return FALSE; // infinitely small center point, fully transparent outside
if (op->opaque == 0.0f) return FALSE;
color_r = CLAMP(color_r, 0.0f, 1.0f);
color_g = CLAMP(color_g, 0.0f, 1.0f);
color_b = CLAMP(color_b, 0.0f, 1.0f);
color_a = CLAMP(color_a, 0.0f, 1.0f);
op->color_r = color_r * (1<<15);
op->color_g = color_g * (1<<15);
op->color_b = color_b * (1<<15);
op->color_a = color_a;
// blending mode preparation
op->normal = 1.0f;
op->normal *= 1.0f-op->lock_alpha;
op->normal *= 1.0f-op->colorize;
op->normal *= 1.0f-op->posterize;
if (op->aspect_ratio<1.0f) op->aspect_ratio=1.0f;
// Determine the tiles influenced by operation, and queue it for processing for each tile
float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
for (int ty = ty1; ty <= ty2; ty++) {
for (int tx = tx1; tx <= tx2; tx++) {
const TileIndex tile_index = {tx, ty};
OperationDataDrawDab *op_copy = (OperationDataDrawDab *)malloc(sizeof(OperationDataDrawDab));
*op_copy = *op;
operation_queue_add(self->operation_queue, tile_index, op_copy);
}
}
update_dirty_bbox(self, op);
return TRUE;
}
// returns TRUE if the surface was modified
int draw_dab (MyPaintSurface *surface, float x, float y,
float radius,
float color_r, float color_g, float color_b,
float opaque, float hardness,
float color_a,
float aspect_ratio, float angle,
float lock_alpha,
float colorize,
float posterize,
float posterize_num,
float paint)
{
MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
gboolean surface_modified = FALSE;
// Normal pass
if (draw_dab_internal(self, x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, angle,
lock_alpha, colorize, posterize, posterize_num, paint)) {
surface_modified = TRUE;
}
// Symmetry pass
if(self->surface_do_symmetry) {
const float dist_x = (self->surface_center_x - x);
const float dist_y = (self->surface_center_y - y);
const float symm_x = self->surface_center_x + dist_x;
const float symm_y = self->surface_center_y + dist_y;
const float dab_dist = sqrt(dist_x * dist_x + dist_y * dist_y);
const float rot_width = 360.0 / ((float) self->rot_symmetry_lines);
const float dab_angle_offset = atan2(-dist_y, -dist_x) / (2 * M_PI) * 360.0;
int dab_count = 1;
int sub_dab_count = 0;
switch(self->symmetry_type) {
case MYPAINT_SYMMETRY_TYPE_VERTICAL:
if (draw_dab_internal(self, symm_x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, -angle,
lock_alpha, colorize, posterize, posterize_num, paint)) {
surface_modified = TRUE;
}
break;
case MYPAINT_SYMMETRY_TYPE_HORIZONTAL:
if (draw_dab_internal(self, x, symm_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, angle + 180.0,
lock_alpha, colorize, posterize, posterize_num, paint)) {
surface_modified = TRUE;
}
break;
case MYPAINT_SYMMETRY_TYPE_VERTHORZ:
// reflect vertically
if (draw_dab_internal(self, symm_x, y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, -angle,
lock_alpha, colorize, posterize, posterize_num, paint)) {
dab_count++;
}
// reflect horizontally
if (draw_dab_internal(self, x, symm_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, angle + 180.0,
lock_alpha, colorize, posterize, posterize_num, paint)) {
dab_count++;
}
// reflect horizontally and vertically
if (draw_dab_internal(self, symm_x, symm_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio, -angle - 180.0,
lock_alpha, colorize, posterize, posterize_num, paint)) {
dab_count++;
}
if (dab_count == 4) {
surface_modified = TRUE;
}
break;
case MYPAINT_SYMMETRY_TYPE_SNOWFLAKE: {
gboolean failed_subdabs = FALSE;
// draw self->rot_symmetry_lines snowflake dabs:
// the initial pass does not draw the reflected version
// of the initial dab, so all reflected dabs are drawn here
for (sub_dab_count = 0; sub_dab_count < self->rot_symmetry_lines; sub_dab_count++) {
// calculate the offset from rotational symmetry
const float symmetry_angle_offset = ((float)sub_dab_count) * rot_width;
// subtract the angle offset since we're progressing clockwise
const float cur_angle = symmetry_angle_offset - dab_angle_offset;
// progress through the rotation angle offsets clockwise
// to reflect the dab relative to itself
const float rot_x = self->surface_center_x - dab_dist*cos(cur_angle / 180.0 * M_PI);
const float rot_y = self->surface_center_y - dab_dist*sin(cur_angle / 180.0 * M_PI);
if (!draw_dab_internal(self, rot_x, rot_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a,
aspect_ratio, -angle + symmetry_angle_offset,
lock_alpha, colorize, posterize, posterize_num, paint)) {
failed_subdabs = TRUE;
break;
}
}
// do not bother falling through to rotational if the snowflaked dabs failed
if (failed_subdabs) {
break;
}
// if it succeeded, fallthrough to rotational to finish the process
}
case MYPAINT_SYMMETRY_TYPE_ROTATIONAL: {
// draw self->rot_symmetry_lines rotational dabs
// since initial pass handles the first dab
for (dab_count = 1; dab_count < self->rot_symmetry_lines; dab_count++)
{
// calculate the offset from rotational symmetry
const float symmetry_angle_offset = ((float)dab_count) * rot_width;
// add the angle initial dab is from center point
const float cur_angle = symmetry_angle_offset + dab_angle_offset;
// progress through the rotation angle offsets counterclockwise
const float rot_x = self->surface_center_x + dab_dist*cos(cur_angle / 180.0 * M_PI);
const float rot_y = self->surface_center_y + dab_dist*sin(cur_angle / 180.0 * M_PI);
if (!draw_dab_internal(self, rot_x, rot_y, radius, color_r, color_g, color_b,
opaque, hardness, color_a, aspect_ratio,
angle + symmetry_angle_offset,
lock_alpha, colorize, posterize, posterize_num, paint)) {
break;
}
}
if (dab_count == self->rot_symmetry_lines) {
surface_modified = TRUE;
}
break;
}
}
}
return surface_modified;
}
void get_color (MyPaintSurface *surface, float x, float y,
float radius,
float * color_r, float * color_g, float * color_b, float * color_a
)
{
MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;
if (radius < 1.0f) radius = 1.0f;
const float hardness = 0.5f;
const float aspect_ratio = 1.0f;
const float angle = 0.0f;
float sum_weight, sum_r, sum_g, sum_b, sum_a;
sum_weight = sum_r = sum_g = sum_b = sum_a = 0.0f;
// in case we return with an error
*color_r = 0.0f;
*color_g = 1.0f;
*color_b = 0.0f;
// WARNING: some code duplication with draw_dab
float r_fringe = radius + 1.0f; // +1 should not be required, only to be sure
int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
#ifdef _OPENMP
int tiles_n = (tx2 - tx1 + 1) * (ty2 - ty1 + 1); // bounds are inclusive
#endif
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
for (int ty = ty1; ty <= ty2; ty++) {
for (int tx = tx1; tx <= tx2; tx++) {
// Flush queued draw_dab operations
process_tile(self, tx, ty);
MyPaintTileRequest request_data;
const int mipmap_level = 0;
mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, TRUE);
mypaint_tiled_surface_tile_request_start(self, &request_data);
uint16_t * rgba_p = request_data.buffer;
if (!rgba_p) {
printf("Warning: Unable to get tile!\n");
// complete the transaction before giving up on this tile
mypaint_tiled_surface_tile_request_end(self, &request_data);
break;
}
// first, we calculate the mask (opacity for each pixel)
uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
render_dab_mask(mask,
x - tx*MYPAINT_TILE_SIZE,
y - ty*MYPAINT_TILE_SIZE,
radius,
hardness,
aspect_ratio, angle
);
// TODO: try atomic operations instead
#pragma omp critical
{
get_color_pixels_accumulate (mask, rgba_p,
&sum_weight, &sum_r, &sum_g, &sum_b, &sum_a);
}
mypaint_tiled_surface_tile_request_end(self, &request_data);
}
}
assert(sum_weight > 0.0f);
sum_a /= sum_weight;
sum_r /= sum_weight;
sum_g /= sum_weight;
sum_b /= sum_weight;
*color_a = sum_a;
// now un-premultiply the alpha
if (sum_a > 0.0f) {
*color_r = sum_r / sum_a;
*color_g = sum_g / sum_a;
*color_b = sum_b / sum_a;
} else {
// it is all transparent, so don't care about the colors
// (let's make them ugly so bugs will be visible)
*color_r = 0.0f;
*color_g = 1.0f;
*color_b = 0.0f;
}
// fix rounding problems that do happen due to floating point math
*color_r = CLAMP(*color_r, 0.0f, 1.0f);
*color_g = CLAMP(*color_g, 0.0f, 1.0f);
*color_b = CLAMP(*color_b, 0.0f, 1.0f);
*color_a = CLAMP(*color_a, 0.0f, 1.0f);
}
/**
* mypaint_tiled_surface_init: (skip)
*
* Initialize the surface, passing in implementations of the tile backend.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
**/
void
mypaint_tiled_surface_init(MyPaintTiledSurface *self,
MyPaintTileRequestStartFunction tile_request_start,
MyPaintTileRequestEndFunction tile_request_end)
{
mypaint_surface_init(&self->parent);
self->parent.draw_dab = draw_dab;
self->parent.get_color = get_color;
self->parent.begin_atomic = begin_atomic_default;
self->parent.end_atomic = end_atomic_default;
self->tile_request_end = tile_request_end;
self->tile_request_start = tile_request_start;
self->tile_size = MYPAINT_TILE_SIZE;
self->threadsafe_tile_requests = FALSE;
self->dirty_bbox.x = 0;
self->dirty_bbox.y = 0;
self->dirty_bbox.width = 0;
self->dirty_bbox.height = 0;
self->surface_do_symmetry = FALSE;
self->symmetry_type = MYPAINT_SYMMETRY_TYPE_VERTICAL;
self->surface_center_x = 0.0f;
self->surface_center_y = 0.0f;
self->rot_symmetry_lines = 2;
self->operation_queue = operation_queue_new();
}
/**
* mypaint_tiled_surface_destroy: (skip)
*
* Deallocate resources set up by mypaint_tiled_surface_init()
* Does not free the #MyPaintTiledSurface itself.
* Note: Only intended to be called from subclasses of #MyPaintTiledSurface
*/
void
mypaint_tiled_surface_destroy(MyPaintTiledSurface *self)
{
operation_queue_free(self->operation_queue);
}
|
looptc_c2.c | #include <string.h>
void deinterleave(char *page, char *transposed, const int ntabs, const int nchannels, const int ntimes, const int padded_size) {
int tab;
for (tab = 0; tab < ntabs; tab++) {
// unroll time dimension once
int time;
for (time = 0; time < ntimes; time+=2) {
// build temporary array containing 2 complete channel rows
char temp[2 * nchannels];
int channel;
#pragma omp parallel for
for (channel = 0; channel < nchannels; channel++) {
// reverse freq order to comply with header
temp[1*nchannels-channel-1] = page[(tab*nchannels + channel) * padded_size + (time + 0)];
temp[2*nchannels-channel-1] = page[(tab*nchannels + channel) * padded_size + (time + 1)];
}
// copy 2 full rows at once; include the tab offset so successive
// tabs do not overwrite each other (assumes a [tab][time][channel] output layout)
memcpy(&transposed[(tab * ntimes + time) * nchannels], temp, 2 * nchannels);
}
}
}
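/* Illustrative sketch, not part of the original file: a minimal driver that
 * deinterleaves one tab of 4 channels and 2 time samples. All sizes are
 * assumptions of the example; padded_size must be >= ntimes. Build with
 * -DDEINTERLEAVE_DEMO (a hypothetical guard) to compile it. */
#ifdef DEINTERLEAVE_DEMO
#include <stdio.h>
int main(void) {
enum { NTABS = 1, NCHANNELS = 4, NTIMES = 2, PADDED = 8 };
char page[NTABS * NCHANNELS * PADDED];
char transposed[NTABS * NTIMES * NCHANNELS];
for (int i = 0; i < NTABS * NCHANNELS * PADDED; i++) page[i] = (char)i;
deinterleave(page, transposed, NTABS, NCHANNELS, NTIMES, PADDED);
for (int i = 0; i < NTIMES * NCHANNELS; i++) printf("%d ", transposed[i]);
printf("\n");
return 0;
}
#endif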
|
vote.h | /*******************************************************************************
* vote.h - voting implementations
*******************************************************************************
* Add license here...
*******************************/
#ifndef VOTE_H
#define VOTE_H
#include <filter.h>
#include <gb.h>
#include <nnf.h>
#include <patch.h>
#include <rng.h>
#include <voting/histogram.h>
#include <voting/meanshift.h>
namespace pm {
#define NOT_NULL(type) reinterpret_cast<type *>(1)
/**
* \brief The method of vote
*/
enum VoteMethod {
/**
* \brief Bidirectional Similarity voting
* \see http://www.wisdom.weizmann.ac.il/~vision/VisualSummary.html
*/
BiDirSimVoting,
/**
* \brief Bidirectional Similarity with Histogram voting
*/
BiDirSimWithHistogramVoting,
/**
* \brief Default voting per pixel (over the corresponding overlapping patches)
*/
DefaultVoting,
/**
* \brief Default voting per pixel, with patch gain+bias adjustment
*/
DefaultGBAVoting,
/**
* \brief Direct pixel voting
*/
DirectVoting,
/**
* \brief Feature-specific voting with histograms for the rest
*/
FeatureWithHistogramVoting,
/**
* \brief Voting per pixel, with histogram reweighting
*/
HistogramVoting,
/**
* \brief Mean-shift voting per pixel (over the corresponding overlapping patches)
*/
MeanShiftVoting,
/**
* \brief Median voting per pixel (over the corresponding overlapping patches)
*/
MedianVoting,
/**
* \brief Tiled voting per patch (over the corresponding overlapped pixels)
*/
TiledVoting,
/**
* \brief Default voting using a mask as global weight mask
*/
WeightedVoting
};
/**
* \brief Type of output weight
*/
enum WeightType {
NoWeight,
PixelWeight,
PixelVariance
};
typedef const float& (*BinFloatOp)(const float&, const float&);
/**
* Parameters for voting
*/
struct VoteParams {
// General:
int patchSize;
VoteMethod method;
Filter filter;
// Output data:
WeightType weightType;
float *weights;
// BiDir Similarity:
float bidirSimWeight;
// Mean-shift:
voting::MeanShiftParams meanShift;
// Histogram:
std::vector<int> histChannels;
std::vector<int> histBins;
std::vector<Range> histRanges;
std::vector<float> histWeights;
std::vector<float> histBoosts;
bool histNormalize;
// Weighted:
Mask weightMask;
float weightBase;
// Feature-specific:
BinFloatOp featureOp;
float featureDefault;
std::vector<int> binaryChannels;
VoteParams() :
patchSize(7),
method(DefaultVoting),
filter(1),
weightType(NoWeight),
weights(NULL),
bidirSimWeight(0.5f),
histChannels(),
histBins(),
histRanges(),
histWeights(),
histBoosts(),
histNormalize(true),
weightBase(2.0f),
featureOp(NULL),
featureDefault(0.0f), // otherwise left uninitialized
binaryChannels(){
}
};
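/**
 * Illustrative sketch, not part of the original header: typical setup of
 * VoteParams for a default per-pixel vote that also records pixel weights.
 * The image dimensions and the nnf pointer are assumptions of the example.
 *
 * VoteParams params;
 * params.method = DefaultVoting;
 * params.weightType = PixelWeight;
 * std::vector<float> w(rows * cols); // one float per voted pixel
 * params.weights = w.data();
 * Image img = vote(nnf, params);
 */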
template <typename Patch, typename Scalar>
Image vote(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params);
template <int channels, typename Patch, typename Scalar>
inline float pixelvar(const NearestNeighborField<Patch, Scalar> *nnf,
int y, int startY, int endY,
int x, int startX, int endX,
Vec<Scalar, channels> mean) {
typedef Vec<Scalar, channels> PixVal;
const Texture *target = nnf->target;
// variance sum
float w = 0;
int N = 0;
// for each pixel from the corresponding patches
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
const Patch &patch = nnf->get(py, px);
int by = y - py; //< pixelInPatch = pos - patchPos
int bx = x - px;
PixVal value = target->at<PixVal>(patch.transform(by, bx));
PixVal diff = mean - value;
w += diff.dot(diff);
++N;
}
}
return w / N;
}
/**
* \brief Vote by using a simple filter over overlapping patches
*
* \param nnf
* the nearest neighbor field to vote with
* \param params
* the voting parameters
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_filter_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
const Texture *source = nnf->source;
const Texture *target = nnf->target;
Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels));
const Filter &filter = params.filter;
// for each pixel of the source
#if _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int y = 0; y < source->rows; ++y) {
for (int x = 0; x < source->cols; ++x) {
PixVal &votedPixel = vote.at<PixVal>(y, x);
float w = 0.0f;
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, nnf->width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, nnf->height);
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
const Patch &patch = nnf->get(py, px);
// /!\ Convolution: the filter is reversed in time
// => at pos (x,y), the filter is at (0, 0)
// => at pos (x-dx, y-dy), the filter is at (dx, dy)
// ++ we assume that only patches on the left / top have a contribution
int by = y - py; //< pixelInPatch = pos - patchPos
int bx = x - px;
PixVal value = target->at<PixVal>(patch.transform(by, bx));
votedPixel += value * filter[by][bx];
w += filter[by][bx];
}
}
if (w > 1e-8) {
votedPixel *= 1.0 / w;
} else {
#if _OPENMP
#pragma omp critical
#endif
{
std::cerr << "w0 @" << y << "/" << x << ", from: " << startY << "/" << startX;
std::cerr << " to " << endY << "/" << endX << "\n";
}
}
if (votedPixel[0] > 1e10) {
#if _OPENMP
#pragma omp critical
#endif
{
std::cerr << "Corrupted vote p@" << y << "/" << x << ": ";
for (int i = 0; i < channels; ++i) std::cerr << votedPixel[i] << ", ";
std::cerr << "\n";
}
}
// binary channels
for(int i = 0; i < params.binaryChannels.size(); ++i) {
Scalar &v = votedPixel[params.binaryChannels[i]];
if(v >= 50.0){
v = 100.0;
} else {
v = 0.0;
}
}
// store in output weight map if there is one
switch(params.weightType) {
case PixelWeight:
params.weights[source->cols * y + x] = w;
break;
case PixelVariance:
params.weights[source->cols * y + x] = pixelvar(
nnf,
y, startY, endY,
x, startX, endX,
votedPixel
);
break;
}
}
}
return vote;
}
/**
* \brief Vote by using a simple filter over overlapping patches
* and gain+bias adjustment of patches
*
* \param nnf
* the nearest neighbor field to vote with
* \param params
* the voting parameters
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_filter_gba_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
const Texture *source = nnf->source;
const Texture *target = nnf->target;
Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels));
int total = source->rows * source->cols;
const Filter &filter = params.filter;
// 1 = compute gain+bias of each target patch
typedef GainBias<Scalar> GB;
typedef typename GB::Vec3 Vec3;
Vec3 *gains = new Vec3[nnf->size], *biases = new Vec3[nnf->size];
#if _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int y = 0; y < nnf->height; ++y) {
for (int x = 0; x < nnf->width; ++x) {
const typename Patch::OriginalPatchType srcPatch(y, x);
const Patch &patch = nnf->get(y, x);
GB::template compute<channels>(source, target, srcPatch, patch,
gains[nnf->width * y + x], biases[nnf->width * y + x]);
}
}
// for each pixel of the source
#if _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int y = 0; y < source->rows; ++y) {
for (int x = 0; x < source->cols; ++x) {
float w = 0.0f;
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, nnf->width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, nnf->height);
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
const Patch &patch = nnf->get(py, px);
// /!\ Convolution: the filter is reversed in time
// => at pos (x,y), the filter is at (0, 0)
// => at pos (x-dx, y-dy), the filter is at (dx, dy)
// ++ we assume that only patches on the left / top have a contribution
int by = y - py; //< pixelInPatch = pos - patchPos
int bx = x - px;
PixVal value = target->at<PixVal>(patch.transform(by, bx));
// apply gain+bias adjustment
GB::template applyOn<channels>(value, gains[nnf->width * py + px], biases[nnf->width * py + px]);
// register the pixel vote
vote.at<PixVal>(y, x) += value * filter[by][bx];
w += filter[by][bx];
}
}
if (w > 1e-8) {
vote.at<PixVal>(y, x) *= 1.0 / w;
}
// store in output weight map if there is one
if (params.weightType == PixelWeight) params.weights[source->cols * y + x] = w;
}
}
return vote;
}
/**
* \brief Direct vote
*
* \param nnf
* the nearest neighbor field to vote with
* \param params
* the voting parameters
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_direct_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
const Texture *source = nnf->source;
const Texture *target = nnf->target;
Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels));
int total = source->rows * source->cols;
if (params.weightType != NoWeight) {
// store in output weight map if there is one
switch(params.weightType) {
case PixelWeight:
std::fill(params.weights, params.weights + total, 1.0f);
break;
case PixelVariance:
std::fill(params.weights, params.weights + total, 0.0f);
break;
}
}
// for each pixel of the source
#if _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int y = 0; y < source->rows; ++y) {
for (int x = 0; x < source->cols; ++x) {
// patch position
int py = std::min(y, nnf->height - 1);
int px = std::min(x, nnf->width - 1);
const Patch &patch = nnf->get(py, px);
// pixel within patch
int by = y - py;
int bx = x - px;
vote.at<PixVal>(y, x) = target->at<PixVal>(patch.transform(by, bx));
}
}
return vote;
}
/**
* \brief Vote by using a simple filter over overlapping patches
* and a tiled strategy with openmp
*
* \param nnf
* the nearest neighbor field to vote with
* \param params
* the voting parameters
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_filter_tiled_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
const Texture *source = nnf->source;
const Texture *target = nnf->target;
const int h = nnf->height, w = nnf->width; //< nnf bounds
const int sh = source->rows, sw = source->cols; //< voted image bounds
Image vote = Image::zeros(sh, sw, IM_32FC(channels));
int total = source->rows * source->cols;
float *weights = new float[total](); // /!\ the trailing () value-initializes the buffer to zero
if (params.weights != NULL) {
switch(params.weightType) {
case PixelWeight:
// accumulate directly into the caller's buffer instead of
// leaking the internal one; it must hold `total` floats
delete[] weights;
weights = params.weights;
std::fill(weights, weights + total, 0.0f);
break;
case PixelVariance:
std::fill(params.weights, params.weights + total, 0.0f); //< variance is not computed by the tiled vote
break;
}
}
const Filter &filter = params.filter;
#if _OPENMP
#pragma omp parallel
#endif
{
#if _OPENMP
int tid = omp_get_thread_num(), nt = omp_get_num_threads();
#else
int tid = 0, nt = 1; // single-threaded fallback when OpenMP is absent
#endif
int patchY = std::ceil(float(h) / Patch::width());
int patchX = std::ceil(float(w) / Patch::width());
int tiles = patchY * patchX;
int minTile = tiles * tid / nt;
int maxTile = tiles * (tid + 1) / nt;
// for each tile
for (int tile = minTile; tile < maxTile; ++tile) {
// for each patch (pixel) of the corresponding tile
int x = (tile % patchX) * Patch::width();
int nX = std::min(x + Patch::width(), w);
for (; x < nX; ++x) {
int y = (tile / patchX) * Patch::width();
int nY = std::min(y + Patch::width(), h);
for (; y < nY; ++y) {
// /!\ This ^ was done over the nnf field that is smaller than the source!
const Patch &patch = nnf->get(y, x);
// for each pixel in the corresponding patch
for (int px = 0; px < Patch::width(); ++px) {
for (int py = 0; py < Patch::width(); ++py) {
// /!\ <-- this is done over the source (nff + patch size)
PixVal value = target->at<PixVal>(patch.transform(py, px));
vote.at<PixVal>(y + py, x + px) += value * filter[py][px];
weights[sw * (y + py) + x + px] += filter[py][px];
}
}
}
}
}
}
std::cout << ". normalizing\n";
// normalization of the pixel values
#if _OPENMP
#pragma omp parallel for collapse(2) // a bare "omp for" here would be orphaned (no enclosing parallel region)
#endif
for (int y = 0; y < sh; ++y) {
for (int x = 0; x < sw; ++x) {
float &w = weights[sw * y + x];
if (w > 1e-8) {
vote.at<PixVal>(y, x) *= 1.0 / w;
} else {
w = 0.0f;
vote.at<PixVal>(y, x) *= 0.0;
}
}
}
// free and return
// free the weight workspace unless it is the caller's buffer
if (params.weights != weights) {
delete[] weights;
}
return vote;
}
template <int channels, typename Patch, typename Scalar>
Image vote_meanshift_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
typedef voting::Cluster<Scalar, channels> Cluster;
const Texture *source = nnf->source;
const Texture *target = nnf->target;
Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels));
// for each pixel of the source
#if _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int y = 0; y < source->rows; ++y) {
for (int x = 0; x < source->cols; ++x) {
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, nnf->width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, nnf->height);
// mean-shift voting on the feature space
// generated by the pixels on (startY:endY-1, startX:endX-1)
// - the window settings come from params.meanShift
typedef std::vector<PixVal> PixCluster;
// workspace
int pw = endX - startX;
int ph = endY - startY;
int N = pw * ph;
std::vector<PixVal> points(N, PixVal()); //< data points
int pidx = 0;
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px, ++pidx) {
const Patch &patch = nnf->get(py, px);
points[pidx] = target->at<PixVal>(patch.transform(y - py, x - px)); // * filter[by][bx];
}
}
// mean-shift algorithm
std::vector<Cluster> clusters;
clusters.reserve(5);
voting::meanshift(points, clusters, params.meanShift);
// pixel vote using the best cluster
int best = 0;
for (int i = 1, n = clusters.size(); i < n; ++i) {
if (clusters[i].votes > clusters[best].votes) {
best = i;
}
}
vote.at<PixVal>(y, x) = clusters[best].mean;
switch(params.weightType) {
case PixelWeight:
params.weights[vote.cols * y + x] = float(clusters[best].votes);
break;
case PixelVariance:
params.weights[vote.cols * y + x] = pixelvar(
nnf,
y, startY, endY,
x, startX, endX,
vote.at<PixVal>(y, x)
);
break;
}
}
}
return vote;
}
template <int K, typename Scalar>
class MedianList{
public:
typedef float Weight;
typedef std::pair<Scalar, Weight> Item;
struct ItemOrder {
bool operator()(const Item &left, const Item &right) {
return left.first < right.first;
}
};
typedef Vec<Scalar, K> Value;
typedef Vec<float, K> WeightVec;
MedianList(int N) : count(0), total(0.0f) {
for(int k = 0; k < K; ++k) lists[k].resize(N); // allocate space
}
inline void push(const Value &v, Weight w) {
for(int k = 0; k < K; ++k){
lists[k][count] = std::make_pair(v[k], w);
}
++count;
total += w;
}
inline void sort() {
for(int k = 0; k < K; ++k) std::sort(lists[k].begin(), lists[k].end(), ItemOrder());
}
inline Value get() const {
const Weight middle = total * 0.5f; // varies with the position (since boundary patches have smaller weights)
Value val; // value computation
WeightVec weight = WeightVec::zeros(); // weight storage
for(int i = 0; i < count; ++i) {
// separately for each channel
bool more = false;
for(int k = 0; k < K; ++k) {
const Item &it = lists[k][i];
if(weight[k] < middle){
weight[k] += it.second;
// if we went past the center, that's the median
if(weight[k] >= middle) val[k] = it.first;
else more = true; // else we need to go farther
} // else nothing more
}
if(!more) break;
}
return val;
}
private:
int count;
Weight total;
std::vector<Item> lists[K];
};
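// Illustrative usage sketch (not part of the original file; the Vec
// constructor used below is assumed for the example): the weighted median
// is taken independently per channel, at the first sorted item whose
// cumulative weight reaches half of the total.
//
//   MedianList<1, float> ml(3);
//   ml.push(Vec<float, 1>(10.f), 1.0f);
//   ml.push(Vec<float, 1>(20.f), 2.0f);
//   ml.push(Vec<float, 1>(30.f), 1.0f);
//   ml.sort();                  // sorts (value, weight) pairs by value
//   Vec<float, 1> m = ml.get(); // total = 4, middle = 2 => returns 20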
template <int channels, typename Patch, typename Scalar>
Image vote_median_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
const Texture *source = nnf->source;
const Texture *target = nnf->target;
Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels));
// weights recording
if (params.weights != NULL) {
std::fill(params.weights, params.weights + (source->cols * source->rows), 0.0f);
}
// for each pixel of the source
#if _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int y = 0; y < source->rows; ++y) {
for (int x = 0; x < source->cols; ++x) {
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, nnf->width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, nnf->height);
// sort pixel values
int pw = endX - startX;
int ph = endY - startY;
int N = pw * ph;
MedianList<channels, Scalar> points(N); //< data points
int pidx = 0;
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px, ++pidx) {
const Patch &patch = nnf->get(py, px);
int by = y - py; // location in the patch
int bx = x - px;
points.push(target->at<PixVal>(patch.transform(by, bx)), params.filter[by][bx]);
}
}
// sort data, channel by channel
points.sort();
// get the sort-of median
vote.at<PixVal>(y, x) = points.get();
switch(params.weightType) {
case PixelWeight:
params.weights[vote.cols * y + x] = 1.0f;
break;
case PixelVariance:
params.weights[vote.cols * y + x] = pixelvar(
nnf,
y, startY, endY,
x, startX, endX,
vote.at<PixVal>(y, x)
);
break;
}
}
}
return vote;
}
/**
* \brief Vote by using a weighted filter over overlapping patches
* that is reweighted as the histogram distribution changes.
*
* \param nnf
* the nearest neighbor field to vote with
* \param params
* the voting parameters
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_histogram_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
const Texture *T = nnf->source;
const Texture *E = nnf->target;
Image vote = Image::zeros(T->rows, T->cols, IM_32FC(channels));
int numPixels = T->rows * T->cols;
float areaT = numPixels, areaE = E->rows * E->cols;
const Filter &filter = params.filter;
// 1 = compute the histograms
typedef voting::Histogram<Patch, Scalar> Hist;
const int K = params.histChannels.size();
std::vector<Hist> histT(K); // to be changed
std::vector<Hist> histE(K); // DO NOT change
for (int i = 0; i < K; ++i) {
histT[i] = Hist(T, params.histChannels[i], params.histBins[i], params.histRanges[i]);
histE[i] = Hist(E, params.histChannels[i], params.histBins[i], params.histRanges[i]);
}
// 2 = random traversal
std::vector<Point<int> > index;
index.resize(numPixels);
for (int y = 0, i = 0; y < T->rows; ++y) {
for (int x = 0; x < T->cols; ++x, ++i) {
index[i] = Point2i(x, y);
}
}
knuth_shuffle(unif01, &index[0], numPixels);
// 3 = histogram-weighted voting
// for each pixel of the source
float ratioTE = areaT / areaE;
for (int idx = 0; idx < numPixels; ++idx) {
// the index
int y = index[idx].y;
int x = index[idx].x;
// std::cout << "#" << idx << " @(" << y << " / " << x << ")\n";
float weightSum = 0.0f;
PixVal &votedPixel = vote.at<PixVal>(y, x);
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, nnf->width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, nnf->height);
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
const Patch &patch = nnf->get(py, px);
int by = y - py; //< pixelInPatch = pos - patchPos
int bx = x - px;
PixVal value = E->at<PixVal>(patch.transform(by, bx));
// reweighting using the histogram
// w(p) = w * (1 + \sum_k a_k [hb_trg - hb_src]+) / (1 + \sum_k b_k [hb_src - hb_trg]+)
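// Worked example with illustrative numbers (one histogram channel k = 0,
// histNormalize off, histWeights[0] = 0.5, no boosts): if hbT = 10 and
// hbE = 4, then da = 4 - 10 = -6 (dA stays 1) and db = +6, so
// dB = 1 + 0.5 * 6 = 4 and w = filter[by][bx] / 4, i.e. values
// over-represented in T relative to E are damped.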
float dA = 1.0f, dB = 1.0f;
for (int k = 0; k < K; ++k) {
int ch = params.histChannels[k];
int hbT = histT[k].count(value[ch]);
int hbE = histE[k].count(value[ch]);
float da;
if (params.histNormalize) {
da = float(hbE) * ratioTE - float(hbT);
} else {
da = hbE - hbT;
}
float db = -da;
if (params.histBoosts.size() > 0 && da > 0) dA += params.histBoosts[k] * da;
if (db > 0) dB += params.histWeights[k] * db;
}
float w = filter[by][bx] * dA / dB;
votedPixel += value * w;
weightSum += w;
}
}
if (weightSum > 1e-8) {
votedPixel *= 1.0 / weightSum;
} else {
std::cerr << "Dangerously low weight @" << y << "/" << x << " = " << weightSum << "\n";
}
// binary channels
for(int i = 0; i < params.binaryChannels.size(); ++i) {
Scalar &v = votedPixel[params.binaryChannels[i]];
if(v >= 50.0){
v = 100.0;
} else {
v = 0.0;
}
}
// update the source histograms
const Scalar *prevPixel = T->ptr<Scalar>(y, x);
for (int k = 0; k < K; ++k) {
Hist &h = histT[k];
int ch = params.histChannels[k];
// previous value
float v = prevPixel[ch];
h.count(v) -= 1;
// new value
v = votedPixel[ch];
h.count(v) += 1;
}
// store in output weight map if there is one
switch(params.weightType) {
case PixelWeight:
params.weights[vote.cols * y + x] = weightSum;
break;
case PixelVariance:
params.weights[vote.cols * y + x] = pixelvar(
nnf,
y, startY, endY,
x, startX, endX,
votedPixel
);
break;
}
}
return vote;
}
/**
* \brief Vote using a mask as weight for the pixels
*
* \param nnf
* the nearest neighbor field to vote with
* \param params
* the voting parameters
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_weighted_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
const Texture *source = nnf->source;
const Texture *target = nnf->target;
Image vote = Image::zeros(source->rows, source->cols, IM_32FC(channels));
const Mask &mask = params.weightMask;
const float base = params.weightBase;
const int midP = Patch::width() / 2;
// for each pixel of the source
#if _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int y = 0; y < source->rows; ++y) {
for (int x = 0; x < source->cols; ++x) {
PixVal &votedPixel = vote.at<PixVal>(y, x);
float wSum = 0.0f;
// pixel distance
float pxDist = mask.at<float>(
std::min(std::max(y - midP, 0), mask.rows - 1),
std::min(std::max(x - midP, 0), mask.cols - 1)
);
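// Each contribution below is attenuated exponentially by how much deeper
// in the mask its patch sits than this pixel: w = base^-(mask(p) - pxDist).
// Illustrative numbers: base = 2 and a mask gap of 3 give w = 1/8.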
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, nnf->width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, nnf->height);
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
const Patch &patch = nnf->get(py, px);
// /!\ Convolution: the filter is reversed in time
// => at pos (x,y), the filter is at (0, 0)
// => at pos (x-dx, y-dy), the filter is at (dx, dy)
// ++ we assume that only patches on the left / top have a contribution
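// e.g. with Patch::width() = 3 and (y, x) = (5, 5): the patch anchored at
// (5, 5) contributes through filter[0][0], the one anchored at (3, 4)
// through filter[2][1].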
int by = y - py; //< pixelInPatch = pos - patchPos
int bx = x - px;
PixVal value = target->at<PixVal>(patch.transform(by, bx));
float w = std::pow(base, -(mask.at<float>(py, px) - pxDist));
votedPixel += value * w;
wSum += w;
}
}
if (wSum > 1e-8) {
votedPixel *= 1.0 / wSum;
} else {
#if _OPENMP
#pragma omp critical
#endif
{
std::cerr << "w0 (" << wSum << ") @" << y << "/" << x << ", from: " << startY << "/" << startX;
std::cerr << " to " << endY << "/" << endX << "\n";
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
float w = std::pow(2.0f, -(mask.at<float>(py, px) - pxDist));
std::cerr << "(" << py << ", " << px << ") -> " << mask.at<float>(py, px) << " -> " << w << ", ";
}
}
std::cerr << "\n\n";
}
}
// binary channels
for(int i = 0; i < params.binaryChannels.size(); ++i) {
Scalar &v = votedPixel[params.binaryChannels[i]];
if(v >= 50.0){
v = 100.0;
} else {
v = 0.0;
}
}
// store in output weight map if there is one
switch(params.weightType) {
case PixelWeight:
params.weights[vote.cols * y + x] = wSum;
break;
case PixelVariance:
params.weights[vote.cols * y + x] = pixelvar(
nnf,
y, startY, endY,
x, startX, endX,
votedPixel
);
break;
}
}
}
return vote;
}
/**
* \brief Vote using a bidirectional similarity measure
*
* \param nnfs
* array containing [0]:the T to S nnf, [1]: the S to T nnf
* \param params
* the voting parameters including the S to T nnf
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_bidir_sim_n(const NearestNeighborField<Patch, Scalar> *nnfs,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
typedef NearestNeighborField<Patch, Scalar> NNF;
typedef typename NNF::SourcePatch SourcePatch;
typedef typename NNF::TargetPatch TargetPatch;
const NNF &dirNNF = nnfs[0]; // direct T to S nnf
const NNF &revNNF = nnfs[1]; // reverse S to T nnf
const Texture *T = dirNNF.source; // T in bidir sim
const Texture *S = dirNNF.target; // S in bidir sim
Image vote = Image::zeros(T->rows, T->cols, IM_32FC(channels));
const Filter &filter = params.filter;
float *weights;
// set weights to 0 to start
if (params.weights != NULL) {
weights = params.weights;
std::fill(weights, weights + (T->height * T->width), 0.0f);
} else {
weights = new float[T->height * T->width](); // init to 0
}
// group weights
float dirWeight, revWeight;
{
double regularizer = std::max(dirNNF.size, revNNF.size);
double dw = (1.0 - params.bidirSimWeight) * regularizer / dirNNF.size;
double rw = params.bidirSimWeight * regularizer / revNNF.size;
dirWeight = float(dw);
revWeight = float(rw);
std::cout << "dw: " << dirWeight << ", rw: " << revWeight << "\n";
}
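// Illustrative numbers: with dirNNF.size = 100, revNNF.size = 25 and
// bidirSimWeight = 0.5, the regularizer is 100, so dirWeight = 0.5 and
// revWeight = 2.0 -- each reverse vote counts four times as much as a
// direct one, compensating for the smaller reverse field.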
// process the reverse nnf first
for (int y = 0; y < revNNF.height; ++y) {
for(int x = 0; x < revNNF.width; ++x) {
SourcePatch p(y, x);
const TargetPatch &q = revNNF.get(y, x);
// for each pixel from the patch p in S
for (typename SourcePatch::IndexIterator it = p.begin(); it; ++it) {
typename SourcePatch::Index i = *it;
const Point2i lp(p.transform(i));
PixVal pix = S->at<PixVal>(lp); // transformation in S space
// target in T
const Point2i lq(q * i); // q.transform(i)
const Point2i delta = lp - Point2i(x, y); // (x,y) is at the top-left of the patch
float w = filter[delta.y][delta.x] * revWeight;
vote.at<PixVal>(lq.y, lq.x) += pix * w;
// store weight (count for Ns when filter = 1)
weights[vote.cols * lq.y + lq.x] += w;
}
}
}
// process the direct nnf finally
#if _OPENMP
#pragma omp parallel for collapse(2)
#endif
for (int y = 0; y < T->rows; ++y) {
for (int x = 0; x < T->cols; ++x) {
PixVal &votedPixel = vote.at<PixVal>(y, x);
float wT = 0.0f;
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, dirNNF.width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, dirNNF.height);
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
const TargetPatch &patch = dirNNF.get(py, px);
// /!\ Convolution: the filter is reversed in time
// => at pos (x,y), the filter is at (0, 0)
// => at pos (x-dx, y-dy), the filter is at (dx, dy)
// ++ we assume that only patches on the left / top have a contribution
int by = y - py; //< pixelInPatch = pos - patchPos
int bx = x - px;
PixVal value = S->at<PixVal>(patch.transform(by, bx));
float w = filter[by][bx] * dirWeight;
votedPixel += value * w;
wT += w;
}
}
float wS = weights[T->cols * y + x];
float wSum = wT + wS;
weights[T->cols * y + x] = wSum;
if (wSum > 1e-8) {
votedPixel *= 1.0 / wSum;
} else {
#if _OPENMP
#pragma omp critical
#endif
{
std::cerr << "w0 @" << y << "/" << x << ", from: " << startY << "/" << startX;
std::cerr << " to " << endY << "/" << endX << "\n";
}
}
// binary channels
for(int i = 0; i < params.binaryChannels.size(); ++i) {
Scalar &v = votedPixel[params.binaryChannels[i]];
if(v >= 50.0){
v = 100.0;
} else {
v = 0.0;
}
}
}
}
// free weights if only used locally
if(params.weights == NULL) {
delete[] weights;
}
return vote;
}
/**
* \brief Vote using a bidirectional similarity measure and histograms
*
* \param nnfs
* array containing [0]:the T to S nnf, [1]: the S to T nnf
* \param params
* the voting parameters including the S to T nnf
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_bidir_sim_histogram_n(const NearestNeighborField<Patch, Scalar> *nnfs,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
typedef NearestNeighborField<Patch, Scalar> NNF;
typedef typename NNF::SourcePatch SourcePatch;
typedef typename NNF::TargetPatch TargetPatch;
const NNF &dirNNF = nnfs[0]; // direct T to S nnf
const NNF &revNNF = nnfs[1]; // reverse S to T nnf
const Texture *T = dirNNF.source; // T in bidir sim
const Texture *E = dirNNF.target; // S in bidir sim
Image vote = Image::zeros(T->rows, T->cols, IM_32FC(channels));
// const Filter &filter = params.filter;
float *weights;
// set weights to 0 to start
if (params.weights != NULL) {
weights = params.weights;
std::fill(weights, weights + (T->height * T->width), 0.0f);
} else {
weights = new float[T->height * T->width](); // init to 0
}
// 1 = construct the accumulator from revNNF
std::vector<Point2i> *accum = new std::vector<Point2i>[T->height * T->width](); // init vectors
if(accum == NULL){
mexErrMsgIdAndTxt("MATLAB:vote:new", "Failed allocation of accum in BidirSimVoting with Histograms");
}
// using the reverse nnf first
for (int y = 0; y < revNNF.height; ++y) {
for(int x = 0; x < revNNF.width; ++x) {
SourcePatch p(y, x);
const TargetPatch &q = revNNF.get(y, x);
// for each pixel from the patch p in S
for (typename SourcePatch::IndexIterator it = p.begin(); it; ++it) {
typename SourcePatch::Index i = *it;
// pixel location in E
const Point2i lp(p.transform(i));
// pixel location in T
const Point2i lq(q.transform(i));
accum[vote.cols * lq.y + lq.x].push_back(lp);
}
}
}
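// accum[t] now lists every E-pixel whose reverse-NNF patch lands on the
// T-pixel t; these locations are merged with the direct-NNF votes below.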
// group weights
float dirWeight, revWeight;
{
double regularizer = std::max(dirNNF.size, revNNF.size);
double dw = (1.0 - params.bidirSimWeight) * regularizer / dirNNF.size;
double rw = params.bidirSimWeight * regularizer / revNNF.size;
dirWeight = float(dw);
revWeight = float(rw);
std::cout << "dw: " << dirWeight << ", rw: " << revWeight << "\n";
}
// histogram voting now, using both the direct NNF and the accumulator
int numPixels = T->rows * T->cols;
float areaT = numPixels, areaE = E->rows * E->cols;
float ratioTE = areaT / areaE;
// 2 = compute the histograms
typedef voting::Histogram<Patch, Scalar> Hist;
const int K = params.histChannels.size();
std::vector<Hist> histT(K); // to be changed
std::vector<Hist> histE(K); // DO NOT change
for (int i = 0; i < K; ++i) {
histT[i] = Hist(T, params.histChannels[i], params.histBins[i], params.histRanges[i]);
histE[i] = Hist(E, params.histChannels[i], params.histBins[i], params.histRanges[i]);
}
// 2 = random traversal
std::vector<Point2i> index;
index.resize(numPixels);
for (int y = 0, i = 0; y < T->rows; ++y) {
for (int x = 0; x < T->cols; ++x, ++i) {
index[i] = Point2i(x, y);
}
}
knuth_shuffle(unif01, &index[0], numPixels);
// 3 = histogram-weighted voting
// for each pixel of the source
// + the accumulated pixels with revNNF
for (int idx = 0; idx < numPixels; ++idx) {
// the index
int y = index[idx].y;
int x = index[idx].x;
float weightSum = 0.0f;
PixVal &votedPixel = vote.at<PixVal>(y, x);
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, dirNNF.width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, dirNNF.height);
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
const Patch &patch = dirNNF.get(py, px);
int by = y - py; //< pixelInPatch = pos - patchPos
int bx = x - px;
PixVal value = E->at<PixVal>(patch.transform(by, bx));
// reweighting using the histogram
// w(p) = w * (1 + \sum_k a_k [hb_trg - hb_src]+) / (1 + \sum_k b_k [hb_src - hb_trg]+)
float dH = 1.0f;
for (int k = 0; k < K; ++k) {
int ch = params.histChannels[k];
int hbT = histT[k].count(value[ch]);
int hbE = histE[k].count(value[ch]);
float dh;
if (params.histNormalize) {
dh = float(hbT) - float(hbE) * ratioTE; // relatively to T
} else {
dh = hbT - hbE;
}
if (dh > 0) dH += params.histWeights[k] * dh;
}
// filter[by][bx] / dH
float w = dirWeight / dH; // Note: filter is not used in E!
votedPixel += value * w;
weightSum += w;
}
}
// for each pixel from the accumulated map
const std::vector<Point2i> &pixelAccum = accum[vote.cols * y + x];
for (int i = 0, n = pixelAccum.size(); i < n; ++i) {
const Point2i &p = pixelAccum[i];
PixVal value = E->at<PixVal>(p);
// reweighting using the histogram
// w(p) = w * (1 + \sum_k a_k [hb_trg - hb_src]+) / (1 + \sum_k b_k [hb_src - hb_trg]+)
float dH = 1.0f;
for (int k = 0; k < K; ++k) {
int ch = params.histChannels[k];
int hbT = histT[k].count(value[ch]);
int hbE = histE[k].count(value[ch]);
float dh;
if (params.histNormalize) {
dh = float(hbT) - float(hbE) * ratioTE; // relatively to T
} else {
dh = hbT - hbE; // consistent with the normalized branch and the direct-NNF loop above
}
if (dh > 0) dH += params.histWeights[k] * dh;
}
float w = revWeight / dH;
votedPixel += value * w;
weightSum += w;
}
if (weightSum > 1e-8) {
votedPixel *= 1.0 / weightSum;
} else {
std::cerr << "Dangerously low weight @" << y << "/" << x << " = " << weightSum << "\n";
}
// binary channels
for(int i = 0; i < params.binaryChannels.size(); ++i) {
Scalar &v = votedPixel[params.binaryChannels[i]];
if(v >= 50.0){
v = 100.0;
} else {
v = 0.0;
}
}
// update the source histograms
const Scalar *prevPixel = T->ptr<Scalar>(y, x);
for (int k = 0; k < K; ++k) {
float v;
Hist &h = histT[k];
int ch = params.histChannels[k];
// previous value
v = prevPixel[ch];
h.count(v) -= 1;
// new value
v = votedPixel[ch];
h.count(v) += 1;
}
// store in output weight map if there is one
switch(params.weightType) {
case PixelWeight:
params.weights[vote.cols * y + x] = weightSum;
break;
case PixelVariance:
params.weights[vote.cols * y + x] = pixelvar(
&dirNNF,
y, startY, endY,
x, startX, endX,
votedPixel
);
break;
}
}
// free memory
delete[] accum;
if(params.weights == NULL) {
delete[] weights;
}
// done!
return vote;
}
/**
* \brief Feature-specific voting and histogram voting for the rest
*
* \param nnf
* the nearest neighbor field to vote with
* \param params
* the voting parameters
* \return the voted picture
*/
template <int channels, typename Patch, typename Scalar>
Image vote_feature_histogram_n(const NearestNeighborField<Patch, Scalar> *nnf,
VoteParams &params) {
typedef Vec<Scalar, channels> PixVal;
const Texture *T = nnf->source;
const Texture *E = nnf->target;
Image vote = Image::zeros(T->rows, T->cols, IM_32FC(channels));
int numPixels = T->rows * T->cols;
float areaT = numPixels, areaE = E->rows * E->cols;
const Filter &filter = params.filter;
// 1 = compute the histograms
typedef voting::Histogram<Patch, Scalar> Hist;
const int K = params.histChannels.size();
std::vector<Hist> histT(K); // to be changed
std::vector<Hist> histE(K); // DO NOT change
for (int i = 0; i < K; ++i) {
histT[i] = Hist(T, params.histChannels[i], params.histBins[i], params.histRanges[i]);
histE[i] = Hist(E, params.histChannels[i], params.histBins[i], params.histRanges[i]);
}
// the feature channels
std::vector<unsigned int> features;
for(unsigned int c = 0; c < channels; ++c) {
bool isFeature = true;
for(unsigned int k = 0; k < K; ++k) {
if(c == params.histChannels[k]){
std::cout << "H" << c << ", ";
isFeature = false;
break;
}
}
if(isFeature) {
std::cout << "F" << c << ", ";
features.push_back(c);
}
}
std::cout << "\n";
// 2 = random traversal
std::vector<Point<int> > index;
index.resize(numPixels);
for (int y = 0, i = 0; y < T->rows; ++y) {
for (int x = 0; x < T->cols; ++x, ++i) {
index[i] = Point2i(x, y);
}
}
knuth_shuffle(unif01, &index[0], numPixels);
// 3 = histogram-weighted voting
// for each pixel of the source
float ratioTE = areaT / areaE;
for (int idx = 0; idx < numPixels; ++idx) {
// the index
int y = index[idx].y;
int x = index[idx].x;
// std::cout << "#" << idx << " @(" << y << " / " << x << ")\n";
float weightSum = 0.0f;
PixVal &votedPixel = vote.at<PixVal>(y, x);
for(int f = 0; f < features.size(); ++f) {
votedPixel[features[f]] = FLT_MAX;
}
// for each pixel from the corresponding patches
int startX = std::max(0, x - Patch::width() + 1);
int endX = std::min(x + 1, nnf->width);
int startY = std::max(0, y - Patch::width() + 1);
int endY = std::min(y + 1, nnf->height);
for (int py = startY; py < endY; ++py) {
for (int px = startX; px < endX; ++px) {
const Patch &patch = nnf->get(py, px);
int by = y - py; //< pixelInPatch = pos - patchPos
int bx = x - px;
PixVal value = E->at<PixVal>(patch.transform(by, bx));
// extract feature data
PixVal F;
for(int f = 0; f < features.size(); ++f){
int c = features[f];
F[c] = value[c];
value[c] = Scalar(0.0);
}
// reweighting using the histogram
// w(p) = w * (1 + \sum_k a_k [hb_trg - hb_src]+) / (1 + \sum_k b_k [hb_src - hb_trg]+)
float dA = 1.0f, dB = 1.0f;
for (int k = 0; k < K; ++k) {
int ch = params.histChannels[k];
int hbT = histT[k].count(value[ch]);
int hbE = histE[k].count(value[ch]);
float da;
if (params.histNormalize) {
da = float(hbE) * ratioTE - float(hbT);
} else {
da = hbE - hbT;
}
float db = -da;
if (params.histBoosts.size() > 0 && da > 0) dA += params.histBoosts[k] * da;
if (db > 0) dB += params.histWeights[k] * db;
}
float w = filter[by][bx] * dA / dB;
votedPixel += value * w;
weightSum += w;
// feature specific values
for(int f = 0; f < features.size(); ++f) {
int c = features[f];
votedPixel[c] = params.featureOp(votedPixel[c], F[c]);
}
}
}
if (weightSum > 1e-8) {
const float invWeight = 1.0f / weightSum;
// normalize the histogram channels only; the feature channels were
// aggregated with params.featureOp and must not be rescaled
for(int i = 0; i < K; ++i) votedPixel[params.histChannels[i]] *= invWeight;
} else {
std::cerr << "Dangerously low weight @" << y << "/" << x << " = " << weightSum << "\n";
}
// binary channels
for(int i = 0; i < params.binaryChannels.size(); ++i) {
Scalar &v = votedPixel[params.binaryChannels[i]];
if(v >= 50.0){
v = 100.0;
} else {
v = 0.0;
}
}
// update the source histograms
const Scalar *prevPixel = T->ptr<Scalar>(y, x);
for (int k = 0; k < K; ++k) {
Hist &h = histT[k];
int ch = params.histChannels[k];
// previous value
float v = prevPixel[ch];
h.count(v) -= 1;
// new value
v = votedPixel[ch];
h.count(v) += 1;
}
// store in output weight map if there is one
switch(params.weightType) {
case PixelWeight:
params.weights[vote.cols * y + x] = weightSum;
break;
case PixelVariance:
params.weights[vote.cols * y + x] = pixelvar(
nnf,
y, startY, endY,
x, startX, endX,
votedPixel
);
break;
}
}
return vote;
}
// #########################################################################
// ##### Multi-channels voting #############################################
// #########################################################################
template <typename Patch, typename Scalar, int channels>
struct MultiChannelVote {
inline Image operator()(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) const {
if (nnf->source->channels() == channels) {
switch (params.method) {
case BiDirSimVoting:
return vote_bidir_sim_n<channels>(nnf, params);
case BiDirSimWithHistogramVoting:
return vote_bidir_sim_histogram_n<channels>(nnf, params);
case DefaultVoting:
return vote_filter_n<channels>(nnf, params);
case DefaultGBAVoting:
return vote_filter_gba_n<channels>(nnf, params);
case DirectVoting:
return vote_direct_n<channels>(nnf, params);
case FeatureWithHistogramVoting:
return vote_feature_histogram_n<channels>(nnf, params);
case HistogramVoting:
return vote_histogram_n<channels>(nnf, params);
case MeanShiftVoting:
return vote_meanshift_n<channels>(nnf, params);
case MedianVoting:
return vote_median_n<channels>(nnf, params);
case TiledVoting:
return vote_filter_tiled_n<channels>(nnf, params);
case WeightedVoting:
return vote_weighted_n<channels>(nnf, params);
default:
std::cerr << "Invalid voting method: " << params.method << " !";
return Image();
}
} else {
MultiChannelVote<Patch, Scalar, channels + 1 > vop;
return vop(nnf, params);
}
}
};
#ifndef MAX_SUPPORTED_CHANNELS
#define MAX_SUPPORTED_CHANNELS 12
#endif
template <typename Patch, typename Scalar>
struct MultiChannelVote<Patch, Scalar, MAX_SUPPORTED_CHANNELS + 1 > {
inline Image operator()(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) const {
std::cerr << "Warning: voting only supported up to " << MAX_SUPPORTED_CHANNELS << " channels!\n";
return Image();
}
};
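// The runtime channel count is lifted to a compile-time parameter by linear
// template recursion: vote() starts at MultiChannelVote<Patch, Scalar, 1>,
// which either matches nnf->source->channels() and dispatches on
// params.method, or forwards to <..., channels + 1>; the specialization
// above terminates the chain at MAX_SUPPORTED_CHANNELS + 1. E.g. a
// 3-channel source resolves as <...,1> -> <...,2> -> <...,3> ->
// vote_*_n<3>(nnf, params).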
template <typename Patch, typename Scalar>
Image vote(const NearestNeighborField<Patch, Scalar> *nnf, VoteParams &params) {
MultiChannelVote<Patch, Scalar, 1> vop;
return vop(nnf, params);
}
}
#endif /* VOTE_H */
|
otfft_avxdit16omp.h | /******************************************************************************
* OTFFT AVXDIT(Radix-16) of OpenMP Version 6.5
*
* Copyright (c) 2015 OK Ojisan(Takuya OKAHISA)
* Released under the MIT license
* http://opensource.org/licenses/mit-license.php
******************************************************************************/
#ifndef otfft_avxdit16omp_h
#define otfft_avxdit16omp_h
//#include "otfft/otfft_misc.h"
//#include "otfft_avxdit8.h"
namespace OTFFT_AVXDIT16omp { /////////////////////////////////////////////////
using namespace OTFFT_MISC;
///////////////////////////////////////////////////////////////////////////////
// Forward butterfly operation
///////////////////////////////////////////////////////////////////////////////
template <int n, int s> struct fwdcore
{
static const int n1 = n/16;
static const int N = n*s;
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int i = 0; i < N/32; i++) {
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s16p = 16*sp;
const ymm w1p = duppz3(W[1*sp]);
const ymm w2p = duppz3(W[2*sp]);
const ymm w3p = duppz3(W[3*sp]);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
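// Only w^sp, w^2sp and w^3sp are loaded from the twiddle table; the
// remaining factors follow from w^(a+b) = w^a * w^b, e.g.
// w5p = w2p * w3p = w^5sp and wfp = w7p * w8p = w^15sp, trading table
// bandwidth for a few extra multiplies.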
complex_vector xq_sp = x + q + sp;
complex_vector yq_s16p = y + q + s16p;
const ymm y0 = getpz2(yq_s16p+s*0x0);
const ymm y1 = mulpz2(w1p, getpz2(yq_s16p+s*0x1));
const ymm y2 = mulpz2(w2p, getpz2(yq_s16p+s*0x2));
const ymm y3 = mulpz2(w3p, getpz2(yq_s16p+s*0x3));
const ymm y4 = mulpz2(w4p, getpz2(yq_s16p+s*0x4));
const ymm y5 = mulpz2(w5p, getpz2(yq_s16p+s*0x5));
const ymm y6 = mulpz2(w6p, getpz2(yq_s16p+s*0x6));
const ymm y7 = mulpz2(w7p, getpz2(yq_s16p+s*0x7));
const ymm y8 = mulpz2(w8p, getpz2(yq_s16p+s*0x8));
const ymm y9 = mulpz2(w9p, getpz2(yq_s16p+s*0x9));
const ymm ya = mulpz2(wap, getpz2(yq_s16p+s*0xa));
const ymm yb = mulpz2(wbp, getpz2(yq_s16p+s*0xb));
const ymm yc = mulpz2(wcp, getpz2(yq_s16p+s*0xc));
const ymm yd = mulpz2(wdp, getpz2(yq_s16p+s*0xd));
const ymm ye = mulpz2(wep, getpz2(yq_s16p+s*0xe));
const ymm yf = mulpz2(wfp, getpz2(yq_s16p+s*0xf));
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
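// First stage: radix-2 butterflies pairing inputs k and k+8
// (aXY = yX + yY, sXY = yX - yY); the jxpz2() calls below rotate the
// difference terms by j ahead of the radix-4 recombination.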
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq_sp+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+N2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+N3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+N4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+N5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+N6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+N7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+Na, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+Nb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+Nc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+Nd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+Ne, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+Nf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
template <int N> struct fwdcore<N,1>
{
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_16p = y + 16*p;
const ymm w1p = getpz2(W+p);
const ymm w2p = mulpz2(w1p, w1p);
const ymm w3p = mulpz2(w1p, w2p);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
#if 0
const ymm y0 = getpz3<16>(y_16p+0x0);
const ymm y1 = mulpz2(w1p, getpz3<16>(y_16p+0x1));
const ymm y2 = mulpz2(w2p, getpz3<16>(y_16p+0x2));
const ymm y3 = mulpz2(w3p, getpz3<16>(y_16p+0x3));
const ymm y4 = mulpz2(w4p, getpz3<16>(y_16p+0x4));
const ymm y5 = mulpz2(w5p, getpz3<16>(y_16p+0x5));
const ymm y6 = mulpz2(w6p, getpz3<16>(y_16p+0x6));
const ymm y7 = mulpz2(w7p, getpz3<16>(y_16p+0x7));
const ymm y8 = mulpz2(w8p, getpz3<16>(y_16p+0x8));
const ymm y9 = mulpz2(w9p, getpz3<16>(y_16p+0x9));
const ymm ya = mulpz2(wap, getpz3<16>(y_16p+0xa));
const ymm yb = mulpz2(wbp, getpz3<16>(y_16p+0xb));
const ymm yc = mulpz2(wcp, getpz3<16>(y_16p+0xc));
const ymm yd = mulpz2(wdp, getpz3<16>(y_16p+0xd));
const ymm ye = mulpz2(wep, getpz3<16>(y_16p+0xe));
const ymm yf = mulpz2(wfp, getpz3<16>(y_16p+0xf));
#else
const ymm ab = getpz2(y_16p+0x00);
const ymm cd = getpz2(y_16p+0x02);
const ymm ef = getpz2(y_16p+0x04);
const ymm gh = getpz2(y_16p+0x06);
const ymm ij = getpz2(y_16p+0x08);
const ymm kl = getpz2(y_16p+0x0a);
const ymm mn = getpz2(y_16p+0x0c);
const ymm op = getpz2(y_16p+0x0e);
const ymm AB = getpz2(y_16p+0x10);
const ymm CD = getpz2(y_16p+0x12);
const ymm EF = getpz2(y_16p+0x14);
const ymm GH = getpz2(y_16p+0x16);
const ymm IJ = getpz2(y_16p+0x18);
const ymm KL = getpz2(y_16p+0x1a);
const ymm MN = getpz2(y_16p+0x1c);
const ymm OP = getpz2(y_16p+0x1e);
const ymm y0 = catlo(ab, AB);
const ymm y1 = mulpz2(w1p, cathi(ab, AB));
const ymm y2 = mulpz2(w2p, catlo(cd, CD));
const ymm y3 = mulpz2(w3p, cathi(cd, CD));
const ymm y4 = mulpz2(w4p, catlo(ef, EF));
const ymm y5 = mulpz2(w5p, cathi(ef, EF));
const ymm y6 = mulpz2(w6p, catlo(gh, GH));
const ymm y7 = mulpz2(w7p, cathi(gh, GH));
const ymm y8 = mulpz2(w8p, catlo(ij, IJ));
const ymm y9 = mulpz2(w9p, cathi(ij, IJ));
const ymm ya = mulpz2(wap, catlo(kl, KL));
const ymm yb = mulpz2(wbp, cathi(kl, KL));
const ymm yc = mulpz2(wcp, catlo(mn, MN));
const ymm yd = mulpz2(wdp, cathi(mn, MN));
const ymm ye = mulpz2(wep, catlo(op, OP));
const ymm yf = mulpz2(wfp, cathi(op, OP));
#endif
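// The enabled branch above replaces the strided getpz3<16> gathers of the
// disabled branch: it loads the 16 complex values of rows p and p+1 as
// contiguous ymm pairs and re-interleaves them with catlo/cathi so that
// each register holds {y_k(p), y_k(p+1)} for one butterfly input k.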
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(x_p+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+N2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+N3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+N4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+N5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+N6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+N7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+Na, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+Nb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+Nc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+Nd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+Ne, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+Nf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo> struct fwd0end;
//-----------------------------------------------------------------------------
template <int s> struct fwd0end<16,s,1>
{
void operator()(complex_vector x, complex_vector y) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector yq = y + q;
const ymm y0 = getpz2(yq+s*0x0);
const ymm y1 = getpz2(yq+s*0x1);
const ymm y2 = getpz2(yq+s*0x2);
const ymm y3 = getpz2(yq+s*0x3);
const ymm y4 = getpz2(yq+s*0x4);
const ymm y5 = getpz2(yq+s*0x5);
const ymm y6 = getpz2(yq+s*0x6);
const ymm y7 = getpz2(yq+s*0x7);
const ymm y8 = getpz2(yq+s*0x8);
const ymm y9 = getpz2(yq+s*0x9);
const ymm ya = getpz2(yq+s*0xa);
const ymm yb = getpz2(yq+s*0xb);
const ymm yc = getpz2(yq+s*0xc);
const ymm yd = getpz2(yq+s*0xd);
const ymm ye = getpz2(yq+s*0xe);
const ymm yf = getpz2(yq+s*0xf);
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
template <> struct fwd0end<16,1,1>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm y0 = getpz(y[0x0]);
const xmm y1 = getpz(y[0x1]);
const xmm y2 = getpz(y[0x2]);
const xmm y3 = getpz(y[0x3]);
const xmm y4 = getpz(y[0x4]);
const xmm y5 = getpz(y[0x5]);
const xmm y6 = getpz(y[0x6]);
const xmm y7 = getpz(y[0x7]);
const xmm y8 = getpz(y[0x8]);
const xmm y9 = getpz(y[0x9]);
const xmm ya = getpz(y[0xa]);
const xmm yb = getpz(y[0xb]);
const xmm yc = getpz(y[0xc]);
const xmm yd = getpz(y[0xd]);
const xmm ye = getpz(y[0xe]);
const xmm yf = getpz(y[0xf]);
const xmm a08 = addpz(y0, y8); const xmm s08 = subpz(y0, y8);
const xmm a4c = addpz(y4, yc); const xmm s4c = subpz(y4, yc);
const xmm a2a = addpz(y2, ya); const xmm s2a = subpz(y2, ya);
const xmm a6e = addpz(y6, ye); const xmm s6e = subpz(y6, ye);
const xmm a19 = addpz(y1, y9); const xmm s19 = subpz(y1, y9);
const xmm a5d = addpz(y5, yd); const xmm s5d = subpz(y5, yd);
const xmm a3b = addpz(y3, yb); const xmm s3b = subpz(y3, yb);
const xmm a7f = addpz(y7, yf); const xmm s7f = subpz(y7, yf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
//-----------------------------------------------------------------------------
template <int s> struct fwd0end<16,s,0>
{
void operator()(complex_vector x, complex_vector) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
const ymm x0 = getpz2(xq+s*0x0);
const ymm x1 = getpz2(xq+s*0x1);
const ymm x2 = getpz2(xq+s*0x2);
const ymm x3 = getpz2(xq+s*0x3);
const ymm x4 = getpz2(xq+s*0x4);
const ymm x5 = getpz2(xq+s*0x5);
const ymm x6 = getpz2(xq+s*0x6);
const ymm x7 = getpz2(xq+s*0x7);
const ymm x8 = getpz2(xq+s*0x8);
const ymm x9 = getpz2(xq+s*0x9);
const ymm xa = getpz2(xq+s*0xa);
const ymm xb = getpz2(xq+s*0xb);
const ymm xc = getpz2(xq+s*0xc);
const ymm xd = getpz2(xq+s*0xd);
const ymm xe = getpz2(xq+s*0xe);
const ymm xf = getpz2(xq+s*0xf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
template <> struct fwd0end<16,1,0>
{
inline void operator()(complex_vector x, complex_vector) const noexcept
{
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = getpz(x[0x0]);
const xmm x1 = getpz(x[0x1]);
const xmm x2 = getpz(x[0x2]);
const xmm x3 = getpz(x[0x3]);
const xmm x4 = getpz(x[0x4]);
const xmm x5 = getpz(x[0x5]);
const xmm x6 = getpz(x[0x6]);
const xmm x7 = getpz(x[0x7]);
const xmm x8 = getpz(x[0x8]);
const xmm x9 = getpz(x[0x9]);
const xmm xa = getpz(x[0xa]);
const xmm xb = getpz(x[0xb]);
const xmm xc = getpz(x[0xc]);
const xmm xd = getpz(x[0xd]);
const xmm xe = getpz(x[0xe]);
const xmm xf = getpz(x[0xf]);
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
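// fwdnend: final radix-16 stage of the scaled forward transform; identical
// to fwd0end except that every input is multiplied by rN = 1/N on load.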
template <int n, int s, bool eo> struct fwdnend;
//-----------------------------------------------------------------------------
template <int s> struct fwdnend<16,s,1>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
static const ymm rN = { 1.0/N, 1.0/N, 1.0/N, 1.0/N };
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector yq = y + q;
const ymm y0 = mulpd2(rN, getpz2(yq+s*0x0));
const ymm y1 = mulpd2(rN, getpz2(yq+s*0x1));
const ymm y2 = mulpd2(rN, getpz2(yq+s*0x2));
const ymm y3 = mulpd2(rN, getpz2(yq+s*0x3));
const ymm y4 = mulpd2(rN, getpz2(yq+s*0x4));
const ymm y5 = mulpd2(rN, getpz2(yq+s*0x5));
const ymm y6 = mulpd2(rN, getpz2(yq+s*0x6));
const ymm y7 = mulpd2(rN, getpz2(yq+s*0x7));
const ymm y8 = mulpd2(rN, getpz2(yq+s*0x8));
const ymm y9 = mulpd2(rN, getpz2(yq+s*0x9));
const ymm ya = mulpd2(rN, getpz2(yq+s*0xa));
const ymm yb = mulpd2(rN, getpz2(yq+s*0xb));
const ymm yc = mulpd2(rN, getpz2(yq+s*0xc));
const ymm yd = mulpd2(rN, getpz2(yq+s*0xd));
const ymm ye = mulpd2(rN, getpz2(yq+s*0xe));
const ymm yf = mulpd2(rN, getpz2(yq+s*0xf));
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
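// s == 1 specialization of the scaled out-of-place stage (xmm, omp single).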
template <> struct fwdnend<16,1,1>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
static const xmm rN = { 1.0/16, 1.0/16 };
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm y0 = mulpd(rN, getpz(y[0x0]));
const xmm y1 = mulpd(rN, getpz(y[0x1]));
const xmm y2 = mulpd(rN, getpz(y[0x2]));
const xmm y3 = mulpd(rN, getpz(y[0x3]));
const xmm y4 = mulpd(rN, getpz(y[0x4]));
const xmm y5 = mulpd(rN, getpz(y[0x5]));
const xmm y6 = mulpd(rN, getpz(y[0x6]));
const xmm y7 = mulpd(rN, getpz(y[0x7]));
const xmm y8 = mulpd(rN, getpz(y[0x8]));
const xmm y9 = mulpd(rN, getpz(y[0x9]));
const xmm ya = mulpd(rN, getpz(y[0xa]));
const xmm yb = mulpd(rN, getpz(y[0xb]));
const xmm yc = mulpd(rN, getpz(y[0xc]));
const xmm yd = mulpd(rN, getpz(y[0xd]));
const xmm ye = mulpd(rN, getpz(y[0xe]));
const xmm yf = mulpd(rN, getpz(y[0xf]));
const xmm a08 = addpz(y0, y8); const xmm s08 = subpz(y0, y8);
const xmm a4c = addpz(y4, yc); const xmm s4c = subpz(y4, yc);
const xmm a2a = addpz(y2, ya); const xmm s2a = subpz(y2, ya);
const xmm a6e = addpz(y6, ye); const xmm s6e = subpz(y6, ye);
const xmm a19 = addpz(y1, y9); const xmm s19 = subpz(y1, y9);
const xmm a5d = addpz(y5, yd); const xmm s5d = subpz(y5, yd);
const xmm a3b = addpz(y3, yb); const xmm s3b = subpz(y3, yb);
const xmm a7f = addpz(y7, yf); const xmm s7f = subpz(y7, yf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
//-----------------------------------------------------------------------------
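// eo == 0 variant: the data already lives in x, so the stage runs in place
// and the second (work-buffer) argument is ignored.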
template <int s> struct fwdnend<16,s,0>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector) const noexcept
{
static const ymm rN = { 1.0/N, 1.0/N, 1.0/N, 1.0/N };
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
const ymm x0 = mulpd2(rN, getpz2(xq+s*0x0));
const ymm x1 = mulpd2(rN, getpz2(xq+s*0x1));
const ymm x2 = mulpd2(rN, getpz2(xq+s*0x2));
const ymm x3 = mulpd2(rN, getpz2(xq+s*0x3));
const ymm x4 = mulpd2(rN, getpz2(xq+s*0x4));
const ymm x5 = mulpd2(rN, getpz2(xq+s*0x5));
const ymm x6 = mulpd2(rN, getpz2(xq+s*0x6));
const ymm x7 = mulpd2(rN, getpz2(xq+s*0x7));
const ymm x8 = mulpd2(rN, getpz2(xq+s*0x8));
const ymm x9 = mulpd2(rN, getpz2(xq+s*0x9));
const ymm xa = mulpd2(rN, getpz2(xq+s*0xa));
const ymm xb = mulpd2(rN, getpz2(xq+s*0xb));
const ymm xc = mulpd2(rN, getpz2(xq+s*0xc));
const ymm xd = mulpd2(rN, getpz2(xq+s*0xd));
const ymm xe = mulpd2(rN, getpz2(xq+s*0xe));
const ymm xf = mulpd2(rN, getpz2(xq+s*0xf));
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
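// s == 1, in-place: one scaled 16-point xmm butterfly under omp single.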
template <> struct fwdnend<16,1,0>
{
inline void operator()(complex_vector x, complex_vector) const noexcept
{
static const xmm rN = { 1.0/16, 1.0/16 };
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = mulpd(rN, getpz(x[0x0]));
const xmm x1 = mulpd(rN, getpz(x[0x1]));
const xmm x2 = mulpd(rN, getpz(x[0x2]));
const xmm x3 = mulpd(rN, getpz(x[0x3]));
const xmm x4 = mulpd(rN, getpz(x[0x4]));
const xmm x5 = mulpd(rN, getpz(x[0x5]));
const xmm x6 = mulpd(rN, getpz(x[0x6]));
const xmm x7 = mulpd(rN, getpz(x[0x7]));
const xmm x8 = mulpd(rN, getpz(x[0x8]));
const xmm x9 = mulpd(rN, getpz(x[0x9]));
const xmm xa = mulpd(rN, getpz(x[0xa]));
const xmm xb = mulpd(rN, getpz(x[0xb]));
const xmm xc = mulpd(rN, getpz(x[0xc]));
const xmm xd = mulpd(rN, getpz(x[0xd]));
const xmm xe = mulpd(rN, getpz(x[0xe]));
const xmm xf = mulpd(rN, getpz(x[0xf]));
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Forward FFT
///////////////////////////////////////////////////////////////////////////////
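// fwd0fft<n,s,eo> drives the full transform: it recurses at radix 16
// (n -> n/16, s -> 16*s, buffers swapped, eo toggled) and then applies a
// fwdcore butterfly pass. n = 16/8/4/2 terminate the recursion; the 8/4/2
// cases reuse the OTFFT_AVXDIT8omp / OTFFT_AVXDIT4omp end stages.
// E.g. (hypothetical n = 256, s = 1): fwd0fft<256,1,0>()(x,y,W) runs
// fwd0fft<16,16,1>()(y,x,W) -- i.e. fwd0end<16,16,1> on the swapped
// buffers -- followed by fwdcore<256,1>()(x,y,W).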
template <int n, int s, bool eo> struct fwd0fft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
fwd0fft<n/16,16*s,!eo>()(y, x, W);
fwdcore<n,s>()(x, y, W);
}
};
template <int s, bool eo> struct fwd0fft<16,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
fwd0end<16,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwd0fft<8,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT8omp::fwd0end<8,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwd0fft<4,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::fwd0end<4,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwd0fft<2,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::fwd0end<2,s,eo>()(x, y);
}
};
//-----------------------------------------------------------------------------
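// fwdnfft: same recursion as fwd0fft, but it terminates in the scaled
// fwdnend stages, so the forward transform comes out normalized by 1/N.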
template <int n, int s, bool eo> struct fwdnfft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
fwdnfft<n/16,16*s,!eo>()(y, x, W);
fwdcore<n,s>()(x, y, W);
}
};
template <int s, bool eo> struct fwdnfft<16,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
fwdnend<16,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwdnfft<8,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT8omp::fwdnend<8,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwdnfft<4,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::fwdnend<4,s,eo>()(x, y);
}
};
template <int s, bool eo> struct fwdnfft<2,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::fwdnend<2,s,eo>()(x, y);
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse butterfly operation
///////////////////////////////////////////////////////////////////////////////
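// invcore: inverse counterpart of fwdcore. Twiddles are conjugated by
// indexing the table backwards (W[N - k*sp]), and output k combines the
// same terms as output 16-k of the forward stage, as the inverse DFT
// index reversal requires.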
template <int n, int s> struct invcore
{
static const int n1 = n/16;
static const int N = n*s;
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int i = 0; i < N/32; i++) {
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s16p = 16*sp;
const ymm w1p = duppz3(W[N-1*sp]);
const ymm w2p = duppz3(W[N-2*sp]);
const ymm w3p = duppz3(W[N-3*sp]);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
complex_vector xq_sp = x + q + sp;
complex_vector yq_s16p = y + q + s16p;
const ymm y0 = getpz2(yq_s16p+s*0x0);
const ymm y1 = mulpz2(w1p, getpz2(yq_s16p+s*0x1));
const ymm y2 = mulpz2(w2p, getpz2(yq_s16p+s*0x2));
const ymm y3 = mulpz2(w3p, getpz2(yq_s16p+s*0x3));
const ymm y4 = mulpz2(w4p, getpz2(yq_s16p+s*0x4));
const ymm y5 = mulpz2(w5p, getpz2(yq_s16p+s*0x5));
const ymm y6 = mulpz2(w6p, getpz2(yq_s16p+s*0x6));
const ymm y7 = mulpz2(w7p, getpz2(yq_s16p+s*0x7));
const ymm y8 = mulpz2(w8p, getpz2(yq_s16p+s*0x8));
const ymm y9 = mulpz2(w9p, getpz2(yq_s16p+s*0x9));
const ymm ya = mulpz2(wap, getpz2(yq_s16p+s*0xa));
const ymm yb = mulpz2(wbp, getpz2(yq_s16p+s*0xb));
const ymm yc = mulpz2(wcp, getpz2(yq_s16p+s*0xc));
const ymm yd = mulpz2(wdp, getpz2(yq_s16p+s*0xd));
const ymm ye = mulpz2(wep, getpz2(yq_s16p+s*0xe));
const ymm yf = mulpz2(wfp, getpz2(yq_s16p+s*0xf));
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq_sp+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+N2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+N3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+N4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+N5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+N6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+N7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+Na, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+Nb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+Nc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+Nd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+Ne, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+Nf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
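// s == 1 column case: the base twiddle is read from the table and
// conjugated in registers (cnjpz2); higher powers are built by multiplies.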
template <int N> struct invcore<N,1>
{
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_16p = y + 16*p;
const ymm w1p = cnjpz2(getpz2(W+p));
const ymm w2p = mulpz2(w1p, w1p);
const ymm w3p = mulpz2(w1p, w2p);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
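// Two equivalent ways to gather the 16 strided inputs: the disabled branch
// uses stride-16 gathers (getpz3<16>); the active branch loads adjacent
// complex pairs and interleaves them with catlo/cathi, trading the gathers
// for contiguous loads plus shuffles (presumably the faster option here).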
#if 0
const ymm y0 = getpz3<16>(y_16p+0x0);
const ymm y1 = mulpz2(w1p, getpz3<16>(y_16p+0x1));
const ymm y2 = mulpz2(w2p, getpz3<16>(y_16p+0x2));
const ymm y3 = mulpz2(w3p, getpz3<16>(y_16p+0x3));
const ymm y4 = mulpz2(w4p, getpz3<16>(y_16p+0x4));
const ymm y5 = mulpz2(w5p, getpz3<16>(y_16p+0x5));
const ymm y6 = mulpz2(w6p, getpz3<16>(y_16p+0x6));
const ymm y7 = mulpz2(w7p, getpz3<16>(y_16p+0x7));
const ymm y8 = mulpz2(w8p, getpz3<16>(y_16p+0x8));
const ymm y9 = mulpz2(w9p, getpz3<16>(y_16p+0x9));
const ymm ya = mulpz2(wap, getpz3<16>(y_16p+0xa));
const ymm yb = mulpz2(wbp, getpz3<16>(y_16p+0xb));
const ymm yc = mulpz2(wcp, getpz3<16>(y_16p+0xc));
const ymm yd = mulpz2(wdp, getpz3<16>(y_16p+0xd));
const ymm ye = mulpz2(wep, getpz3<16>(y_16p+0xe));
const ymm yf = mulpz2(wfp, getpz3<16>(y_16p+0xf));
#else
const ymm ab = getpz2(y_16p+0x00);
const ymm cd = getpz2(y_16p+0x02);
const ymm ef = getpz2(y_16p+0x04);
const ymm gh = getpz2(y_16p+0x06);
const ymm ij = getpz2(y_16p+0x08);
const ymm kl = getpz2(y_16p+0x0a);
const ymm mn = getpz2(y_16p+0x0c);
const ymm op = getpz2(y_16p+0x0e);
const ymm AB = getpz2(y_16p+0x10);
const ymm CD = getpz2(y_16p+0x12);
const ymm EF = getpz2(y_16p+0x14);
const ymm GH = getpz2(y_16p+0x16);
const ymm IJ = getpz2(y_16p+0x18);
const ymm KL = getpz2(y_16p+0x1a);
const ymm MN = getpz2(y_16p+0x1c);
const ymm OP = getpz2(y_16p+0x1e);
const ymm y0 = catlo(ab, AB);
const ymm y1 = mulpz2(w1p, cathi(ab, AB));
const ymm y2 = mulpz2(w2p, catlo(cd, CD));
const ymm y3 = mulpz2(w3p, cathi(cd, CD));
const ymm y4 = mulpz2(w4p, catlo(ef, EF));
const ymm y5 = mulpz2(w5p, cathi(ef, EF));
const ymm y6 = mulpz2(w6p, catlo(gh, GH));
const ymm y7 = mulpz2(w7p, cathi(gh, GH));
const ymm y8 = mulpz2(w8p, catlo(ij, IJ));
const ymm y9 = mulpz2(w9p, cathi(ij, IJ));
const ymm ya = mulpz2(wap, catlo(kl, KL));
const ymm yb = mulpz2(wbp, cathi(kl, KL));
const ymm yc = mulpz2(wcp, catlo(mn, MN));
const ymm yd = mulpz2(wdp, cathi(mn, MN));
const ymm ye = mulpz2(wep, catlo(op, OP));
const ymm yf = mulpz2(wfp, cathi(op, OP));
#endif
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(x_p+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+N2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+N3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+N4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+N5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+N6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+N7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+Na, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+Nb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+Nc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+Nd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+Ne, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+Nf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
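// inv0end: final radix-16 stage of the un-scaled inverse transform
// (no 1/N factor; the normalized inverse goes through invnend instead).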
template <int n, int s, bool eo> struct inv0end;
//-----------------------------------------------------------------------------
template <int s> struct inv0end<16,s,1>
{
void operator()(complex_vector x, complex_vector y) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector yq = y + q;
const ymm y0 = getpz2(yq+s*0x0);
const ymm y1 = getpz2(yq+s*0x1);
const ymm y2 = getpz2(yq+s*0x2);
const ymm y3 = getpz2(yq+s*0x3);
const ymm y4 = getpz2(yq+s*0x4);
const ymm y5 = getpz2(yq+s*0x5);
const ymm y6 = getpz2(yq+s*0x6);
const ymm y7 = getpz2(yq+s*0x7);
const ymm y8 = getpz2(yq+s*0x8);
const ymm y9 = getpz2(yq+s*0x9);
const ymm ya = getpz2(yq+s*0xa);
const ymm yb = getpz2(yq+s*0xb);
const ymm yc = getpz2(yq+s*0xc);
const ymm yd = getpz2(yq+s*0xd);
const ymm ye = getpz2(yq+s*0xe);
const ymm yf = getpz2(yq+s*0xf);
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
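// s == 1 specialization (xmm lanes, omp single).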
template <> struct inv0end<16,1,1>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm y0 = getpz(y[0x0]);
const xmm y1 = getpz(y[0x1]);
const xmm y2 = getpz(y[0x2]);
const xmm y3 = getpz(y[0x3]);
const xmm y4 = getpz(y[0x4]);
const xmm y5 = getpz(y[0x5]);
const xmm y6 = getpz(y[0x6]);
const xmm y7 = getpz(y[0x7]);
const xmm y8 = getpz(y[0x8]);
const xmm y9 = getpz(y[0x9]);
const xmm ya = getpz(y[0xa]);
const xmm yb = getpz(y[0xb]);
const xmm yc = getpz(y[0xc]);
const xmm yd = getpz(y[0xd]);
const xmm ye = getpz(y[0xe]);
const xmm yf = getpz(y[0xf]);
const xmm a08 = addpz(y0, y8); const xmm s08 = subpz(y0, y8);
const xmm a4c = addpz(y4, yc); const xmm s4c = subpz(y4, yc);
const xmm a2a = addpz(y2, ya); const xmm s2a = subpz(y2, ya);
const xmm a6e = addpz(y6, ye); const xmm s6e = subpz(y6, ye);
const xmm a19 = addpz(y1, y9); const xmm s19 = subpz(y1, y9);
const xmm a5d = addpz(y5, yd); const xmm s5d = subpz(y5, yd);
const xmm a3b = addpz(y3, yb); const xmm s3b = subpz(y3, yb);
const xmm a7f = addpz(y7, yf); const xmm s7f = subpz(y7, yf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
//-----------------------------------------------------------------------------
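// eo == 0: in-place variant reading and writing x; the work buffer is unused.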
template <int s> struct inv0end<16,s,0>
{
void operator()(complex_vector x, complex_vector) const noexcept
{
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
const ymm x0 = getpz2(xq+s*0x0);
const ymm x1 = getpz2(xq+s*0x1);
const ymm x2 = getpz2(xq+s*0x2);
const ymm x3 = getpz2(xq+s*0x3);
const ymm x4 = getpz2(xq+s*0x4);
const ymm x5 = getpz2(xq+s*0x5);
const ymm x6 = getpz2(xq+s*0x6);
const ymm x7 = getpz2(xq+s*0x7);
const ymm x8 = getpz2(xq+s*0x8);
const ymm x9 = getpz2(xq+s*0x9);
const ymm xa = getpz2(xq+s*0xa);
const ymm xb = getpz2(xq+s*0xb);
const ymm xc = getpz2(xq+s*0xc);
const ymm xd = getpz2(xq+s*0xd);
const ymm xe = getpz2(xq+s*0xe);
const ymm xf = getpz2(xq+s*0xf);
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
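// s == 1, in-place xmm specialization (omp single).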
template <> struct inv0end<16,1,0>
{
inline void operator()(complex_vector x, complex_vector) const noexcept
{
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = getpz(x[0x0]);
const xmm x1 = getpz(x[0x1]);
const xmm x2 = getpz(x[0x2]);
const xmm x3 = getpz(x[0x3]);
const xmm x4 = getpz(x[0x4]);
const xmm x5 = getpz(x[0x5]);
const xmm x6 = getpz(x[0x6]);
const xmm x7 = getpz(x[0x7]);
const xmm x8 = getpz(x[0x8]);
const xmm x9 = getpz(x[0x9]);
const xmm xa = getpz(x[0xa]);
const xmm xb = getpz(x[0xb]);
const xmm xc = getpz(x[0xc]);
const xmm xd = getpz(x[0xd]);
const xmm xe = getpz(x[0xe]);
const xmm xf = getpz(x[0xf]);
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo> struct invnend;
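// The invnend butterflies are the terminal stages with the 1/N normalization
// folded into the input loads (note the rN = 1/N factors below); the inv0end
// variants above are otherwise identical but omit the scaling.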
//-----------------------------------------------------------------------------
template <int s> struct invnend<16,s,1>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
static const ymm rN = { 1.0/N, 1.0/N, 1.0/N, 1.0/N };
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector yq = y + q;
const ymm y0 = mulpd2(rN, getpz2(yq+s*0x0));
const ymm y1 = mulpd2(rN, getpz2(yq+s*0x1));
const ymm y2 = mulpd2(rN, getpz2(yq+s*0x2));
const ymm y3 = mulpd2(rN, getpz2(yq+s*0x3));
const ymm y4 = mulpd2(rN, getpz2(yq+s*0x4));
const ymm y5 = mulpd2(rN, getpz2(yq+s*0x5));
const ymm y6 = mulpd2(rN, getpz2(yq+s*0x6));
const ymm y7 = mulpd2(rN, getpz2(yq+s*0x7));
const ymm y8 = mulpd2(rN, getpz2(yq+s*0x8));
const ymm y9 = mulpd2(rN, getpz2(yq+s*0x9));
const ymm ya = mulpd2(rN, getpz2(yq+s*0xa));
const ymm yb = mulpd2(rN, getpz2(yq+s*0xb));
const ymm yc = mulpd2(rN, getpz2(yq+s*0xc));
const ymm yd = mulpd2(rN, getpz2(yq+s*0xd));
const ymm ye = mulpd2(rN, getpz2(yq+s*0xe));
const ymm yf = mulpd2(rN, getpz2(yq+s*0xf));
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
template <> struct invnend<16,1,1>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
static const xmm rN = { 1.0/16, 1.0/16 };
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm y0 = mulpd(rN, getpz(y[0x0]));
const xmm y1 = mulpd(rN, getpz(y[0x1]));
const xmm y2 = mulpd(rN, getpz(y[0x2]));
const xmm y3 = mulpd(rN, getpz(y[0x3]));
const xmm y4 = mulpd(rN, getpz(y[0x4]));
const xmm y5 = mulpd(rN, getpz(y[0x5]));
const xmm y6 = mulpd(rN, getpz(y[0x6]));
const xmm y7 = mulpd(rN, getpz(y[0x7]));
const xmm y8 = mulpd(rN, getpz(y[0x8]));
const xmm y9 = mulpd(rN, getpz(y[0x9]));
const xmm ya = mulpd(rN, getpz(y[0xa]));
const xmm yb = mulpd(rN, getpz(y[0xb]));
const xmm yc = mulpd(rN, getpz(y[0xc]));
const xmm yd = mulpd(rN, getpz(y[0xd]));
const xmm ye = mulpd(rN, getpz(y[0xe]));
const xmm yf = mulpd(rN, getpz(y[0xf]));
const xmm a08 = addpz(y0, y8); const xmm s08 = subpz(y0, y8);
const xmm a4c = addpz(y4, yc); const xmm s4c = subpz(y4, yc);
const xmm a2a = addpz(y2, ya); const xmm s2a = subpz(y2, ya);
const xmm a6e = addpz(y6, ye); const xmm s6e = subpz(y6, ye);
const xmm a19 = addpz(y1, y9); const xmm s19 = subpz(y1, y9);
const xmm a5d = addpz(y5, yd); const xmm s5d = subpz(y5, yd);
const xmm a3b = addpz(y3, yb); const xmm s3b = subpz(y3, yb);
const xmm a7f = addpz(y7, yf); const xmm s7f = subpz(y7, yf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
//-----------------------------------------------------------------------------
template <int s> struct invnend<16,s,0>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector) const noexcept
{
static const ymm rN = { 1.0/N, 1.0/N, 1.0/N, 1.0/N };
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
const ymm x0 = mulpd2(rN, getpz2(xq+s*0x0));
const ymm x1 = mulpd2(rN, getpz2(xq+s*0x1));
const ymm x2 = mulpd2(rN, getpz2(xq+s*0x2));
const ymm x3 = mulpd2(rN, getpz2(xq+s*0x3));
const ymm x4 = mulpd2(rN, getpz2(xq+s*0x4));
const ymm x5 = mulpd2(rN, getpz2(xq+s*0x5));
const ymm x6 = mulpd2(rN, getpz2(xq+s*0x6));
const ymm x7 = mulpd2(rN, getpz2(xq+s*0x7));
const ymm x8 = mulpd2(rN, getpz2(xq+s*0x8));
const ymm x9 = mulpd2(rN, getpz2(xq+s*0x9));
const ymm xa = mulpd2(rN, getpz2(xq+s*0xa));
const ymm xb = mulpd2(rN, getpz2(xq+s*0xb));
const ymm xc = mulpd2(rN, getpz2(xq+s*0xc));
const ymm xd = mulpd2(rN, getpz2(xq+s*0xd));
const ymm xe = mulpd2(rN, getpz2(xq+s*0xe));
const ymm xf = mulpd2(rN, getpz2(xq+s*0xf));
const ymm a08 = addpz2(x0, x8); const ymm s08 = subpz2(x0, x8);
const ymm a4c = addpz2(x4, xc); const ymm s4c = subpz2(x4, xc);
const ymm a2a = addpz2(x2, xa); const ymm s2a = subpz2(x2, xa);
const ymm a6e = addpz2(x6, xe); const ymm s6e = subpz2(x6, xe);
const ymm a19 = addpz2(x1, x9); const ymm s19 = subpz2(x1, x9);
const ymm a5d = addpz2(x5, xd); const ymm s5d = subpz2(x5, xd);
const ymm a3b = addpz2(x3, xb); const ymm s3b = subpz2(x3, xb);
const ymm a7f = addpz2(x7, xf); const ymm s7f = subpz2(x7, xf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
template <> struct invnend<16,1,0>
{
inline void operator()(complex_vector x, complex_vector) const noexcept
{
static const xmm rN = { 1.0/16, 1.0/16 };
#ifdef _OPENMP
#pragma omp single
#endif
{
zeroupper();
const xmm x0 = mulpd(rN, getpz(x[0x0]));
const xmm x1 = mulpd(rN, getpz(x[0x1]));
const xmm x2 = mulpd(rN, getpz(x[0x2]));
const xmm x3 = mulpd(rN, getpz(x[0x3]));
const xmm x4 = mulpd(rN, getpz(x[0x4]));
const xmm x5 = mulpd(rN, getpz(x[0x5]));
const xmm x6 = mulpd(rN, getpz(x[0x6]));
const xmm x7 = mulpd(rN, getpz(x[0x7]));
const xmm x8 = mulpd(rN, getpz(x[0x8]));
const xmm x9 = mulpd(rN, getpz(x[0x9]));
const xmm xa = mulpd(rN, getpz(x[0xa]));
const xmm xb = mulpd(rN, getpz(x[0xb]));
const xmm xc = mulpd(rN, getpz(x[0xc]));
const xmm xd = mulpd(rN, getpz(x[0xd]));
const xmm xe = mulpd(rN, getpz(x[0xe]));
const xmm xf = mulpd(rN, getpz(x[0xf]));
const xmm a08 = addpz(x0, x8); const xmm s08 = subpz(x0, x8);
const xmm a4c = addpz(x4, xc); const xmm s4c = subpz(x4, xc);
const xmm a2a = addpz(x2, xa); const xmm s2a = subpz(x2, xa);
const xmm a6e = addpz(x6, xe); const xmm s6e = subpz(x6, xe);
const xmm a19 = addpz(x1, x9); const xmm s19 = subpz(x1, x9);
const xmm a5d = addpz(x5, xd); const xmm s5d = subpz(x5, xd);
const xmm a3b = addpz(x3, xb); const xmm s3b = subpz(x3, xb);
const xmm a7f = addpz(x7, xf); const xmm s7f = subpz(x7, xf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse FFT
///////////////////////////////////////////////////////////////////////////////
template <int n, int s, bool eo> struct inv0fft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
inv0fft<n/16,16*s,!eo>()(y, x, W);
invcore<n,s>()(x, y, W);
}
};
template <int s, bool eo> struct inv0fft<16,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
inv0end<16,s,eo>()(x, y);
}
};
template <int s, bool eo> struct inv0fft<8,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT8omp::inv0end<8,s,eo>()(x, y);
}
};
template <int s, bool eo> struct inv0fft<4,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::inv0end<4,s,eo>()(x, y);
}
};
template <int s, bool eo> struct inv0fft<2,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::inv0end<2,s,eo>()(x, y);
}
};
//-----------------------------------------------------------------------------
template <int n, int s, bool eo> struct invnfft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
invnfft<n/16,16*s,!eo>()(y, x, W);
invcore<n,s>()(x, y, W);
}
};
template <int s, bool eo> struct invnfft<16,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
invnend<16,s,eo>()(x, y);
}
};
template <int s, bool eo> struct invnfft<8,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT8omp::invnend<8,s,eo>()(x, y);
}
};
template <int s, bool eo> struct invnfft<4,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::invnend<4,s,eo>()(x, y);
}
};
template <int s, bool eo> struct invnfft<2,s,eo>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::invnend<2,s,eo>()(x, y);
}
};
///////////////////////////////////////////////////////////////////////////////
// Power-of-two FFT routines
///////////////////////////////////////////////////////////////////////////////
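// fwd/inv below map the runtime log_N to a compile-time plan: each switch
// case instantiates the recursive template for N = 2^log_N (N up to 2^24).
// The enclosing "omp parallel" creates the thread team once; the "omp for"
// and "omp single" directives inside the butterfly kernels then share it.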
inline void fwd(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: fwdnfft<(1<< 1),1,0>()(x, y, W); break;
case 2: fwdnfft<(1<< 2),1,0>()(x, y, W); break;
case 3: fwdnfft<(1<< 3),1,0>()(x, y, W); break;
case 4: fwdnfft<(1<< 4),1,0>()(x, y, W); break;
case 5: fwdnfft<(1<< 5),1,0>()(x, y, W); break;
case 6: fwdnfft<(1<< 6),1,0>()(x, y, W); break;
case 7: fwdnfft<(1<< 7),1,0>()(x, y, W); break;
case 8: fwdnfft<(1<< 8),1,0>()(x, y, W); break;
case 9: fwdnfft<(1<< 9),1,0>()(x, y, W); break;
case 10: fwdnfft<(1<<10),1,0>()(x, y, W); break;
case 11: fwdnfft<(1<<11),1,0>()(x, y, W); break;
case 12: fwdnfft<(1<<12),1,0>()(x, y, W); break;
case 13: fwdnfft<(1<<13),1,0>()(x, y, W); break;
case 14: fwdnfft<(1<<14),1,0>()(x, y, W); break;
case 15: fwdnfft<(1<<15),1,0>()(x, y, W); break;
case 16: fwdnfft<(1<<16),1,0>()(x, y, W); break;
case 17: fwdnfft<(1<<17),1,0>()(x, y, W); break;
case 18: fwdnfft<(1<<18),1,0>()(x, y, W); break;
case 19: fwdnfft<(1<<19),1,0>()(x, y, W); break;
case 20: fwdnfft<(1<<20),1,0>()(x, y, W); break;
case 21: fwdnfft<(1<<21),1,0>()(x, y, W); break;
case 22: fwdnfft<(1<<22),1,0>()(x, y, W); break;
case 23: fwdnfft<(1<<23),1,0>()(x, y, W); break;
case 24: fwdnfft<(1<<24),1,0>()(x, y, W); break;
}
}
inline void fwd0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: fwd0fft<(1<< 1),1,0>()(x, y, W); break;
case 2: fwd0fft<(1<< 2),1,0>()(x, y, W); break;
case 3: fwd0fft<(1<< 3),1,0>()(x, y, W); break;
case 4: fwd0fft<(1<< 4),1,0>()(x, y, W); break;
case 5: fwd0fft<(1<< 5),1,0>()(x, y, W); break;
case 6: fwd0fft<(1<< 6),1,0>()(x, y, W); break;
case 7: fwd0fft<(1<< 7),1,0>()(x, y, W); break;
case 8: fwd0fft<(1<< 8),1,0>()(x, y, W); break;
case 9: fwd0fft<(1<< 9),1,0>()(x, y, W); break;
case 10: fwd0fft<(1<<10),1,0>()(x, y, W); break;
case 11: fwd0fft<(1<<11),1,0>()(x, y, W); break;
case 12: fwd0fft<(1<<12),1,0>()(x, y, W); break;
case 13: fwd0fft<(1<<13),1,0>()(x, y, W); break;
case 14: fwd0fft<(1<<14),1,0>()(x, y, W); break;
case 15: fwd0fft<(1<<15),1,0>()(x, y, W); break;
case 16: fwd0fft<(1<<16),1,0>()(x, y, W); break;
case 17: fwd0fft<(1<<17),1,0>()(x, y, W); break;
case 18: fwd0fft<(1<<18),1,0>()(x, y, W); break;
case 19: fwd0fft<(1<<19),1,0>()(x, y, W); break;
case 20: fwd0fft<(1<<20),1,0>()(x, y, W); break;
case 21: fwd0fft<(1<<21),1,0>()(x, y, W); break;
case 22: fwd0fft<(1<<22),1,0>()(x, y, W); break;
case 23: fwd0fft<(1<<23),1,0>()(x, y, W); break;
case 24: fwd0fft<(1<<24),1,0>()(x, y, W); break;
}
}
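// fwd above dispatches to the scaled fwdnfft plan while fwd0 uses the
// unscaled fwd0fft, so fwdn below is simply an alias for fwd.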
inline void fwdn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
fwd(log_N, x, y, W);
}
inline void fwd0o(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: fwd0fft<(1<< 1),1,1>()(y, x, W); break;
case 2: fwd0fft<(1<< 2),1,1>()(y, x, W); break;
case 3: fwd0fft<(1<< 3),1,1>()(y, x, W); break;
case 4: fwd0fft<(1<< 4),1,1>()(y, x, W); break;
case 5: fwd0fft<(1<< 5),1,1>()(y, x, W); break;
case 6: fwd0fft<(1<< 6),1,1>()(y, x, W); break;
case 7: fwd0fft<(1<< 7),1,1>()(y, x, W); break;
case 8: fwd0fft<(1<< 8),1,1>()(y, x, W); break;
case 9: fwd0fft<(1<< 9),1,1>()(y, x, W); break;
case 10: fwd0fft<(1<<10),1,1>()(y, x, W); break;
case 11: fwd0fft<(1<<11),1,1>()(y, x, W); break;
case 12: fwd0fft<(1<<12),1,1>()(y, x, W); break;
case 13: fwd0fft<(1<<13),1,1>()(y, x, W); break;
case 14: fwd0fft<(1<<14),1,1>()(y, x, W); break;
case 15: fwd0fft<(1<<15),1,1>()(y, x, W); break;
case 16: fwd0fft<(1<<16),1,1>()(y, x, W); break;
case 17: fwd0fft<(1<<17),1,1>()(y, x, W); break;
case 18: fwd0fft<(1<<18),1,1>()(y, x, W); break;
case 19: fwd0fft<(1<<19),1,1>()(y, x, W); break;
case 20: fwd0fft<(1<<20),1,1>()(y, x, W); break;
case 21: fwd0fft<(1<<21),1,1>()(y, x, W); break;
case 22: fwd0fft<(1<<22),1,1>()(y, x, W); break;
case 23: fwd0fft<(1<<23),1,1>()(y, x, W); break;
case 24: fwd0fft<(1<<24),1,1>()(y, x, W); break;
}
}
inline void fwdno(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: fwdnfft<(1<< 1),1,1>()(y, x, W); break;
case 2: fwdnfft<(1<< 2),1,1>()(y, x, W); break;
case 3: fwdnfft<(1<< 3),1,1>()(y, x, W); break;
case 4: fwdnfft<(1<< 4),1,1>()(y, x, W); break;
case 5: fwdnfft<(1<< 5),1,1>()(y, x, W); break;
case 6: fwdnfft<(1<< 6),1,1>()(y, x, W); break;
case 7: fwdnfft<(1<< 7),1,1>()(y, x, W); break;
case 8: fwdnfft<(1<< 8),1,1>()(y, x, W); break;
case 9: fwdnfft<(1<< 9),1,1>()(y, x, W); break;
case 10: fwdnfft<(1<<10),1,1>()(y, x, W); break;
case 11: fwdnfft<(1<<11),1,1>()(y, x, W); break;
case 12: fwdnfft<(1<<12),1,1>()(y, x, W); break;
case 13: fwdnfft<(1<<13),1,1>()(y, x, W); break;
case 14: fwdnfft<(1<<14),1,1>()(y, x, W); break;
case 15: fwdnfft<(1<<15),1,1>()(y, x, W); break;
case 16: fwdnfft<(1<<16),1,1>()(y, x, W); break;
case 17: fwdnfft<(1<<17),1,1>()(y, x, W); break;
case 18: fwdnfft<(1<<18),1,1>()(y, x, W); break;
case 19: fwdnfft<(1<<19),1,1>()(y, x, W); break;
case 20: fwdnfft<(1<<20),1,1>()(y, x, W); break;
case 21: fwdnfft<(1<<21),1,1>()(y, x, W); break;
case 22: fwdnfft<(1<<22),1,1>()(y, x, W); break;
case 23: fwdnfft<(1<<23),1,1>()(y, x, W); break;
case 24: fwdnfft<(1<<24),1,1>()(y, x, W); break;
}
}
///////////////////////////////////////////////////////////////////////////////
inline void inv(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: inv0fft<(1<< 1),1,0>()(x, y, W); break;
case 2: inv0fft<(1<< 2),1,0>()(x, y, W); break;
case 3: inv0fft<(1<< 3),1,0>()(x, y, W); break;
case 4: inv0fft<(1<< 4),1,0>()(x, y, W); break;
case 5: inv0fft<(1<< 5),1,0>()(x, y, W); break;
case 6: inv0fft<(1<< 6),1,0>()(x, y, W); break;
case 7: inv0fft<(1<< 7),1,0>()(x, y, W); break;
case 8: inv0fft<(1<< 8),1,0>()(x, y, W); break;
case 9: inv0fft<(1<< 9),1,0>()(x, y, W); break;
case 10: inv0fft<(1<<10),1,0>()(x, y, W); break;
case 11: inv0fft<(1<<11),1,0>()(x, y, W); break;
case 12: inv0fft<(1<<12),1,0>()(x, y, W); break;
case 13: inv0fft<(1<<13),1,0>()(x, y, W); break;
case 14: inv0fft<(1<<14),1,0>()(x, y, W); break;
case 15: inv0fft<(1<<15),1,0>()(x, y, W); break;
case 16: inv0fft<(1<<16),1,0>()(x, y, W); break;
case 17: inv0fft<(1<<17),1,0>()(x, y, W); break;
case 18: inv0fft<(1<<18),1,0>()(x, y, W); break;
case 19: inv0fft<(1<<19),1,0>()(x, y, W); break;
case 20: inv0fft<(1<<20),1,0>()(x, y, W); break;
case 21: inv0fft<(1<<21),1,0>()(x, y, W); break;
case 22: inv0fft<(1<<22),1,0>()(x, y, W); break;
case 23: inv0fft<(1<<23),1,0>()(x, y, W); break;
case 24: inv0fft<(1<<24),1,0>()(x, y, W); break;
}
}
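// inv dispatches to the unscaled inv0fft plan, so inv0 below is simply an
// alias for it; invn instead uses invnfft, which folds in the 1/N scaling.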
inline void inv0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
inv(log_N, x, y, W);
}
inline void invn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: invnfft<(1<< 1),1,0>()(x, y, W); break;
case 2: invnfft<(1<< 2),1,0>()(x, y, W); break;
case 3: invnfft<(1<< 3),1,0>()(x, y, W); break;
case 4: invnfft<(1<< 4),1,0>()(x, y, W); break;
case 5: invnfft<(1<< 5),1,0>()(x, y, W); break;
case 6: invnfft<(1<< 6),1,0>()(x, y, W); break;
case 7: invnfft<(1<< 7),1,0>()(x, y, W); break;
case 8: invnfft<(1<< 8),1,0>()(x, y, W); break;
case 9: invnfft<(1<< 9),1,0>()(x, y, W); break;
case 10: invnfft<(1<<10),1,0>()(x, y, W); break;
case 11: invnfft<(1<<11),1,0>()(x, y, W); break;
case 12: invnfft<(1<<12),1,0>()(x, y, W); break;
case 13: invnfft<(1<<13),1,0>()(x, y, W); break;
case 14: invnfft<(1<<14),1,0>()(x, y, W); break;
case 15: invnfft<(1<<15),1,0>()(x, y, W); break;
case 16: invnfft<(1<<16),1,0>()(x, y, W); break;
case 17: invnfft<(1<<17),1,0>()(x, y, W); break;
case 18: invnfft<(1<<18),1,0>()(x, y, W); break;
case 19: invnfft<(1<<19),1,0>()(x, y, W); break;
case 20: invnfft<(1<<20),1,0>()(x, y, W); break;
case 21: invnfft<(1<<21),1,0>()(x, y, W); break;
case 22: invnfft<(1<<22),1,0>()(x, y, W); break;
case 23: invnfft<(1<<23),1,0>()(x, y, W); break;
case 24: invnfft<(1<<24),1,0>()(x, y, W); break;
}
}
inline void inv0o(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: inv0fft<(1<< 1),1,1>()(y, x, W); break;
case 2: inv0fft<(1<< 2),1,1>()(y, x, W); break;
case 3: inv0fft<(1<< 3),1,1>()(y, x, W); break;
case 4: inv0fft<(1<< 4),1,1>()(y, x, W); break;
case 5: inv0fft<(1<< 5),1,1>()(y, x, W); break;
case 6: inv0fft<(1<< 6),1,1>()(y, x, W); break;
case 7: inv0fft<(1<< 7),1,1>()(y, x, W); break;
case 8: inv0fft<(1<< 8),1,1>()(y, x, W); break;
case 9: inv0fft<(1<< 9),1,1>()(y, x, W); break;
case 10: inv0fft<(1<<10),1,1>()(y, x, W); break;
case 11: inv0fft<(1<<11),1,1>()(y, x, W); break;
case 12: inv0fft<(1<<12),1,1>()(y, x, W); break;
case 13: inv0fft<(1<<13),1,1>()(y, x, W); break;
case 14: inv0fft<(1<<14),1,1>()(y, x, W); break;
case 15: inv0fft<(1<<15),1,1>()(y, x, W); break;
case 16: inv0fft<(1<<16),1,1>()(y, x, W); break;
case 17: inv0fft<(1<<17),1,1>()(y, x, W); break;
case 18: inv0fft<(1<<18),1,1>()(y, x, W); break;
case 19: inv0fft<(1<<19),1,1>()(y, x, W); break;
case 20: inv0fft<(1<<20),1,1>()(y, x, W); break;
case 21: inv0fft<(1<<21),1,1>()(y, x, W); break;
case 22: inv0fft<(1<<22),1,1>()(y, x, W); break;
case 23: inv0fft<(1<<23),1,1>()(y, x, W); break;
case 24: inv0fft<(1<<24),1,1>()(y, x, W); break;
}
}
inline void invno(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
#ifdef _OPENMP
#pragma omp parallel firstprivate(x,y,W)
#endif
switch (log_N) {
case 0: break;
case 1: invnfft<(1<< 1),1,1>()(y, x, W); break;
case 2: invnfft<(1<< 2),1,1>()(y, x, W); break;
case 3: invnfft<(1<< 3),1,1>()(y, x, W); break;
case 4: invnfft<(1<< 4),1,1>()(y, x, W); break;
case 5: invnfft<(1<< 5),1,1>()(y, x, W); break;
case 6: invnfft<(1<< 6),1,1>()(y, x, W); break;
case 7: invnfft<(1<< 7),1,1>()(y, x, W); break;
case 8: invnfft<(1<< 8),1,1>()(y, x, W); break;
case 9: invnfft<(1<< 9),1,1>()(y, x, W); break;
case 10: invnfft<(1<<10),1,1>()(y, x, W); break;
case 11: invnfft<(1<<11),1,1>()(y, x, W); break;
case 12: invnfft<(1<<12),1,1>()(y, x, W); break;
case 13: invnfft<(1<<13),1,1>()(y, x, W); break;
case 14: invnfft<(1<<14),1,1>()(y, x, W); break;
case 15: invnfft<(1<<15),1,1>()(y, x, W); break;
case 16: invnfft<(1<<16),1,1>()(y, x, W); break;
case 17: invnfft<(1<<17),1,1>()(y, x, W); break;
case 18: invnfft<(1<<18),1,1>()(y, x, W); break;
case 19: invnfft<(1<<19),1,1>()(y, x, W); break;
case 20: invnfft<(1<<20),1,1>()(y, x, W); break;
case 21: invnfft<(1<<21),1,1>()(y, x, W); break;
case 22: invnfft<(1<<22),1,1>()(y, x, W); break;
case 23: invnfft<(1<<23),1,1>()(y, x, W); break;
case 24: invnfft<(1<<24),1,1>()(y, x, W); break;
}
}
} /////////////////////////////////////////////////////////////////////////////
#endif // otfft_avxdit16omp_h
|
CC.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include <mpi.h>
// These macros should be defined before stdint.h is included
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#include <stdint.h>
#include <sys/time.h>
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <ctime>
#include <cmath>
#include "CombBLAS/CombBLAS.h"
//#define CC_TIMING 1
#define NONSTAR 0
#define STAR 1
#define CONVERGED 2
using namespace std;
/**
** Connected components based on the Awerbuch-Shiloach algorithm
**/
namespace combblas {
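// (select2nd, min) semiring: multiply() returns its second operand (the
// candidate label carried by the vector) and add() keeps the minimum, so a
// matrix-vector product over this semiring delivers to each vertex the
// smallest label among its neighbors, which is what the hooking step of
// Awerbuch-Shiloach needs.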
template <typename T1, typename T2>
struct Select2ndMinSR
{
typedef typename promote_trait<T1,T2>::T_promote T_promote;
static T_promote id(){ return std::numeric_limits<T_promote>::max(); }
static bool returnedSAID() { return false; }
static MPI_Op mpi_op() { return MPI_MIN; }
static T_promote add(const T_promote& arg1, const T_promote& arg2)
{
return std::min(arg1, arg2);
}
static T_promote multiply(const T1& arg1, const T2& arg2)
{
return static_cast<T_promote>(arg2);
}
static void axpy(const T1 a, const T2& x, T_promote& y)
{
y = add(y, multiply(a, x));
}
};
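// Shared-memory exclusive prefix sum: on return, B[i] = A[0] + ... + A[i-1]
// for i >= 1 (callers initialize B[0], typically to 0). Each thread scans a
// contiguous chunk, and a serial pass over the p partial sums fixes up the
// per-chunk offsets.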
template <class T, class I>
void omp_par_scan(T* A, T* B,I cnt)
{
int p=omp_get_max_threads();
if(cnt<100*p){
for(I i=1;i<cnt;i++)
B[i]=B[i-1]+A[i-1];
return;
}
I step_size=cnt/p;
#pragma omp parallel for
for(int i=0; i<p; i++){
int start=i*step_size;
int end=start+step_size;
if(i==p-1) end=cnt;
if(i!=0)B[start]=0;
for(I j=start+1; j<end; j++)
B[j]=B[j-1]+A[j-1];
}
T* sum=new T[p];
sum[0]=0;
for(int i=1;i<p;i++)
sum[i]=sum[i-1]+B[i*step_size-1]+A[i*step_size-1];
#pragma omp parallel for
for(int i=1; i<p; i++){
int start=i*step_size;
int end=start+step_size;
if(i==p-1) end=cnt;
T sum_=sum[i];
for(I j=start; j<end; j++)
B[j]+=sum_;
}
delete[] sum;
}
// Copied from usort so that we can select k.
// Increasing k reduces the bandwidth cost but increases the latency cost.
// Caveat: this does not work when p is not a power of two and a processor sends no data.
// A usage sketch follows the function body below.
template <typename T>
int Mpi_Alltoallv_kway(T* sbuff_, int* s_cnt_, int* sdisp_,
T* rbuff_, int* r_cnt_, int* rdisp_, MPI_Comm c, int kway=2)
{
int np, pid;
MPI_Comm_size(c,&np);
MPI_Comm_rank(c,&pid);
if(np==1 || kway==1)
{
return MPI_Alltoallv(sbuff_, s_cnt_, sdisp_, MPIType<T>(), rbuff_, r_cnt_, rdisp_, MPIType<T>(), c);
}
int range[2]={0,np};
std::vector<int> s_cnt(np);
#pragma omp parallel for
for(int i=0;i<np;i++){
s_cnt[i]=s_cnt_[i]*sizeof(T)+2*sizeof(int);
}
std::vector<int> sdisp(np); sdisp[0]=0;
omp_par_scan(&s_cnt[0],&sdisp[0],np);
char* sbuff=new char[sdisp[np-1]+s_cnt[np-1]];
#pragma omp parallel for
for(int i=0;i<np;i++){
((int*)&sbuff[sdisp[i]])[0]=s_cnt[i];
((int*)&sbuff[sdisp[i]])[1]=pid;
memcpy(&sbuff[sdisp[i]]+2*sizeof(int),&sbuff_[sdisp_[i]],s_cnt[i]-2*sizeof(int));
}
//int t_indx=0;
int iter_cnt=0;
while(range[1]-range[0]>1){
iter_cnt++;
if(kway>range[1]-range[0])
kway=range[1]-range[0];
std::vector<int> new_range(kway+1);
for(int i=0;i<=kway;i++)
new_range[i]=(range[0]*(kway-i)+range[1]*i)/kway;
int p_class=(std::upper_bound(&new_range[0],&new_range[kway],pid)-&new_range[0]-1);
int new_np=new_range[p_class+1]-new_range[p_class];
int new_pid=pid-new_range[p_class];
//Communication.
{
std::vector<int> r_cnt (new_np*kway, 0);
std::vector<int> r_cnt_ext(new_np*kway, 0);
//Exchange send sizes.
for(int i=0;i<kway;i++){
MPI_Status status;
int cmp_np=new_range[i+1]-new_range[i];
int partner=(new_pid<cmp_np? new_range[i]+new_pid: new_range[i+1]-1) ;
assert( (new_pid<cmp_np? true: new_range[i]+new_pid==new_range[i+1] )); //Remove this.
MPI_Sendrecv(&s_cnt[new_range[i]-new_range[0]], cmp_np, MPI_INT, partner, 0,
&r_cnt[new_np *i ], new_np, MPI_INT, partner, 0, c,&status);
//Handle extra communication.
if(new_pid==new_np-1&& cmp_np>new_np){
int partner=new_range[i+1]-1;
std::vector<int> s_cnt_ext(cmp_np, 0);
MPI_Sendrecv(&s_cnt_ext[ 0], cmp_np, MPI_INT, partner, 0,
&r_cnt_ext[new_np*i], new_np, MPI_INT, partner, 0, c,&status);
}
}
//Allocate receive buffer.
std::vector<int> rdisp (new_np*kway, 0);
std::vector<int> rdisp_ext(new_np*kway, 0);
int rbuff_size, rbuff_size_ext;
char *rbuff, *rbuff_ext;
{
omp_par_scan(&r_cnt [0],&rdisp [0],new_np*kway);
omp_par_scan(&r_cnt_ext[0],&rdisp_ext[0],new_np*kway);
rbuff_size = rdisp [new_np*kway-1] + r_cnt [new_np*kway-1];
rbuff_size_ext = rdisp_ext[new_np*kway-1] + r_cnt_ext[new_np*kway-1];
rbuff = new char[rbuff_size ];
rbuff_ext = new char[rbuff_size_ext];
}
//Sendrecv data.
//*
int my_block=kway;
while(pid<new_range[my_block]) my_block--;
// MPI_Barrier(c);
for(int i_=0;i_<=kway/2;i_++){
int i1=(my_block+i_)%kway;
int i2=(my_block+kway-i_)%kway;
for(int j=0;j<(i_==0 || i_==kway/2?1:2);j++){
int i=(i_==0?i1:((j+my_block/i_)%2?i1:i2));
MPI_Status status;
int cmp_np=new_range[i+1]-new_range[i];
int partner=(new_pid<cmp_np? new_range[i]+new_pid: new_range[i+1]-1) ;
int send_dsp =sdisp[new_range[i ]-new_range[0] ];
int send_dsp_last=sdisp[new_range[i+1]-new_range[0]-1];
int send_cnt =s_cnt[new_range[i+1]-new_range[0]-1]+send_dsp_last-send_dsp;
// ttt=omp_get_wtime();
MPI_Sendrecv(&sbuff[send_dsp], send_cnt, MPI_BYTE, partner, 0,
&rbuff[rdisp[new_np * i ]], r_cnt[new_np *(i+1)-1]+rdisp[new_np *(i+1)-1]-rdisp[new_np * i ], MPI_BYTE, partner, 0, c,&status);
//Handle extra communication.
if(new_pid==new_np-1&& cmp_np>new_np){ // new_pid, matching the size-exchange branch above
int partner=new_range[i+1]-1;
MPI_Sendrecv( NULL, 0, MPI_BYTE, partner, 0,
&rbuff_ext[rdisp_ext[new_np*i]], r_cnt_ext[new_np*(i+1)-1]+rdisp_ext[new_np*(i+1)-1]-rdisp_ext[new_np*i], MPI_BYTE, partner, 0, c,&status); // receive into rbuff_ext, which the rearrange step below reads
}
}
}
//Rearrange received data.
{
if(sbuff!=NULL) delete[] sbuff;
sbuff=new char[rbuff_size+rbuff_size_ext];
std::vector<int> cnt_new(2*new_np*kway, 0);
std::vector<int> disp_new(2*new_np*kway, 0);
for(int i=0;i<new_np;i++)
for(int j=0;j<kway;j++){
cnt_new[(i*2 )*kway+j]=r_cnt [j*new_np+i];
cnt_new[(i*2+1)*kway+j]=r_cnt_ext[j*new_np+i];
}
omp_par_scan(&cnt_new[0],&disp_new[0],2*new_np*kway);
#pragma omp parallel for
for(int i=0;i<new_np;i++)
for(int j=0;j<kway;j++){
memcpy(&sbuff[disp_new[(i*2 )*kway+j]],&rbuff [rdisp [j*new_np+i]], r_cnt [j*new_np+i]);
memcpy(&sbuff[disp_new[(i*2+1)*kway+j]],&rbuff_ext[rdisp_ext[j*new_np+i]], r_cnt_ext[j*new_np+i]);
}
//Free memory.
if(rbuff !=NULL) delete[] rbuff ;
if(rbuff_ext!=NULL) delete[] rbuff_ext;
s_cnt.clear();
s_cnt.resize(new_np,0);
sdisp.resize(new_np);
for(int i=0;i<new_np;i++){
for(int j=0;j<2*kway;j++)
s_cnt[i]+=cnt_new[i*2*kway+j];
sdisp[i]=disp_new[i*2*kway];
}
}
}
range[0]=new_range[p_class ];
range[1]=new_range[p_class+1];
}
//Copy data to rbuff_.
std::vector<char*> buff_ptr(np);
char* tmp_ptr=sbuff;
for(int i=0;i<np;i++){
int& blk_size=((int*)tmp_ptr)[0];
buff_ptr[i]=tmp_ptr;
tmp_ptr+=blk_size;
}
#pragma omp parallel for
for(int i=0;i<np;i++){
int& blk_size=((int*)buff_ptr[i])[0];
int& src_pid=((int*)buff_ptr[i])[1];
assert(blk_size-2*sizeof(int)<=r_cnt_[src_pid]*sizeof(T));
memcpy(&rbuff_[rdisp_[src_pid]],buff_ptr[i]+2*sizeof(int),blk_size-2*sizeof(int));
}
//Free memory.
if(sbuff !=NULL) delete[] sbuff;
return 1;
}
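// A minimal usage sketch of the k-way exchange (hypothetical demo helper,
// not part of the library): every rank sends one element to every other
// rank, exactly as with MPI_Alltoallv. Assumes MPI is initialized, the
// process count is a power of two (per the caveat above), and T has an
// MPIType<> specialization (e.g., int64_t).
template <typename T>
void Alltoallv_kway_demo(MPI_Comm comm)
{
    int np, rank;
    MPI_Comm_size(comm, &np);
    MPI_Comm_rank(comm, &rank);
    std::vector<int> scnt(np, 1), rcnt(np, 1), sdsp(np), rdsp(np);
    for (int i = 0; i < np; ++i) { sdsp[i] = i; rdsp[i] = i; } // unit strides
    std::vector<T> sbuf(np, static_cast<T>(rank)), rbuf(np);
    Mpi_Alltoallv_kway(sbuf.data(), scnt.data(), sdsp.data(),
                       rbuf.data(), rcnt.data(), rdsp.data(), comm, /*kway=*/4);
    // Afterwards rbuf[i] == i: each slot holds the sending rank's id.
}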
template <typename T>
int Mpi_Alltoallv(T* sbuff, int* s_cnt, int* sdisp,
T* rbuff, int* r_cnt, int* rdisp, MPI_Comm comm)
{
int nprocs, rank;
MPI_Comm_size(comm,&nprocs);
MPI_Comm_rank(comm,&rank);
int commCnt = 0;
for(int i = 0; i < nprocs; i++)
{
if(i==rank) continue;
if(s_cnt[i] > 0) commCnt++;
if(r_cnt[i] > 0) commCnt++;
}
int totalCommCnt = 0;
MPI_Allreduce(&commCnt,&totalCommCnt, 1, MPI_INT, MPI_SUM, comm);
if(totalCommCnt < 2*log2(nprocs))
{
return par::Mpi_Alltoallv_sparse(sbuff, s_cnt, sdisp, rbuff, r_cnt, rdisp, comm);
}
else if((nprocs& (nprocs - 1)) == 0) // processor count is power of 2
{
Mpi_Alltoallv_kway(sbuff, s_cnt, sdisp, rbuff, r_cnt, rdisp, comm);
}
else
{
return MPI_Alltoallv(sbuff, s_cnt, sdisp, MPIType<T>(), rbuff, r_cnt, rdisp, MPIType<T>(), comm);
}
return 1;
}
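// Helper for Extract below: each owner compares the estimated broadcast cost
// of its whole local chunk of 'dense' (LocArrSize() * log2(p), a bandwidth
// estimate) against the number of point requests it would otherwise answer,
// and posts an MPI_Ibcast of the chunk when broadcasting is cheaper. Returns
// the number of broadcasts issued; all of them complete before returning.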
template <class IT, class NT>
int replicate(const FullyDistVec<IT,NT> dense, FullyDistSpVec<IT,IT> ri, vector<vector<NT>>&bcastBuffer)
{
auto commGrid = dense.getcommgrid();
MPI_Comm World = commGrid->GetWorld();
int nprocs = commGrid->GetSize();
vector<int> sendcnt (nprocs,0);
vector<int> recvcnt (nprocs,0);
std::vector<IT> rinum = ri.GetLocalNum();
IT riloclen = rinum.size();
for(IT i=0; i < riloclen; ++i)
{
IT locind;
int owner = dense.Owner(rinum[i], locind);
sendcnt[owner]++;
}
MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
IT totrecv = std::accumulate(recvcnt.begin(),recvcnt.end(), static_cast<IT>(0));
double broadcast_cost = dense.LocArrSize() * log2(nprocs); // bandwidth cost
IT bcastsize = 0;
vector<IT> bcastcnt(nprocs,0);
int nbcast = 0;
if(broadcast_cost < totrecv)
{
bcastsize = dense.LocArrSize();
}
MPI_Allgather(&bcastsize, 1, MPIType<IT>(), bcastcnt.data(), 1, MPIType<IT>(), World);
for(int i=0; i<nprocs; i++)
{
if(bcastcnt[i]>0) nbcast++;
}
if(nbcast > 0)
{
MPI_Request* requests = new MPI_Request[nbcast];
assert(requests);
MPI_Status* statuses = new MPI_Status[nbcast];
assert(statuses);
int ibcast = 0;
const NT * arr = dense.GetLocArr();
for(int i=0; i<nprocs; i++)
{
if(bcastcnt[i]>0)
{
bcastBuffer[i].resize(bcastcnt[i]);
std::copy(arr, arr+bcastcnt[i], bcastBuffer[i].begin());
MPI_Ibcast(bcastBuffer[i].data(), bcastcnt[i], MPIType<NT>(), i, World,&requests[ibcast++]);
}
}
MPI_Waitall(nbcast, requests, statuses);
delete [] requests;
delete [] statuses;
}
return nbcast;
}
// SubRef using a sparse vector:
// given a dense vector dv and a sparse vector sv,
// sv_out[i] = dv[sv[i]] for every nonzero index i in sv; sv_out is returned.
// If sv has repeated entries, many processes request the same entries of dv from the same processes
// (usually from the low-rank processes in LACC).
// In that case it can be beneficial to broadcast some entries of dv so that dv[sv[i]] can be obtained locally.
// This logic is implemented in replicate(dense, ri, bcastBuffer) above.
// A usage sketch follows the function body below.
template <class IT, class NT>
FullyDistSpVec<IT,NT> Extract (const FullyDistVec<IT,NT> dense, FullyDistSpVec<IT,IT> ri)
{
#ifdef CC_TIMING
double ts = MPI_Wtime();
std::ostringstream outs;
outs.str("");
outs.clear();
outs<< " Extract timing: ";
#endif
auto commGrid = ri.getcommgrid();
MPI_Comm World = commGrid->GetWorld();
int nprocs = commGrid->GetSize();
if(!(commGrid == dense.getcommgrid()))
{
std::cout << "Grids are not comparable for dense vector subsref" << std::endl;
return FullyDistSpVec<IT,NT>();
}
vector<vector<NT>> bcastBuffer(nprocs);
#ifdef CC_TIMING
double t1 = MPI_Wtime();
#endif
int nbcast = replicate(dense, ri, bcastBuffer);
#ifdef CC_TIMING
double bcast = MPI_Wtime() - t1;
outs << "bcast ( " << nbcast << " ): " << bcast << " ";
#endif
std::vector< std::vector< IT > > data_req(nprocs);
std::vector< std::vector< IT > > revr_map(nprocs); // to put the incoming data to the correct location
const NT * arr = dense.GetLocArr();
std::vector<IT> rinum = ri.GetLocalNum();
IT riloclen = rinum.size();
std::vector<NT> num(riloclen); // final output
for(IT i=0; i < riloclen; ++i)
{
IT locind;
int owner = dense.Owner(rinum[i], locind);
if(bcastBuffer[owner].size() == 0)
{
data_req[owner].push_back(locind);
revr_map[owner].push_back(i);
}
else
{
num[i] =bcastBuffer[owner][locind];
}
}
int * sendcnt = new int[nprocs];
int * sdispls = new int[nprocs];
for(int i=0; i<nprocs; ++i)
sendcnt[i] = (int) data_req[i].size();
int * rdispls = new int[nprocs];
int * recvcnt = new int[nprocs];
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World); // share the request counts
#ifdef CC_TIMING
double all2ll1 = MPI_Wtime() - t1;
outs << "all2ll1: " << all2ll1 << " ";
#endif
sdispls[0] = 0;
rdispls[0] = 0;
for(int i=0; i<nprocs-1; ++i)
{
sdispls[i+1] = sdispls[i] + sendcnt[i];
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
IT totsend = std::accumulate(sendcnt,sendcnt+nprocs, static_cast<IT>(0));
IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs, static_cast<IT>(0));
IT * sendbuf = new IT[totsend];
for(int i=0; i<nprocs; ++i)
{
std::copy(data_req[i].begin(), data_req[i].end(), sendbuf+sdispls[i]);
std::vector<IT>().swap(data_req[i]);
}
IT * reversemap = new IT[totsend];
for(int i=0; i<nprocs; ++i)
{
std::copy(revr_map[i].begin(), revr_map[i].end(), reversemap+sdispls[i]); // reversemap array is unique
std::vector<IT>().swap(revr_map[i]);
}
IT * recvbuf = new IT[totrecv];
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
Mpi_Alltoallv(sendbuf, sendcnt, sdispls, recvbuf, recvcnt, rdispls, World);
#ifdef CC_TIMING
double all2ll2 = MPI_Wtime() - t1;
outs << "all2ll2: " << all2ll2 << " ";
#endif
delete [] sendbuf;
// access requested data
NT * databack = new NT[totrecv];
#ifdef THREADED
#pragma omp parallel for
#endif
for(int i=0; i<totrecv; ++i)
databack[i] = arr[recvbuf[i]];
delete [] recvbuf;
// communicate requested data
NT * databuf = new NT[totsend];
// the response counts are the same as the request counts
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
//Mpi_Alltoallv_sparse(databack, recvcnt, rdispls,databuf, sendcnt, sdispls, World);
Mpi_Alltoallv(databack, recvcnt, rdispls,databuf, sendcnt, sdispls, World);
#ifdef CC_TIMING
double all2ll3 = MPI_Wtime() - t1;
outs << "all2ll3: " << all2ll3 << " ";
#endif
// Create the output from databuf
for(int i=0; i<totsend; ++i)
num[reversemap[i]] = databuf[i];
DeleteAll(rdispls, recvcnt, databack);
DeleteAll(sdispls, sendcnt, databuf,reversemap);
std::vector<IT> ind = ri.GetLocalInd ();
IT globallen = ri.TotalLength();
FullyDistSpVec<IT, NT> indexed(commGrid, globallen, ind, num, true, true);
#ifdef CC_TIMING
double total = MPI_Wtime() - ts;
outs << "others: " << total - (bcast + all2ll1 + all2ll2 + all2ll3) << " ";
outs<< endl;
SpParHelper::Print(outs.str());
#endif
return indexed;
}
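// Usage sketch for Extract (hypothetical names, constructor arguments elided):
//
//   FullyDistVec<int64_t,int64_t>   parent = ...;  // dense parent pointers
//   FullyDistSpVec<int64_t,int64_t> req    = ...;  // indices to look up
//   FullyDistSpVec<int64_t,int64_t> gp     = Extract(parent, req);
//   // now gp[i] = parent[req[i]] for every nonzero i of req
//
// Helper for Assign below: the scatter-side mirror of replicate. Owners whose
// expected incoming traffic exceeds the cost of a vector reduction
// (MyLocLength() * log2(p)) resolve duplicate updates with MPI_Ireduce over
// MPI_MIN instead of point-to-point messages; MAX_FOR_REDUCE marks slots that
// received no update. Returns the number of reductions posted.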
template <class IT, class NT>
int ReduceAssign(FullyDistSpVec<IT,IT>& ind, FullyDistSpVec<IT,NT>& val, vector<vector<NT>>&reduceBuffer, NT MAX_FOR_REDUCE)
{
auto commGrid = ind.getcommgrid();
MPI_Comm World = commGrid->GetWorld();
int nprocs = commGrid->GetSize();
int myrank;
MPI_Comm_rank(World,&myrank);
vector<int> sendcnt (nprocs,0);
vector<int> recvcnt (nprocs);
std::vector<std::vector<IT>> indBuf(nprocs);
std::vector<std::vector<NT>> valBuf(nprocs);
std::vector<IT> indices = ind.GetLocalNum();
std::vector<NT> values = val.GetLocalNum();
IT riloclen = indices.size();
for(IT i=0; i < riloclen; ++i)
{
IT locind;
int owner = ind.Owner(indices[i], locind);
indBuf[owner].push_back(locind);
valBuf[owner].push_back(values[i]);
sendcnt[owner]++;
}
MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
IT totrecv = std::accumulate(recvcnt.begin(),recvcnt.end(), static_cast<IT>(0));
double reduceCost = ind.MyLocLength() * log2(nprocs); // bandwidth cost
IT reducesize = 0;
vector<IT> reducecnt(nprocs,0);
int nreduce = 0;
if(reduceCost < totrecv)
{
reducesize = ind.MyLocLength();
}
MPI_Allgather(&reducesize, 1, MPIType<IT>(), reducecnt.data(), 1, MPIType<IT>(), World);
for(int i=0; i<nprocs; ++i)
{
if(reducecnt[i]>0) nreduce++;
}
if(nreduce > 0)
{
MPI_Request* requests = new MPI_Request[nreduce];
assert(requests);
MPI_Status* statuses = new MPI_Status[nreduce];
assert(statuses);
int ireduce = 0;
for(int i=0; i<nprocs; ++i)
{
if(reducecnt[i]>0)
{
reduceBuffer[i].resize(reducecnt[i], MAX_FOR_REDUCE); // this is specific to LACC
for(int j=0; j<sendcnt[i]; j++)
reduceBuffer[i][indBuf[i][j]] = std::min(reduceBuffer[i][indBuf[i][j]], valBuf[i][j]);
if(myrank==i)
MPI_Ireduce(MPI_IN_PLACE, reduceBuffer[i].data(), reducecnt[i], MPIType<NT>(), MPI_MIN, i, World,&requests[ireduce++]);
else
MPI_Ireduce(reduceBuffer[i].data(), NULL, reducecnt[i], MPIType<NT>(), MPI_MIN, i, World,&requests[ireduce++]);
}
}
MPI_Waitall(nreduce, requests, statuses);
//MPI_Barrier(World);
delete [] requests;
delete [] statuses;
}
return nreduce;
}
// for fixed value
template <class IT, class NT>
int ReduceAssign(FullyDistSpVec<IT,IT>& ind, NT val, vector<vector<NT>>&reduceBuffer, NT MAX_FOR_REDUCE)
{
auto commGrid = ind.getcommgrid();
MPI_Comm World = commGrid->GetWorld();
int nprocs = commGrid->GetSize();
int myrank;
MPI_Comm_rank(World,&myrank);
vector<int> sendcnt (nprocs,0);
vector<int> recvcnt (nprocs);
std::vector<std::vector<IT>> indBuf(nprocs);
std::vector<IT> indices = ind.GetLocalNum();
IT riloclen = indices.size();
for(IT i=0; i < riloclen; ++i)
{
IT locind;
int owner = ind.Owner(indices[i], locind);
indBuf[owner].push_back(locind);
sendcnt[owner]++;
}
MPI_Alltoall(sendcnt.data(), 1, MPI_INT, recvcnt.data(), 1, MPI_INT, World);
IT totrecv = std::accumulate(recvcnt.begin(),recvcnt.end(), static_cast<IT>(0));
double reduceCost = ind.MyLocLength() * log2(nprocs); // bandwidth cost
IT reducesize = 0;
vector<IT> reducecnt(nprocs,0);
int nreduce = 0;
if(reduceCost < totrecv)
{
reducesize = ind.MyLocLength();
}
MPI_Allgather(&reducesize, 1, MPIType<IT>(), reducecnt.data(), 1, MPIType<IT>(), World);
for(int i=0; i<nprocs; ++i)
{
if(reducecnt[i]>0) nreduce++;
}
if(nreduce > 0)
{
MPI_Request* requests = new MPI_Request[nreduce];
assert(requests);
MPI_Status* statuses = new MPI_Status[nreduce];
assert(statuses);
int ireduce = 0;
for(int i=0; i<nprocs; ++i)
{
if(reducecnt[i]>0)
{
reduceBuffer[i].resize(reducecnt[i], MAX_FOR_REDUCE); // this is specific to LACC
for(int j=0; j<sendcnt[i]; j++)
reduceBuffer[i][indBuf[i][j]] = val;
if(myrank==i)
MPI_Ireduce(MPI_IN_PLACE, reduceBuffer[i].data(), reducecnt[i], MPIType<NT>(), MPI_MIN, i, World,&requests[ireduce++]);
else
MPI_Ireduce(reduceBuffer[i].data(), NULL, reducecnt[i], MPIType<NT>(), MPI_MIN, i, World,&requests[ireduce++]);
}
}
MPI_Waitall(nreduce, requests, statuses);
//MPI_Barrier(World);
delete [] requests;
delete [] statuses;
}
return nreduce;
}
// Given two sparse vectors sv and val,
// sv_out[sv[i]] = val[i] for every nonzero index i in sv, where sv_out is the output sparse vector.
// If sv has repeated entries, a process may receive the same indices of sv from different processes.
// In that case it can be beneficial to reduce some entries of sv so that sv_out[sv[i]] can be updated locally.
// This logic is implemented in ReduceAssign above. A usage sketch follows the function body below.
template <class IT, class NT>
FullyDistSpVec<IT,NT> Assign(FullyDistSpVec<IT,IT>& ind, FullyDistSpVec<IT,NT>& val)
{
IT ploclen = ind.getlocnnz();
if(ploclen != val.getlocnnz())
{
SpParHelper::Print("Assign error: Index and value vectors have different size !!!\n");
return FullyDistSpVec<IT,NT>(ind.getcommgrid());
}
IT globallen = ind.TotalLength();
IT maxInd = ind.Reduce(maximum<IT>(), (IT) 0 ) ;
if(maxInd >= globallen)
{
std::cout << "At least one requested index is larger than the global length" << std::endl;
return FullyDistSpVec<IT,NT>(ind.getcommgrid());
}
#ifdef CC_TIMING
double ts = MPI_Wtime();
std::ostringstream outs;
outs.str("");
outs.clear();
outs<< " Assign timing: ";
#endif
auto commGrid = ind.getcommgrid();
MPI_Comm World = commGrid->GetWorld();
int nprocs = commGrid->GetSize();
int * rdispls = new int[nprocs+1];
int * recvcnt = new int[nprocs];
int * sendcnt = new int[nprocs](); // initialize to 0
int * sdispls = new int[nprocs+1];
vector<vector<NT>> reduceBuffer(nprocs);
#ifdef CC_TIMING
double t1 = MPI_Wtime();
#endif
NT MAX_FOR_REDUCE = static_cast<NT>(globallen);
int nreduce = ReduceAssign(ind, val, reduceBuffer, MAX_FOR_REDUCE);
#ifdef CC_TIMING
double reduce = MPI_Wtime() - t1;
outs << "reduce (" << nreduce << "): " << reduce << " ";
#endif
std::vector<std::vector<IT>> indBuf(nprocs);
std::vector<std::vector<NT>> valBuf(nprocs);
std::vector<IT> indices = ind.GetLocalNum();
std::vector<NT> values = val.GetLocalNum();
IT riloclen = indices.size();
for(IT i=0; i < riloclen; ++i)
{
IT locind;
int owner = ind.Owner(indices[i], locind);
if(reduceBuffer[owner].size() == 0)
{
indBuf[owner].push_back(locind);
valBuf[owner].push_back(values[i]);
sendcnt[owner]++;
}
}
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
#ifdef CC_TIMING
double all2ll1 = MPI_Wtime() - t1;
outs << "all2ll1: " << all2ll1 << " ";
#endif
sdispls[0] = 0;
rdispls[0] = 0;
for(int i=0; i<nprocs; ++i)
{
sdispls[i+1] = sdispls[i] + sendcnt[i];
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
IT totsend = sdispls[nprocs];
IT totrecv = rdispls[nprocs];
vector<IT> sendInd(totsend);
vector<NT> sendVal(totsend);
for(int i=0; i<nprocs; ++i)
{
std::copy(indBuf[i].begin(), indBuf[i].end(), sendInd.begin()+sdispls[i]);
std::vector<IT>().swap(indBuf[i]);
std::copy(valBuf[i].begin(), valBuf[i].end(), sendVal.begin()+sdispls[i]);
std::vector<NT>().swap(valBuf[i]);
}
vector<IT> recvInd(totrecv);
vector<NT> recvVal(totrecv);
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
Mpi_Alltoallv(sendInd.data(), sendcnt, sdispls, recvInd.data(), recvcnt, rdispls, World);
//MPI_Alltoallv(sendInd.data(), sendcnt, sdispls, MPIType<IT>(), recvInd.data(), recvcnt, rdispls, MPIType<IT>(), World);
#ifdef CC_TIMING
double all2ll2 = MPI_Wtime() - t1;
outs << "all2ll2: " << all2ll2 << " ";
#endif
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
Mpi_Alltoallv(sendVal.data(), sendcnt, sdispls, recvVal.data(), recvcnt, rdispls, World);
#ifdef CC_TIMING
double all2ll3 = MPI_Wtime() - t1;
outs << "all2ll3: " << all2ll3 << " ";
#endif
DeleteAll(sdispls, rdispls, sendcnt, recvcnt);
int myrank;
MPI_Comm_rank(World,&myrank);
if(reduceBuffer[myrank].size()>0)
{
//cout << myrank << " : " << recvInd.size() << endl;
for(int i=0; i<reduceBuffer[myrank].size(); i++)
{
if(reduceBuffer[myrank][i] < MAX_FOR_REDUCE)
{
recvInd.push_back(i);
recvVal.push_back(reduceBuffer[myrank][i]);
}
}
}
FullyDistSpVec<IT, NT> indexed(commGrid, globallen, recvInd, recvVal, false, false);
#ifdef CC_TIMING
double total = MPI_Wtime() - ts;
outs << "others: " << total - (reduce + all2ll1 + all2ll2 + all2ll3) << " ";
outs<< endl;
SpParHelper::Print(outs.str());
#endif
return indexed;
}
// given a sparse vector sv and a scalar val
// sv_out[sv[i]] = val for all nonzero indices i in sv, where sv_out is the output sparse vector
// If sv has repeated entries, a process may receive the same values of sv from different processes
// In this case, it may be beneficial to reduce some entries of sv so that sv_out[sv[i]] can be updated locally.
// That reduction logic is implemented in the helper function ReduceAssign, called below.
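// Example: if sv has nonzeros sv[1]=3 and sv[4]=0 and val=5, then
// sv_out[3]=5 and sv_out[0]=5; all other entries stay empty.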
template <class IT, class NT>
FullyDistSpVec<IT,NT> Assign (FullyDistSpVec<IT,IT>& ind, NT val)
{
IT globallen = ind.TotalLength();
IT maxInd = ind.Reduce(maximum<IT>(), (IT) 0 ) ;
if(maxInd >= globallen)
{
std::cout << "At least one requested index is larger than the global length" << std::endl;
return FullyDistSpVec<IT,NT>(ind.getcommgrid());
}
#ifdef CC_TIMING
double ts = MPI_Wtime();
std::ostringstream outs;
outs.str("");
outs.clear();
outs<< " Assign timing: ";
#endif
auto commGrid = ind.getcommgrid();
MPI_Comm World = commGrid->GetWorld();
int nprocs = commGrid->GetSize();
int * rdispls = new int[nprocs+1];
int * recvcnt = new int[nprocs];
int * sendcnt = new int[nprocs](); // initialize to 0
int * sdispls = new int[nprocs+1];
vector<vector<NT>> reduceBuffer(nprocs);
#ifdef CC_TIMING
double t1 = MPI_Wtime();
#endif
NT MAX_FOR_REDUCE = static_cast<NT>(globallen);
int nreduce = ReduceAssign(ind, val, reduceBuffer, MAX_FOR_REDUCE);
#ifdef CC_TIMING
double reduce = MPI_Wtime() - t1;
outs << "reduce ( " << nreduce << " ): " << reduce << " ";
#endif
std::vector<std::vector<IT>> indBuf(nprocs);
std::vector<IT> indices = ind.GetLocalNum();
IT riloclen = indices.size();
for(IT i=0; i < riloclen; ++i)
{
IT locind;
int owner = ind.Owner(indices[i], locind);
if(reduceBuffer[owner].size() == 0)
{
indBuf[owner].push_back(locind);
sendcnt[owner]++;
}
}
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);
#ifdef CC_TIMING
double all2ll1 = MPI_Wtime() - t1;
outs << "all2ll1: " << all2ll1 << " ";
#endif
sdispls[0] = 0;
rdispls[0] = 0;
for(int i=0; i<nprocs; ++i)
{
sdispls[i+1] = sdispls[i] + sendcnt[i];
rdispls[i+1] = rdispls[i] + recvcnt[i];
}
IT totsend = sdispls[nprocs];
IT totrecv = rdispls[nprocs];
vector<IT> sendInd(totsend);
for(int i=0; i<nprocs; ++i)
{
std::copy(indBuf[i].begin(), indBuf[i].end(), sendInd.begin()+sdispls[i]);
std::vector<IT>().swap(indBuf[i]);
}
vector<IT> recvInd(totrecv);
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
Mpi_Alltoallv(sendInd.data(), sendcnt, sdispls, recvInd.data(), recvcnt, rdispls, World);
//MPI_Alltoallv(sendInd.data(), sendcnt, sdispls, MPIType<IT>(), recvInd.data(), recvcnt, rdispls, MPIType<IT>(), World);
#ifdef CC_TIMING
double all2ll2 = MPI_Wtime() - t1;
outs << "all2ll2: " << all2ll2 << " ";
outs << "all2ll3: " << 0 << " ";
#endif
DeleteAll(sdispls, rdispls, sendcnt, recvcnt);
int myrank;
MPI_Comm_rank(World,&myrank);
vector<NT> recvVal(totrecv);
if(reduceBuffer[myrank].size()>0)
{
//cout << myrank << " : " << recvInd.size() << endl;
for(int i=0; i<reduceBuffer[myrank].size(); i++)
{
if(reduceBuffer[myrank][i] < MAX_FOR_REDUCE)
{
recvInd.push_back(i);
recvVal.push_back(val);
}
}
}
FullyDistSpVec<IT, NT> indexed(commGrid, globallen, recvInd, recvVal, false, false);
#ifdef CC_TIMING
double total = MPI_Wtime() - ts;
outs << "others: " << total - (reduce + all2ll1 + all2ll2) << " ";
outs<< endl;
SpParHelper::Print(outs.str());
#endif
return indexed;
}
// special starcheck after conditional and unconditional hooking
template <typename IT, typename NT, typename DER>
void StarCheckAfterHooking(const SpParMat<IT, NT, DER>& A, FullyDistVec<IT, IT>& parent,
FullyDistVec<IT, short>& star, FullyDistSpVec<IT, IT> condhooks, bool isStar2StarHookPossible)
{
// hooks are nonstars
star.EWiseApply(condhooks, [](short isStar, IT x){ return static_cast<short>(NONSTAR); },
false, static_cast<IT>(NONSTAR));
if(isStar2StarHookPossible)
{
// this is not needed in the first iteration; see the complicated proof in the paper
// parents of hooks are nonstars
// needed only after conditional hooking because in that case star can hook to a star
FullyDistSpVec<IT, short> pNonStar= Assign(condhooks, NONSTAR);
star.Set(pNonStar);
}
// star(parent)
// If I am a star, I would like to know the star information of my parent
// children of hooks and parents of hooks are nonstars
// NOTE: they are not needed in the first iteration
FullyDistSpVec<IT, short> spStars(star, [](short isStar){ return isStar == STAR; });
FullyDistSpVec<IT, IT> parentOfStars = EWiseApply<IT>(spStars, parent,
[](short isStar, IT p){ return p; },
[](short isStar, IT p){ return true; },
false, static_cast<short>(0));
FullyDistSpVec<IT, short> isParentStar = Extract(star, parentOfStars);
star.Set(isParentStar);
}
/*
// In iteration 1: "stars" has both vertices belongihg to stars and nonstars (no converged)
// we only process nonstars and identify starts from them
// After iteration 1: "stars" has vertices belongihg to converged and nonstars (no stars)
// we only process nonstars and identify starts from them
template <typename IT>
void StarCheck(FullyDistVec<IT, IT>& parents, FullyDistVec<IT,short>& stars)
{
// this is done here so that in the first iteration, we don't process STAR vertices
FullyDistSpVec<IT,short> nonStars(stars, [](short isStar){return isStar==NONSTAR;});
// initialize all nonstars to stars
stars.Apply([](short isStar){return isStar==NONSTAR? STAR: isStar;});
// identify vertices at level >= 2 (grandchildren of roots)
FullyDistSpVec<IT, IT> pOfNonStars = EWiseApply<IT>(nonStars, parents,
[](short isStar, IT p){return p;},
[](short isStar, IT p){return true;},
false, static_cast<short>(0));
FullyDistSpVec<IT,IT> gpOfNonStars = Extract(parents, pOfNonStars);
FullyDistSpVec<IT,short> keptNonStars = EWiseApply<short>(pOfNonStars, gpOfNonStars,
[](IT p, IT gp){return static_cast<short>(NONSTAR);},
[](IT p, IT gp){return p!=gp;},
false, false, static_cast<IT>(0), static_cast<IT>(0));
stars.Set(keptNonStars); // setting level > 2 vertices as nonstars
// identify grand parents of kept nonstars
FullyDistSpVec<IT,IT> gpOfKeptNonStars = EWiseApply<IT>(pOfNonStars, gpOfNonStars,
[](IT p, IT gp){return gp;},
[](IT p, IT gp){return p!=gp;},
false, false, static_cast<IT>(0), static_cast<IT>(0));
//FullyDistSpVec<IT, short> fixedNS = gpOfKeptNonStars;
//fixedNS = NONSTAR;
FullyDistSpVec<IT, short> gpNonStar= Assign(gpOfKeptNonStars, NONSTAR);
stars.Set(gpNonStar);
// remaining vertices: level-1 leaves of nonstars and any vertices in previous stars (iteration 1 only)
FullyDistSpVec<IT,short> spStars(stars, [](short isStar){return isStar==STAR;});
// further optimization can be done to remove previous stars
FullyDistSpVec<IT, IT> pOfStars = EWiseApply<IT>(spStars, parents,
[](short isStar, IT p){return p;},
[](short isStar, IT p){return true;},
false, static_cast<short>(0));
FullyDistSpVec<IT,short> isParentStar = Extract(stars, pOfStars);
stars.Set(isParentStar);
}
*/
// In iteration>1:
// We have only CONVERGED or NONSTAR vertices
// some of the NONSTAR vertices may become STAR in the last shortcut operation
// We would like to identify those new stars
// In iteration 1:
// we have STAR and NONSTAR vertices
// every hooked vertex is marked as NONSTAR
// roots are marked as STAR (including singletons)
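// Example (sketch): parents = [0,0,1,3]. Vertex 2 sits at level 2: its parent (1)
// and grandparent (0) differ, so vertices 0, 1, and 2 are flagged NONSTAR,
// while vertex 3 (a root with no deep children) remains a STAR.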
template <typename IT>
void StarCheck(FullyDistVec<IT, IT>& parents, FullyDistVec<IT,short>& stars)
{
// this is done here so that in the first iteration, we don't process STAR vertices
// all current nonstars
FullyDistSpVec<IT,short> nonStars(stars, [](short isStar){return isStar==NONSTAR;});
// initialize all nonstars to stars
stars.Apply([](short isStar){return isStar==NONSTAR? STAR: isStar;});
// parents of all current nonstars
FullyDistSpVec<IT, IT> pOfNonStars = EWiseApply<IT>(nonStars, parents,
[](short isStar, IT p){return p;},
[](short isStar, IT p){return true;},
false, static_cast<short>(0));
// parents of all current nonstars indexed by parent
// any vertex with a child should be here
// leaves are not present as indices, but roots are present
FullyDistSpVec<IT,short> pOfNonStarsIdx = Assign(pOfNonStars, NONSTAR);
// copy parent information (the values are grandparents)
FullyDistSpVec<IT,IT> gpOfNonStars_pindexed = EWiseApply<IT>(pOfNonStarsIdx, parents,
[](short isStar, IT p){return p;},
[](short isStar, IT p){return true;},
false, static_cast<short>(0));
// identify if they are parents/grandparents of a vertex with level > 2
FullyDistSpVec<IT,IT> temp = gpOfNonStars_pindexed;
temp.setNumToInd();
gpOfNonStars_pindexed = EWiseApply<IT>(temp, gpOfNonStars_pindexed,
[](IT p, IT gp){return gp;},
[](IT p, IT gp){return p!=gp;},
false, false, static_cast<IT>(0), static_cast<IT>(0));
// index has parents of vertices with level > 2
// value has grand parents of vertices with level > 2
// update parents
// All vertices (except the root and leaves) in a non-star tree will be updated
stars.EWiseApply(gpOfNonStars_pindexed, [](short isStar, IT idx){return static_cast<short>(NONSTAR);},
false, static_cast<IT>(NONSTAR));
// now everything is updated except the root and leaves of nonstars
// identify roots (indexed by level-1 vertices)
FullyDistSpVec<IT,IT> rootsOfNonStars = EWiseApply<IT>(pOfNonStars, stars,
[](IT p, short isStar){return p;},
[](IT p, short isStar){return isStar==NONSTAR;},
false, static_cast<IT>(0));
FullyDistSpVec<IT,short> rootsOfNonStarsIdx = Assign(rootsOfNonStars, NONSTAR);
stars.Set( rootsOfNonStarsIdx);
// remaining vertices
// they must be stars (created after the shortcut) or level-1 leaves of a non-star
FullyDistSpVec<IT,IT> pOflevel1V = EWiseApply<IT>(nonStars, stars,
[](short s, short isStar){return static_cast<IT> (s);},
[](short s, short isStar){return isStar==STAR;},
false, static_cast<short>(0));
pOflevel1V = EWiseApply<IT>(pOflevel1V, parents,
[](IT s, IT p){return p;},
[](IT s, IT p){return true;},
false, static_cast<IT>(0));
FullyDistSpVec<IT,short> isParentStar = Extract(stars, pOflevel1V);
stars.Set(isParentStar);
}
template <typename IT, typename NT, typename DER>
FullyDistSpVec<IT, IT> ConditionalHook(const SpParMat<IT, NT, DER>& A, FullyDistVec<IT, IT>& parent, FullyDistVec<IT, short> stars, int iteration)
{
#ifdef CC_TIMING
double t1 = MPI_Wtime();
#endif
FullyDistVec<IT, IT> minNeighborparent (A.getcommgrid());
minNeighborparent = SpMV<Select2ndMinSR<NT, IT>>(A, parent); // value is the minimum of all neighbors' parents
#ifdef CC_TIMING
double tspmv = MPI_Wtime() - t1;
#endif
FullyDistSpVec<IT,IT> hooksMNP(stars, [](short isStar){ return isStar == STAR; });
hooksMNP = EWiseApply<IT>(hooksMNP, minNeighborparent, [](IT x, IT mnp){ return mnp; },
[](IT x, IT mnp){ return true; }, false, static_cast<IT> (0));
hooksMNP = EWiseApply<IT>(hooksMNP, parent, [](IT mnp, IT p){ return mnp; },
[](IT mnp, IT p){ return p > mnp; }, false, static_cast<IT> (0));
FullyDistSpVec<IT, IT> finalhooks(A.getcommgrid());
if(iteration == 1)
{
finalhooks = hooksMNP;
}
else
{
FullyDistSpVec<IT,IT> hooksP = EWiseApply<IT>(hooksMNP, parent, [](IT mnp, IT p){ return p; },
[](IT mnp, IT p){ return true; }, false, static_cast<IT> (0));
finalhooks = Assign(hooksP, hooksMNP);
}
parent.Set(finalhooks);
#ifdef CC_TIMING
double tall = MPI_Wtime() - t1;
std::ostringstream outs;
outs.str("");
outs.clear();
outs << " Conditional Hooking Time: SpMV: " << tspmv << " Other: "<< tall-tspmv;
outs<< endl;
SpParHelper::Print(outs.str());
#endif
return finalhooks;
}
template <typename IT, typename NT, typename DER>
FullyDistSpVec<IT, IT> UnconditionalHook2(const SpParMat<IT, NT, DER>& A, FullyDistVec<IT, IT>& parents, FullyDistVec<IT, short> stars)
{
#ifdef CC_TIMING
double ts = MPI_Wtime();
double t1, tspmv;
#endif
string spmv = "dense";
IT nNonStars = stars.Reduce(std::plus<IT>(), static_cast<IT>(0), [](short isStar){ return static_cast<IT>(isStar == NONSTAR); });
IT nv = A.getnrow();
FullyDistSpVec<IT, IT> hooks(A.getcommgrid(), nv);
if(nNonStars * 50 < nv) // use SpMSpV
{
spmv = "sparse";
FullyDistSpVec<IT,IT> nonStars(stars, [](short isStar){ return isStar == NONSTAR; });
FullyDistSpVec<IT, IT> pOfNonStars = EWiseApply<IT>(nonStars, parents,
[](short isStar, IT p){return p;},
[](short isStar, IT p){return true;},
false, static_cast<IT>(0));
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
SpMV<Select2ndMinSR<NT, IT>>(A, pOfNonStars, hooks, false);
#ifdef CC_TIMING
tspmv = MPI_Wtime() - t1;
#endif
hooks = EWiseApply<IT>(hooks, stars, [](IT mnp, short isStar){ return mnp; },
[](IT mnp, short isStar){ return isStar == STAR; },
false, static_cast<IT> (0));
}
else // use SpMV
{
FullyDistVec<IT, IT> parents1 = parents;
parents1.EWiseApply(stars, [nv](IT p, short isStar){ return isStar == STAR? nv: p; });
FullyDistVec<IT, IT> minNeighborParent ( A.getcommgrid());
#ifdef CC_TIMING
t1 = MPI_Wtime();
#endif
minNeighborParent = SpMV<Select2ndMinSR<NT, IT>>(A, parents1); // value is the minimum of all neighbors' parents
#ifdef CC_TIMING
tspmv = MPI_Wtime() - t1;
#endif
hooks = minNeighborParent.Find([nv](IT mnf){ return mnf != nv; });
hooks = EWiseApply<IT>(hooks, stars, [](IT mnp, short isStar){ return mnp; },
[](IT mnp, short isStar){ return isStar==STAR; },
false, static_cast<IT> (0));
}
FullyDistSpVec<IT,IT> hooksP = EWiseApply<IT>(hooks, parents, [](IT mnp, IT p){return p;},
[](IT mnp, IT p){return true;}, false, static_cast<IT> (0));
FullyDistSpVec<IT, IT> finalHooks = Assign(hooksP, hooks);
parents.Set(finalHooks);
#ifdef CC_TIMING
double tall = MPI_Wtime() - ts;
std::ostringstream outs;
outs.str("");
outs.clear();
outs << " Unconditional Hooking Time " << spmv << " : " << tspmv << " Other: "<< tall-tspmv;
outs<< endl;
SpParHelper::Print(outs.str());
#endif
return finalHooks;
}
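// Pointer jumping: replace each vertex's parent by its grandparent.
// Example: parent = [0,0,1,2] becomes parent(parent) = [0,0,0,1].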
template <typename IT>
void Shortcut(FullyDistVec<IT, IT>& parent)
{
FullyDistVec<IT, IT> grandparent = parent(parent);
parent = grandparent; // we can do it unconditionally because it is trivially true for stars
}
// before the shortcut, we mark all remaining stars as inactive (converged)
// shortcut only on nonstar vertices
// then find stars among the nonstar vertices
template <typename IT>
void Shortcut(FullyDistVec<IT, IT>& parents, FullyDistVec<IT,short> stars)
{
FullyDistSpVec<IT,short> spNonStars(stars, [](short isStar){return isStar==NONSTAR;});
FullyDistSpVec<IT, IT> parentsOfNonStars = EWiseApply<IT>(spNonStars, parents,
[](short isStar, IT p){return p;},
[](short isStar, IT p){return true;},
false, static_cast<short>(0));
FullyDistSpVec<IT,IT> grandParentsOfNonStars = Extract(parents, parentsOfNonStars);
parents.Set(grandParentsOfNonStars);
}
template <typename IT, typename NT, typename DER>
bool neigborsInSameCC(const SpParMat<IT,NT,DER>& A, FullyDistVec<IT, IT>& cclabel)
{
FullyDistVec<IT, IT> minNeighborCCLabel ( A.getcommgrid());
minNeighborCCLabel = SpMV<Select2ndMinSR<NT, IT>>(A, cclabel);
return minNeighborCCLabel==cclabel;
}
// works only on P=1
template <typename IT, typename NT, typename DER>
void Correctness(const SpParMat<IT,NT,DER>& A, FullyDistVec<IT, IT>& cclabel, IT nCC, FullyDistVec<IT,IT> parent)
{
DER* spSeq = A.seqptr(); // local submatrix
for(auto colit = spSeq->begcol(); colit != spSeq->endcol(); ++colit) // iterate over columns
{
IT j = colit.colid(); // local numbering
for(auto nzit = spSeq->begnz(colit); nzit < spSeq->endnz(colit); ++nzit)
{
IT i = nzit.rowid();
if( cclabel[i] != cclabel[j])
{
std::cout << i << " (" << parent[i] << ", "<< cclabel[i] << ")& "<< j << "("<< parent[j] << ", " << cclabel[j] << ")\n";
}
}
}
}
// Input:
// parent: parent of each vertex. parent is essentially the root of the star to which a vertex belongs.
// parent of the root is itself
// Output:
// cclabel: connected components are incrementally labeled
// returns the number of connected components
// Example: input = [0, 0, 2, 3, 0, 2], output = (0, 0, 1, 2, 0, 1), return 3
template <typename IT>
IT LabelCC(FullyDistVec<IT, IT>& parent, FullyDistVec<IT, IT>& cclabel)
{
cclabel = parent;
cclabel.ApplyInd([](IT val, IT ind){return val==ind ? -1 : val;});
FullyDistSpVec<IT, IT> roots (cclabel, bind2nd(std::equal_to<IT>(), -1));
// parents of leaves are still correct
FullyDistSpVec<IT, IT> pOfLeaves (cclabel, bind2nd(std::not_equal_to<IT>(), -1));
roots.nziota(0);
cclabel.Set(roots);
FullyDistSpVec<IT,IT> labelOfParents = Extract(cclabel, pOfLeaves);
cclabel.Set(labelOfParents);
//cclabel = cclabel(parent);
return roots.getnnz();
}
template <typename IT, typename NT, typename DER>
FullyDistVec<IT, IT> CC(SpParMat<IT,NT,DER>& A, IT& nCC)
{
IT nrows = A.getnrow();
FullyDistVec<IT,IT> parent(A.getcommgrid());
parent.iota(nrows, 0); // parent(i)=i initially
FullyDistVec<IT, short> stars(A.getcommgrid(), nrows, STAR); // initially every vertex belongs to a star
int iteration = 1;
std::ostringstream outs;
// isolated vertices are marked as converged
NT NullBValue;
FullyDistVec<int64_t, NT> degree = A.Reduce(Column, plus<NT>(), NullBValue, [](NT val) { return val; });
stars.EWiseApply(degree, [](short isStar, NT degree) { return degree.overhang == 0? CONVERGED: isStar; });
int nthreads = 1;
#ifdef THREADED
#pragma omp parallel
{
nthreads = omp_get_num_threads();
}
#endif
SpParMat<IT, bool, SpDCCols <IT, bool>> Abool = A;
Abool.ActivateThreading(nthreads*4);
while (true)
{
#ifdef CC_TIMING
double t1 = MPI_Wtime();
#endif
// GGGG: NT doesn't matter here
FullyDistSpVec<IT, IT> condhooks = ConditionalHook(Abool, parent, stars, iteration);
#ifdef CC_TIMING
double t_cond_hook = MPI_Wtime() - t1;
t1 = MPI_Wtime();
#endif
// In any iteration other than the first,
// a non-star is formed after a conditional hooking
// In the first iteration, we can hook two vertices to create a star
// After the first iteration, only singleton CCs remain isolated
// Here, we are ignoring the first iteration (still correct, but may miss a few possible
// unconditional hookings in the first iteration)
// remove cond hooks from stars
if(iteration > 1)
{
// GGGG: NT doesn't matter here
StarCheckAfterHooking(Abool, parent, stars, condhooks, true);
}
else
{
// in the first iteration, every conditionally hooked vertex and its parent become nonstars;
// no star check is needed because every tree has height at most one at this point
stars.EWiseApply(condhooks, [](short isStar, IT x){ return static_cast<short>(NONSTAR); },
false, static_cast<IT>(NONSTAR));
FullyDistSpVec<IT, short> pNonStar= Assign(condhooks, NONSTAR);
stars.Set(pNonStar);
// it does not create any cycle in the unconditional hooking, see the proof in the paper
}
#ifdef CC_TIMING
double t_starcheck1 = MPI_Wtime() - t1;
t1 = MPI_Wtime();
#endif
FullyDistSpVec<IT, IT> uncondHooks = UnconditionalHook2(Abool, parent, stars);
#ifdef CC_TIMING
double t_uncond_hook = MPI_Wtime() - t1;
t1 = MPI_Wtime();
#endif
if(iteration > 1)
{
StarCheckAfterHooking(Abool, parent, stars, uncondHooks, false);
stars.Apply([](short isStar){return isStar==STAR? CONVERGED: isStar;});
}
else
{
// in the first iteration, every unconditionally hooked vertex becomes a nonstar;
// parents are handled by the subsequent star check
stars.EWiseApply(uncondHooks, [](short isStar, IT x){return static_cast<short>(NONSTAR);},
false, static_cast<IT>(NONSTAR));
}
IT nconverged = stars.Reduce(std::plus<IT>(), static_cast<IT>(0), [](short isStar){return static_cast<IT>(isStar==CONVERGED);});
if(nconverged==nrows)
{
outs.clear();
outs << "Iteration: " << iteration << " converged: " << nrows << " stars: 0" << " nonstars: 0" ;
outs<< endl;
SpParHelper::Print(outs.str());
break;
}
#ifdef CC_TIMING
double t_starcheck2 = MPI_Wtime() - t1;
t1 = MPI_Wtime();
#endif
Shortcut(parent, stars);
#ifdef CC_TIMING
double t_shortcut = MPI_Wtime() - t1;
t1 = MPI_Wtime();
#endif
StarCheck(parent, stars);
#ifdef CC_TIMING
double t_starcheck = MPI_Wtime() - t1;
t1 = MPI_Wtime();
#endif
IT nonstars = stars.Reduce(std::plus<IT>(), static_cast<IT>(0), [](short isStar){return static_cast<IT>(isStar==NONSTAR);});
IT nstars = nrows - (nonstars + nconverged);
double t2 = MPI_Wtime();
outs.str("");
outs.clear();
outs << "Iteration: " << iteration << " converged: " << nconverged << " stars: " << nstars << " nonstars: " << nonstars;
#ifdef CC_TIMING
//outs << " Time: t_cond_hook: " << t_cond_hook << " t_starcheck1: " << t_starcheck1 << " t_uncond_hook: " << t_uncond_hook << " t_starcheck2: " << t_starcheck2 << " t_shortcut: " << t_shortcut << " t_starcheck: " << t_starcheck;
#endif
outs << endl;
SpParHelper::Print(outs.str());
iteration++;
}
FullyDistVec<IT, IT> cc(parent.getcommgrid());
nCC = LabelCC(parent, cc);
//Correctness(A, cc, nCC, parent);
return cc;
}
template <typename IT>
void PrintCC(FullyDistVec<IT, IT> CC, IT nCC)
{
for(IT i=0; i< nCC; i++)
{
FullyDistVec<IT, IT> ith = CC.FindInds(bind2nd(std::equal_to<IT>(), i));
ith.DebugPrint();
}
}
// Print the size of the first 4 clusters
template <typename IT>
void First4Clust(FullyDistVec<IT, IT>& cc)
{
FullyDistSpVec<IT, IT> cc1 = cc.Find([](IT label){return label==0;});
FullyDistSpVec<IT, IT> cc2 = cc.Find([](IT label){return label==1;});
FullyDistSpVec<IT, IT> cc3 = cc.Find([](IT label){return label==2;});
FullyDistSpVec<IT, IT> cc4 = cc.Find([](IT label){return label==3;});
std::ostringstream outs;
outs.str("");
outs.clear();
outs << "Size of the first component: " << cc1.getnnz() << std::endl;
outs << "Size of the second component: " << cc2.getnnz() << std::endl;
outs << "Size of the third component: " << cc3.getnnz() << std::endl;
outs << "Size of the fourth component: " << cc4.getnnz() << std::endl;
SpParHelper::Print(outs.str());
}
template <typename IT>
void HistCC(FullyDistVec<IT, IT> CC, IT nCC)
{
FullyDistVec<IT, IT> ccSizes(CC.getcommgrid(), nCC, 0);
for(IT i = 0; i < nCC; i++)
{
FullyDistSpVec<IT, IT> ith = CC.Find(bind2nd(std::equal_to<IT>(), i));
ccSizes.SetElement(i, ith.getnnz());
}
IT largestCCSize = ccSizes.Reduce(maximum<IT>(), static_cast<IT>(0));
const IT * locCCSizes = ccSizes.GetLocArr();
int numBins = 200;
std::vector<IT> localHist(numBins,0);
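// map each component size in [0, largestCCSize] linearly onto bins [0, numBins-1]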
for(IT i=0; i< ccSizes.LocArrSize(); i++)
{
IT bin = (locCCSizes[i]*(numBins-1))/largestCCSize;
localHist[bin]++;
}
std::vector<IT> globalHist(numBins,0);
MPI_Comm world = CC.getcommgrid()->GetWorld();
MPI_Reduce(localHist.data(), globalHist.data(), numBins, MPIType<IT>(), MPI_SUM, 0, world);
int myrank;
MPI_Comm_rank(world,&myrank);
if(myrank==0)
{
std::cout << "The largest component size: " << largestCCSise << std::endl;
std::ofstream output;
output.open("hist.txt", std::ios_base::app );
std::copy(globalHist.begin(), globalHist.end(), std::ostream_iterator<IT> (output, " "));
output << std::endl;
output.close();
}
}
} |
selramp.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
static PyObject *selramp(PyObject *self, PyObject *args, PyObject *keywds);
static PyObject *selramp(PyObject *self, PyObject *args, PyObject *keywds)
{
PyObject *etc;
PyArrayObject *x,*y, *rampparams;
double goal,r0,r1,r2,x0,pm;
int i;
npy_intp dims[1];
// etc = PyList_New(0);
static char *kwlist[] = {"rampparams","x","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&rampparams,&x,&etc))
{
return NULL;
}
goal = IND(rampparams,0);
r0 = IND(rampparams,1);
r1 = IND(rampparams,2);
r2 = IND(rampparams,3);
x0 = IND(rampparams,4);
pm = IND(rampparams,5);
dims[0] = x->dimensions[0];
y = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
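/* Evaluate y[i] = goal + pm*exp(-r0*x[i] + r1) + r2*(x[i] - x0) in parallel. */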
#pragma omp parallel for
for(i=0;i<dims[0];i++)
{
IND(y,i) = goal + pm*exp(-r0*IND(x,i)+r1) + r2*(IND(x,i)-x0);
}
return PyArray_Return(y);
}
static char module_docstring[]="\
Computes an exponential ramp with an additional linear trend:\n\
y = goal + pm*exp(-r0*x + r1) + r2*(x - x0)\n\
\n\
Parameters\n\
----------\n\
rampparams: 1D array of model parameters [goal, r0, r1, r2, x0, pm]\n\
x: 1D array of independent values (e.g. time or phase)\n\
etc: optional, unused\n\
\n\
Returns\n\
-------\n\
y: 1D array of model values evaluated at x\n\
\n\
Revisions\n\
---------\n\
2010-07-30 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\n\
2010-12-24 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to C\n\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\n\
";
static PyMethodDef module_methods[] = {
{"selramp",(PyCFunction)selramp,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_selramp(void)
#else
initselramp(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"selramp", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("selramp", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
|
LAGraph_bfs_pushpull.c | //------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: push-pull breadth-first search
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2020 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search,
// contributed by Tim Davis, Texas A&M.
// LAGraph_bfs_pushpull computes the BFS of a graph from a single given
// source node. The result is a vector v where v(i)=k if node i was placed
// at level k in the BFS.
// Usage:
// info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, dest, max_level, vsparse) ;
// GrB_Vector *v: a vector containing the result, created on output.
// v(i) = k is the BFS level of node i in the graph, where a source
// node has v(source)=1. v(i) is implicitly zero if it is unreachable
// from the source node. That is, GrB_Vector_nvals (&nreach,v) is the
// size of the reachable set of the source node, for a single-source
// BFS. v may be returned as sparse, or full. If full, v(i)=0
// indicates that node i was not reached. If sparse, the pattern of v
// indicates the set of nodes reached.
// GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing.
// pi(source) = source+1 for source node. pi(i) = p+1 if p is the
// parent of i. If pi is sparse, and pi(i) is not present, then node
// i has not been reached. Otherwise, if pi is full, then pi(i)=0
// indicates that node i was not reached.
// GrB_Matrix A: a square matrix of any type. The values of A are not
// accessed. The presence of the entry A(i,j) indicates the edge
// (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge.
// GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm
// is a conventional push-only BFS. If not NULL, AT must be the
// transpose of A, and a push-pull algorithm is used (NOTE: this
// assumes GraphBLAS stores its matrix in CSR form; see discussion
// below). Results are undefined if AT is not NULL but not identical
// to the transpose of A.
// int64_t source: the source node for the BFS.
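// int64_t *dest: an optional destination node. If non-NULL, the BFS
// terminates early once node (*dest) has been assigned a level.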
// int64_t max_level: An optional limit on the levels searched for the
// single-source BFS. If zero, then no limit is enforced. If > 0,
// then only nodes with v(i) <= max_level will be visited. That is:
// 1: just the source node, 2: the source and its neighbors, 3: the
// source node, its neighbors, and their neighbors, etc.
// bool vsparse: if the result v may remain very sparse, then set this
// parameter to true. If v might have many entries, set it false. If
// you are unsure, then set it to true. This parameter speeds up
// the handling of v. If you guess wrong, there is a slight
// performance penalty. The results are not affected by this
// parameter, just the performance. This parameter is used only for
// the single-source BFS.
// single-source BFS:
// Given a graph A, a source node, find all nodes reachable from the
// source node. v(source)=1, v(i)=2 if edge (source,i) appears in the
// graph, and so on. If node i is not reachable from source, then
// implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not
// an entry in this vector.
// This algorithm can use the push-pull strategy, which requires both A and
// AT=A' to be passed in. If the graph is known to be symmetric, then the same
// matrix A can be passed in for both arguments. Results are undefined if AT
// is not the transpose of A.
// If only A or AT is passed in, then only single strategy will be used: push
// or pull, but not both. In general, push-only performs well. A pull-only
// strategy is possible but it is exceedingly slow. Assuming A and AT are both
// in CSR format, then (let s = source node):
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!)
// If A and AT are both in CSC format, then:
// LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest)
// LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good)
// LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!)
// Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS
// detects this case and refuses to do it.
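// A typical call (sketch; A and AT are GrB_Matrix handles the caller has
// already built, and source is any node id):
// GrB_Vector v = NULL, pi = NULL ;
// GrB_Info info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source,
// NULL, 0, true) ; // no destination, no level cap, v assumed sparse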
// The basic step of this algorithm computes A'*q where q is the 'queue' of
// nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' =
// A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing,
// just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is
// simultaneously a row and column vector, so q and q' are interchangeable.
// To implement an efficient BFS using GraphBLAS, an assumption must be made in
// LAGraph about how the matrix is stored, whether by row or by column (or
// perhaps some other opaque data structure). The storage format has a huge
// impact on the relative performance of vxm(q,A) and mxv(AT,q).
// Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily
// accessible. In terms of the graph A, this means that the out-adjacency
// list of node i can be traversed in time O(out-degree of node i).
// If AT is stored by row, then AT(i,:) is the in-adjacency list of node i,
// and traversing row i of AT can be done in O(in-degree of node i) time.
// The CSR (Compressed Sparse Row) format is the default for
// SuiteSparse:GraphBLAS, but no assumption can be made about any particular
// GraphBLAS library implementation.
// If A and AT are both stored by column instead, then A(i,:) is not easy to
// access. Instead, A(:,i) is the easily-accessible in-adjacency of node i,
// and AT(:,i) is the out-adjacency.
// A push step requires the out-adjacencies of each node, whereas
// a pull step requires the in-adjacencies of each node.
// vxm(q,A) = A'*q, with A stored by row: a push step
// mxv(AT,q) = A'*q, with AT stored by row: a pull step
// vxm(q,A) = A'*q, with A stored by col: a pull step
// mxv(AT,q) = A'*q, with AT stored by col: a push step
// The GraphBLAS data structure is opaque. An implementation may decide to
// store the matrix A in both formats, internally, so that it can easily traverse
// both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i)
// can both be easily traversed). This would make a push-pull BFS easy to
// implement using just the opaque GrB_Matrix A, but it doubles the storage.
// Deciding which format to use automatically is not a simple task,
// particularly since the decision must work well throughout GraphBLAS, not
// just for the BFS.
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column).
// As a result, the MATLAB expression x=AT*q is a push step, computed using a
// saxpy-based algorithm internally, and x=A'*q is a pull step, computed using
// a dot product.
// SuiteSparse:GraphBLAS can store a matrix in either format, but this requires
// an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)), where
// f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library
// could be augmented in the future with f = GxB_BY_BOTH. It currently does
// not select the format automatically. As a result, if GxB_set is not used,
// all its GrB_Matrix objects are stored by row (CSR).
// SuiteSparse:GraphBLAS allows the user to query (via GxB_get) and set (via
// GxB_set) the format, whether by row or by column. The hypersparsity of
// A is selected automatically, with optional hints from the user application,
// but a selection between hypersparsity vs standard CSR and CSC has no effect
// on the push vs pull decision made here.
// The push/pull and saxpy/dot connection can be described as follows.
// Assume for these first two examples that MATLAB stores its matrices in CSR
// format, where accessing A(i,:) is fast.
// If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB
// notation as:
/*
function x = vxm (q,A)
% a push step: compute x = q'*A where q is a column vector
x = sparse (1,n)
for i = 1:n
% a saxpy operation, using the ith row of A and the scalar q(i)
x = x + q (i) * A (i,:)
end
*/
// If AT is stored by row, then x = mvx(AT,q) = AT*q = A'*q becomes
// a dot product:
/*
function x = mxv (AT,q)
% a pull step: compute x = AT*q where q is a column vector
for i = 1:n
% a dot-product of the ith row of AT and the column vector q
x (i) = AT (i,:) * q
end
*/
// The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and
// mxv(AT,q) by default, where A and AT are stored by row by default. However,
// they would be very slow in MATLAB, since it stores its sparse matrices in
// CSC format. In that case, if A is stored by column and thus accessing
// A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product
// instead. These two snippets assume the matrices are both in CSC format, and
// thus make more efficient use of MATLAB:
/*
function x = vxm (q,A)
% a pull step: compute x = q'*A where q is a column vector
for j = 1:n
% a dot product of the row vector q' and the jth column of A
x (j) = q' * A (:,j)
end
*/
// If AT is stored by column, then x = mvx(AT,q) is
/*
function x = mxv (AT,q)
% a push step: compute x = AT*q where q is a column vector
for j = 1:n
% a saxpy operation, using the jth column of AT and the scalar q(j)
x = x + AT (:,j) * q (j)
end
*/
// In MATLAB, if q is a sparse column vector and A is a sparse matrix, then
// x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a
// dot product. You can view the code used internally in MATLAB for its sparse
// matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT
// packages, at http://suitesparse.com.
// This raises an interesting puzzle for LAGraph, which is intended to be a
// graph library that can be run on any implementation of GraphBLAS. There are
// no mechanisms in the GraphBLAS C API for LAGraph (or other external packages
// or user applications) to provide hints to GraphBLAS. Likewise, there are no
// query mechanisms where LAGraph can ask GraphBLAS how its matrices might be
// stored (LAGraph asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer
// from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it
// does not answer this query.
// There are two solutions to this puzzle. The most elegant one is for
// GraphBLAS to handle all this internally, and change formats as needed. It
// could choose to store A in both CSR and CSC format, or use an entirely
// different data structure, and it would make the decision between the push or
// pull, at each step of the BFS. This is not a simple task since the API is
// complex. Furthermore, the selection of the data structure for A has
// implications on all other GraphBLAS operations (submatrix assignment and
// extraction, for example).
// However, if A were to be stored in both CSR and CSC format, inside the
// opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would
// become a push-pull BFS.
// The second solution is to allow the user application or library such as
// LAGraph to provide hints and allow it to query the GraphBLAS library.
// There are no such features in the GraphBLAS C API.
// SuiteSparse:GraphBLAS takes the second approach: It adds two functions that
// are extensions to the API: GxB_set changes the format (CSR or CSC), and
// GxB_get can query the format. Even with this simplification,
// SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm
// (per semiring), and selects between them automatically. By default, all of
// its matrices are stored in CSR format (either sparse or hypersparse,
// selected automatically). So if no GxB_* extensions are used, all matrices
// are in CSR format.
// If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this
// particular function assumes that its input matrices are in CSR format, or at
// least A(i,:) and AT(i,:) can be easily accessed. With this assumption, it
// is the responsibility of this function to select between using a push or a
// pull, for each step in the BFS.
// The following analysis assumes CSR format, and it assumes that dot-product
// (a pull step) can terminate early via a short-circuit rule with the OR
// monoid, as soon as it encounters a TRUE value. This cuts the time for the
// dot-product. Not all GraphBLAS libraries may use this, but SuiteSparse:
// GraphBLAS does (in version 2.3.0 and later). Early termination cannot be
// done for the saxpy (push step) method.
// The work done by the push method (saxpy) is very predictable. BFS uses a
// complemented mask. There is no simple way to exploit a complemented mask,
// and saxpy has no early termination rule. If the set of nodes in the current
// level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree,
// this becomes d*nq where nq = length (q):
// pushwork = d*nq
// The work done by the pull (dot product) method is less predictable. It can
// exploit the complemented mask, and so it only computes (n-nvisited) dot
// products, if nvisited is the # of nodes visited so far (in all levels).
// With no early-termination, the dot product will take d * log2 (nq) time,
// assuming that q is large and a binary search is used internally. That is,
// the dot product will scan through the d entries in A(i,:), and do a binary
// search for each entry in q. To account for the higher constant of a binary
// search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination,
// d is too high. If the nodes are randomly marked, the probability of each
// node being marked is nvisited/n. The expected number of trials until
// success, for a sequence of events with probability p, is 1/p. Thus, the
// expected number of iterations in a dot product before an early termination
// is 1/p = (n/nvisited+1), where +1 is added to avoid a divide by zero.
// However, it cannot exceed d. Thus, the total work for the dot product
// (pull) method can be estimated as:
// per_dot = min (d, n / (nvisited+1))
// pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq)))
// The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later,
// and may be reasonable for other GraphBLAS implementations. Push or pull
// is selected as the one with the least work.
// TODO: change the formula for v3.2.0
// The push/pull decision requires that both A and AT be passed in, but this
// function can use just one or the other. If only A is passed in and AT is
// NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull
// step if A is CSC). If only AT is passed in and A is NULL, then only
// mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is
// CSC).
// In general, while a push-pull strategy is the fastest, a push-only BFS will
// give good performance. In particular, the time to compute AT=A' plus the
// time for the push-pull BFS is typically higher than just a push-only BFS.
// This is why this function does not compute AT=A'. To take advantage of the
// push-pull method, both A and AT must already be available, with the cost to
// construct them amortized across other computations such as this one.
// A pull-only strategy will be *exceedingly* slow.
// The input matrix A must be square. It can be non-binary, but best
// performance will be obtained if it is GrB_BOOL. It can have explicit
// entries equal to zero. These are safely ignored, and are treated as
// non-edges.
// SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs.
// In this case, if both matrices are provided, they must be in the same
// format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC
// format, vxm(q,A) is the pull step and mxv(AT,q) is the push step.
// If only A or AT are provided, and the result is a pull-only algorithm,
// an error is returned.
// References:
// Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull
// Efficiently in GraphBLAS. In Proceedings of the 47th International
// Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA,
// Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122
// Scott Beamer, Krste Asanovic and David A. Patterson,
// The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/
#include "LAGraph_bfs_pushpull.h"
#include "../config.h"
#define LAGRAPH_FREE_ALL \
{ \
GrB_free (&v) ; \
GrB_free (&t) ; \
GrB_free (&q) ; \
GrB_free (&pi) ; \
}
#define LAGRAPH_ERROR(message,info) \
{ \
fprintf (stderr, "LAGraph error: %s\n[%d]\nFile: %s Line: %d\n", \
message, info, __FILE__, __LINE__) ; \
LAGRAPH_FREE_ALL ; \
return (info) ; \
}
#define LAGRAPH_MAX(x,y) (((x) > (y)) ? (x) : (y))
#define LAGRAPH_MIN(x,y) (((x) < (y)) ? (x) : (y))
GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL
(
GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph
GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i.
// if NULL, the parent is not computed.
GrB_Matrix A, // input graph, treated as if boolean in semiring
GrB_Matrix AT, // transpose of A (optional; push-only if NULL)
int64_t source, // starting node of the BFS
int64_t *dest, // optional destination node of the BFS
int64_t max_level, // optional limit of # levels to search
bool vsparse // if true, v is expected to be very sparse
) {
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Vector q = NULL ; // nodes visited at each level
GrB_Vector v = NULL ; // result vector
GrB_Vector t = NULL ; // temporary vector
GrB_Vector pi = NULL ; // parent vector
if(v_output == NULL || (A == NULL && AT == NULL)) {
// required output argument is missing
LAGRAPH_ERROR("required arguments are NULL", GrB_NULL_POINTER) ;
}
(*v_output) = NULL ;
bool compute_tree = (pi_output != NULL) ;
GrB_Index nrows, ncols, nvalA, ignore, nvals ;
// A is provided. AT may or may not be provided
GrB_Matrix_nrows(&nrows, A) ;
GrB_Matrix_ncols(&ncols, A) ;
GrB_Matrix_nvals(&nvalA, A) ;
bool use_vxm_with_A = true ;
// push/pull requires both A and AT
bool push_pull = (A != NULL && AT != NULL) ;
if(nrows != ncols) {
// A must be square
LAGRAPH_ERROR("A must be square", GrB_NULL_POINTER) ;
}
//--------------------------------------------------------------------------
// initializations
//--------------------------------------------------------------------------
GrB_Index n = nrows ;
int nthreads;
Config_Option_get(Config_OPENMP_NTHREAD, &nthreads);
nthreads = LAGRAPH_MIN(n / 4096, nthreads) ;
nthreads = LAGRAPH_MAX(nthreads, 1) ;
// just traverse from the source node
max_level = (max_level <= 0) ? n : LAGRAPH_MIN(n, max_level) ;
// create an empty vector v
GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ;
GrB_Vector_new(&v, int_type, n) ;
// make v dense if requested
int64_t vlimit = LAGRAPH_MAX(256, sqrt((double) n)) ;
if(!vsparse) {
// v is expected to have many entries, so convert v to dense.
// If the guess is wrong, v can be made dense later on.
GrB_assign(v, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// create a scalar to hold the destination value
GrB_Index dest_val ;
GrB_Semiring first_semiring, second_semiring ;
if(compute_tree) {
// create an integer vector q, and set q(source) to source+1
GrB_Vector_new(&q, int_type, n) ;
GrB_Vector_setElement(q, source + 1, source) ;
if(n > INT32_MAX) {
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT64 ;
second_semiring = GxB_ANY_SECOND_INT64 ;
} else {
// terminates as soon as it finds any parent; nondeterministic
first_semiring = GxB_ANY_FIRST_INT32 ;
second_semiring = GxB_ANY_SECOND_INT32 ;
}
// create the empty parent vector
GrB_Vector_new(&pi, int_type, n) ;
if(!vsparse) {
// make pi a dense vector of all zeros
GrB_assign(pi, NULL, NULL, 0, GrB_ALL, n, NULL) ;
}
// pi (source) = source+1 denotes a root of the BFS tree
GrB_Vector_setElement(pi, source + 1, source) ;
} else {
// create a boolean vector q, and set q(source) to true
GrB_Vector_new(&q, GrB_BOOL, n) ;
GrB_Vector_setElement(q, true, source) ;
// terminates as soon as it finds any pair
first_semiring = GxB_ANY_PAIR_BOOL ;
second_semiring = GxB_ANY_PAIR_BOOL ;
}
// average node degree
double d = (n == 0) ? 0 : (((double) nvalA) / (double) n) ;
int64_t nvisited = 0 ; // # nodes visited so far
GrB_Index nq = 1 ; // number of nodes in the current level
//--------------------------------------------------------------------------
// BFS traversal and label the nodes
//--------------------------------------------------------------------------
for(int64_t level = 1 ; ; level++) {
//----------------------------------------------------------------------
// set v to the current level, for all nodes in q
//----------------------------------------------------------------------
// v<q> = level: set v(i) = level for all nodes i in q
GrB_assign(v, q, NULL, level, GrB_ALL, n, GrB_DESC_S) ;
//----------------------------------------------------------------------
// check if done
//----------------------------------------------------------------------
nvisited += nq ;
if(nq == 0 || nvisited == n || level >= max_level) break ;
//----------------------------------------------------------------------
// check if destination has been reached, if one is provided
//----------------------------------------------------------------------
if(dest) {
GrB_Info res = GrB_Vector_extractElement(&dest_val, v, *dest) ;
if(res != GrB_NO_VALUE) break ;
}
//----------------------------------------------------------------------
// check if v should be converted to dense
//----------------------------------------------------------------------
if(vsparse && nvisited > vlimit) {
// Convert v from sparse to dense to speed up the rest of the work.
// If this case is triggered, it would have been faster to pass in
// vsparse = false on input.
// v <!v> = 0
GrB_assign(v, v, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ;
GrB_Vector_nvals(&ignore, v) ;
if(compute_tree) {
// Convert pi from sparse to dense, to speed up the work.
// pi<!pi> = 0
GrB_assign(pi, pi, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ;
GrB_Vector_nvals(&ignore, pi) ;
}
vsparse = false ;
}
//----------------------------------------------------------------------
// select push vs pull
//----------------------------------------------------------------------
if(push_pull) {
double pushwork = d * nq ;
double expected = (double) n / (double)(nvisited + 1) ;
double per_dot = LAGRAPH_MIN(d, expected) ;
double binarysearch = (3 * (1 + log2((double) nq))) ;
double pullwork = (n - nvisited) * per_dot * binarysearch ;
use_vxm_with_A = (pushwork < pullwork) ;
}
//----------------------------------------------------------------------
// q = next level of the BFS
//----------------------------------------------------------------------
if(use_vxm_with_A) {
// q'<!v> = q'*A
// this is a push step if A is in CSR format; pull if CSC
GrB_vxm(q, v, NULL, first_semiring, q, A, GrB_DESC_RC) ;
} else {
// q<!v> = AT*q
// this is a pull step if AT is in CSR format; push if CSC
GrB_mxv(q, v, NULL, second_semiring, AT, q, GrB_DESC_RC) ;
}
//----------------------------------------------------------------------
// move to next level
//----------------------------------------------------------------------
if(compute_tree) {
//------------------------------------------------------------------
// assign parents
//------------------------------------------------------------------
// q(i) currently contains the parent of node i in tree (off by one
// so it won't have any zero values, for valued mask).
// pi<q> = q
GrB_assign(pi, q, NULL, q, GrB_ALL, n, GrB_DESC_S) ;
//------------------------------------------------------------------
// replace q with current node numbers
//------------------------------------------------------------------
// TODO this could be a unaryop
// q(i) = i+1 for all entries in q.
GrB_Index *qi ;
bool jumbled ;
int64_t q_size ;
GrB_Index qi_size, qx_size ;
if(n > INT32_MAX) {
int64_t *qx ;
GxB_Vector_export_CSC(&q, &int_type, &n,
&qi, (void **) (&qx), &qi_size, &qx_size, &nq,
&jumbled, NULL) ;
int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ;
nth = LAGRAPH_MAX(nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for(int64_t k = 0 ; k < nq ; k++) {
qx [k] = qi [k] + 1 ;
}
GxB_Vector_import_CSC(&q, int_type, n,
&qi, (void **) (&qx), qi_size, qx_size, nq,
jumbled, NULL) ;
} else {
int32_t *qx ;
GxB_Vector_export_CSC(&q, &int_type, &n,
&qi, (void **) (&qx), &qi_size, &qx_size, &nq,
&jumbled, NULL) ;
int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ;
nth = LAGRAPH_MAX(nth, 1) ;
#pragma omp parallel for num_threads(nth) schedule(static)
for(int32_t k = 0 ; k < nq ; k++) {
qx [k] = qi [k] + 1 ;
}
GxB_Vector_import_CSC(&q, int_type, n,
&qi, (void **) (&qx), qi_size, qx_size, nq,
jumbled, NULL) ;
}
} else {
//------------------------------------------------------------------
// count the nodes in the current level
//------------------------------------------------------------------
GrB_Vector_nvals(&nq, q) ;
}
}
//--------------------------------------------------------------------------
// return the parent vector, if computed
//--------------------------------------------------------------------------
if(compute_tree) {
(*pi_output) = pi ;
pi = NULL ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
(*v_output) = v ; // return result
v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it
LAGRAPH_FREE_ALL ; // free all workspace (except for result v)
return (GrB_SUCCESS) ;
}
|
c-tree.h | /* Definitions for C parsing and type checking.
Copyright (C) 1987, 1993, 1994, 1995, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#ifndef GCC_C_TREE_H
#define GCC_C_TREE_H
#include "c-common.h"
#include "toplev.h"
#include "diagnostic.h"
/* struct lang_identifier is private to c-decl.c, but langhooks.c needs to
know how big it is. This is sanity-checked in c-decl.c. */
#define C_SIZEOF_STRUCT_LANG_IDENTIFIER \
(sizeof (struct c_common_identifier) + 3 * sizeof (void *))
/* Language-specific declaration information. */
struct lang_decl GTY(())
{
char dummy;
};
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */
#define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2 (TYPE)
/* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE
nonzero if the definition of the type has already started. */
#define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0 (TYPE)
/* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable
declarations whose type would be completed by completing that type. */
#define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD (TYPE)
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0 (ID)
struct lang_type GTY(())
{
/* In a RECORD_TYPE, a sorted array of the fields of the type. */
struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields"))) s;
/* In an ENUMERAL_TYPE, the min and max values. */
tree enum_min;
tree enum_max;
/* In a RECORD_TYPE, information specific to Objective-C, such
as a list of adopted protocols or a pointer to a corresponding
@interface. See objc/objc-act.h for details. */
tree objc_info;
};
/* Record whether a type or decl was written with nonconstant size.
Note that TYPE_SIZE may have simplified to a constant. */
#define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1 (TYPE)
#define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0 (TYPE)
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was defined without an explicit
return type. */
#define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1 (EXP)
/* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */
#define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2 (EXP)
/* For FUNCTION_DECLs, evaluates true if the decl is built-in but has
been declared. */
#define C_DECL_DECLARED_BUILTIN(EXP) \
DECL_LANG_FLAG_3 (FUNCTION_DECL_CHECK (EXP))
/* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a
built-in prototype and does not have a non-built-in prototype. */
#define C_DECL_BUILTIN_PROTOTYPE(EXP) \
DECL_LANG_FLAG_6 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a decl was declared register. This is strictly a
front-end flag, whereas DECL_REGISTER is used for code generation;
they may differ for structures with volatile fields. */
#define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4 (EXP)
/* Record whether a decl was used in an expression anywhere except an
unevaluated operand of sizeof / typeof / alignof. This is only
used for functions declared static but not defined, though outside
sizeof and typeof it is set for other function decls as well. */
#define C_DECL_USED(EXP) DECL_LANG_FLAG_5 (FUNCTION_DECL_CHECK (EXP))
/* Record whether a label was defined in a statement expression which
has finished and so can no longer be jumped to. */
#define C_DECL_UNJUMPABLE_STMT_EXPR(EXP) \
DECL_LANG_FLAG_6 (LABEL_DECL_CHECK (EXP))
/* Record whether a label was the subject of a goto from outside the
current level of statement expression nesting and so cannot be
defined right now. */
#define C_DECL_UNDEFINABLE_STMT_EXPR(EXP) \
DECL_LANG_FLAG_7 (LABEL_DECL_CHECK (EXP))
/* Record whether a label was defined in the scope of an identifier
with variably modified type which has finished and so can no longer
be jumped to. */
#define C_DECL_UNJUMPABLE_VM(EXP) \
DECL_LANG_FLAG_3 (LABEL_DECL_CHECK (EXP))
/* Record whether a label was the subject of a goto from outside the
current level of scopes of identifiers with variably modified type
and so cannot be defined right now. */
#define C_DECL_UNDEFINABLE_VM(EXP) \
DECL_LANG_FLAG_5 (LABEL_DECL_CHECK (EXP))
/* Record whether a variable has been declared threadprivate by
#pragma omp threadprivate. */
#define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3 (VAR_DECL_CHECK (DECL))
/* Nonzero for a decl which either doesn't exist or isn't a prototype.
N.B. Could be simplified if all built-in decls had complete prototypes
(but this is presently difficult because some of them need FILE*). */
#define C_DECL_ISNT_PROTOTYPE(EXP) \
(EXP == 0 \
|| (TYPE_ARG_TYPES (TREE_TYPE (EXP)) == 0 \
&& !DECL_BUILT_IN (EXP)))
/* For FUNCTION_TYPE, a hidden list of types of arguments. The same as
TYPE_ARG_TYPES for functions with prototypes, but created for functions
without prototypes. */
#define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1 (NODE)
/* Record parser information about an expression that is irrelevant
for code generation alongside a tree representing its value. */
struct c_expr
{
/* The value of the expression. */
tree value;
/* Record the original binary operator of an expression, which may
have been changed by fold, STRING_CST for unparenthesized string
constants, or ERROR_MARK for other expressions (including
parenthesized expressions). */
enum tree_code original_code;
};
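/* Illustrative note (not from the original header): after the parser builds
   "a + b", parser_build_binary_op records the folded tree in VALUE and
   PLUS_EXPR in ORIGINAL_CODE; a parenthesized "(a + b)" instead records
   ERROR_MARK, which lets later warnings tell the two forms apart.  */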
/* A kind of type specifier. Note that this information is currently
only used to distinguish tag definitions, tag references and typeof
uses. */
enum c_typespec_kind {
/* A reserved keyword type specifier. */
ctsk_resword,
/* A reference to a tag, previously declared, such as "struct foo".
This includes where the previous declaration was as a different
kind of tag, in which case this is only valid if shadowing that
tag in an inner scope. */
ctsk_tagref,
/* A reference to a tag, not previously declared in a visible
scope. */
ctsk_tagfirstref,
/* A definition of a tag such as "struct foo { int a; }". */
ctsk_tagdef,
/* A typedef name. */
ctsk_typedef,
/* An ObjC-specific kind of type specifier. */
ctsk_objc,
/* A typeof specifier. */
ctsk_typeof
};
/* A type specifier: this structure is created in the parser and
passed to declspecs_add_type only. */
struct c_typespec {
/* What kind of type specifier this is. */
enum c_typespec_kind kind;
/* The specifier itself. */
tree spec;
};
/* A storage class specifier. */
enum c_storage_class {
csc_none,
csc_auto,
csc_extern,
csc_register,
csc_static,
csc_typedef
};
/* A type specifier keyword "void", "_Bool", "char", "int", "float",
"double", or none of these. */
enum c_typespec_keyword {
cts_none,
cts_void,
cts_bool,
cts_char,
cts_int,
cts_float,
cts_double,
cts_dfloat32,
cts_dfloat64,
cts_dfloat128
};
/* A sequence of declaration specifiers in C. */
struct c_declspecs {
/* The type specified, if a single type specifier such as a struct,
union or enum specifier, typedef name or typeof specifies the
whole type, or NULL_TREE if none or a keyword such as "void" or
"char" is used. Does not include qualifiers. */
tree type;
/* The attributes from a typedef decl. */
tree decl_attr;
/* When parsing, the attributes. Outside the parser, this will be
NULL; attributes (possibly from multiple lists) will be passed
separately. */
tree attrs;
/* Any type specifier keyword used such as "int", not reflecting
modifiers such as "short", or cts_none if none. */
enum c_typespec_keyword typespec_word;
/* The storage class specifier, or csc_none if none. */
enum c_storage_class storage_class;
/* Whether any declaration specifiers have been seen at all. */
BOOL_BITFIELD declspecs_seen_p : 1;
/* Whether a type specifier has been seen. */
BOOL_BITFIELD type_seen_p : 1;
/* Whether something other than a storage class specifier or
attribute has been seen. This is used to warn for the
obsolescent usage of storage class specifiers other than at the
start of the list. (Doing this properly would require function
specifiers to be handled separately from storage class
specifiers.) */
BOOL_BITFIELD non_sc_seen_p : 1;
/* Whether the type is specified by a typedef or typeof name. */
BOOL_BITFIELD typedef_p : 1;
/* Whether a struct, union or enum type either had its content
defined by a type specifier in the list or was the first visible
declaration of its tag. */
BOOL_BITFIELD tag_defined_p : 1;
/* Whether the type is explicitly "signed" or specified by a typedef
whose type is explicitly "signed". */
BOOL_BITFIELD explicit_signed_p : 1;
/* Whether the specifiers include a deprecated typedef. */
BOOL_BITFIELD deprecated_p : 1;
/* APPLE LOCAL begin "unavailable" attribute (radar 2809697) */
/* Whether the specifiers include an unavailable typedef. */
BOOL_BITFIELD unavailable_p : 1;
/* APPLE LOCAL end "unavailable" attribute (radar 2809697) */
/* Whether the type defaulted to "int" because there were no type
specifiers. */
BOOL_BITFIELD default_int_p : 1;
/* Whether "long" was specified. */
BOOL_BITFIELD long_p : 1;
/* Whether "long" was specified more than once. */
BOOL_BITFIELD long_long_p : 1;
/* Whether "short" was specified. */
BOOL_BITFIELD short_p : 1;
/* Whether "signed" was specified. */
BOOL_BITFIELD signed_p : 1;
/* Whether "unsigned" was specified. */
BOOL_BITFIELD unsigned_p : 1;
/* Whether "complex" was specified. */
BOOL_BITFIELD complex_p : 1;
/* Whether "inline" was specified. */
BOOL_BITFIELD inline_p : 1;
/* Whether "__thread" was specified. */
BOOL_BITFIELD thread_p : 1;
/* Whether "const" was specified. */
BOOL_BITFIELD const_p : 1;
/* Whether "volatile" was specified. */
BOOL_BITFIELD volatile_p : 1;
/* Whether "restrict" was specified. */
BOOL_BITFIELD restrict_p : 1;
};
/* The various kinds of declarators in C. */
enum c_declarator_kind {
/* An identifier. */
cdk_id,
/* A function. */
cdk_function,
/* An array. */
cdk_array,
/* A pointer. */
cdk_pointer,
/* APPLE LOCAL blocks (C++ ch) */
cdk_block_pointer,
/* Parenthesized declarator with nested attributes. */
cdk_attrs
};
/* Information about the parameters in a function declarator. */
struct c_arg_info {
/* A list of parameter decls. */
tree parms;
/* A list of structure, union and enum tags defined. */
tree tags;
/* A list of argument types to go in the FUNCTION_TYPE. */
tree types;
/* A list of non-parameter decls (notably enumeration constants)
defined with the parameters. */
tree others;
/* A list of VLA sizes from the parameters. In a function
definition, these are used to ensure that side-effects in sizes
of arrays converted to pointers (such as a parameter int i[n++])
take place; otherwise, they are ignored. */
tree pending_sizes;
/* True when these arguments had [*]. */
BOOL_BITFIELD had_vla_unspec : 1;
};
/* A declarator. */
struct c_declarator {
/* The kind of declarator. */
enum c_declarator_kind kind;
/* Except for cdk_id, the contained declarator. For cdk_id, NULL. */
struct c_declarator *declarator;
location_t id_loc; /* Currently only set for cdk_id. */
union {
/* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract
declarator. */
tree id;
/* For functions. */
struct c_arg_info *arg_info;
/* For arrays. */
struct {
/* The array dimension, or NULL for [] and [*]. */
tree dimen;
/* The qualifiers inside []. */
int quals;
/* The attributes (currently ignored) inside []. */
tree attrs;
/* Whether [static] was used. */
BOOL_BITFIELD static_p : 1;
/* Whether [*] was used. */
BOOL_BITFIELD vla_unspec_p : 1;
} array;
/* For pointers, the qualifiers on the pointer type. */
int pointer_quals;
/* For attributes. */
tree attrs;
} u;
};
/* A type name. */
struct c_type_name {
/* The declaration specifiers. */
struct c_declspecs *specs;
/* The declarator. */
struct c_declarator *declarator;
};
/* A parameter. */
struct c_parm {
/* The declaration specifiers, minus any prefix attributes. */
struct c_declspecs *specs;
/* The attributes. */
tree attrs;
/* The declarator. */
struct c_declarator *declarator;
};
/* Save and restore the variables in this file and elsewhere
that keep track of the progress of compilation of the current function.
Used for nested functions. */
struct language_function GTY(())
{
struct c_language_function base;
tree x_break_label;
tree x_cont_label;
struct c_switch * GTY((skip)) x_switch_stack;
struct c_arg_info * GTY((skip)) arg_info;
int returns_value;
int returns_null;
int returns_abnormally;
int warn_about_return_type;
};
/* Save lists of labels used or defined in particular contexts.
Allocated on the parser obstack. */
struct c_label_list
{
/* The label at the head of the list. */
tree label;
/* The rest of the list. */
struct c_label_list *next;
};
/* Statement expression context. */
struct c_label_context_se
{
/* The labels defined at this level of nesting. */
struct c_label_list *labels_def;
/* The labels used at this level of nesting. */
struct c_label_list *labels_used;
/* The next outermost context. */
struct c_label_context_se *next;
};
/* Context of variably modified declarations. */
struct c_label_context_vm
{
/* The labels defined at this level of nesting. */
struct c_label_list *labels_def;
/* The labels used at this level of nesting. */
struct c_label_list *labels_used;
/* The scope of this context. Multiple contexts may be at the same
numbered scope, since each variably modified declaration starts a
new context. */
unsigned scope;
/* The next outermost context. */
struct c_label_context_vm *next;
};
/* in c-parser.c */
extern void c_parse_init (void);
/* in c-aux-info.c */
extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
extern int global_bindings_p (void);
extern void push_scope (void);
extern tree pop_scope (void);
extern void insert_block (tree);
extern void c_expand_body (tree);
extern void c_init_decl_processing (void);
extern void c_dup_lang_specific_decl (tree);
extern void c_print_identifier (FILE *, tree, int);
extern int quals_from_declspecs (const struct c_declspecs *);
extern struct c_declarator *build_array_declarator (tree, struct c_declspecs *,
bool, bool);
extern tree build_enumerator (tree, tree);
extern tree check_for_loop_decls (void);
extern void mark_forward_parm_decls (void);
extern void declare_parm_level (void);
extern void undeclared_variable (tree, location_t);
extern tree declare_label (tree);
extern tree define_label (location_t, tree);
extern void c_maybe_initialize_eh (void);
extern void finish_decl (tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (tree, tree, tree);
extern struct c_arg_info *get_parm_info (bool);
extern tree grokfield (struct c_declarator *, struct c_declspecs *, tree);
extern tree groktypename (struct c_type_name *);
/* APPLE LOCAL blocks 6339747 */
extern tree grokblockdecl (struct c_declspecs *, struct c_declarator *);
extern tree grokparm (const struct c_parm *);
extern tree implicitly_declare (tree);
extern void keep_next_level (void);
extern void pending_xref_error (void);
extern void c_push_function_context (struct function *);
extern void c_pop_function_context (struct function *);
extern void push_parm_decl (const struct c_parm *);
extern struct c_declarator *set_array_declarator_inner (struct c_declarator *,
struct c_declarator *,
bool);
extern tree builtin_function (const char *, tree, int, enum built_in_class,
const char *, tree);
extern void shadow_tag (const struct c_declspecs *);
extern void shadow_tag_warned (const struct c_declspecs *, int);
extern tree start_enum (tree);
extern int start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (enum tree_code, tree);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern tree xref_tag (enum tree_code, tree);
extern struct c_typespec parser_xref_tag (enum tree_code, tree);
extern int c_expand_decl (tree);
extern struct c_parm *build_c_parm (struct c_declspecs *, tree,
struct c_declarator *);
extern struct c_declarator *build_attrs_declarator (tree,
struct c_declarator *);
extern struct c_declarator *build_function_declarator (struct c_arg_info *,
struct c_declarator *);
extern struct c_declarator *build_id_declarator (tree);
extern struct c_declarator *make_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
/* APPLE LOCAL begin radar 5814025 - blocks (C++ cg) */
extern struct c_declarator *make_block_pointer_declarator (struct c_declspecs *,
struct c_declarator *);
/* APPLE LOCAL end radar 5814025 - blocks (C++ cg) */
extern struct c_declspecs *build_null_declspecs (void);
extern struct c_declspecs *declspecs_add_qual (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_type (struct c_declspecs *,
struct c_typespec);
extern struct c_declspecs *declspecs_add_scspec (struct c_declspecs *, tree);
extern struct c_declspecs *declspecs_add_attrs (struct c_declspecs *, tree);
extern struct c_declspecs *finish_declspecs (struct c_declspecs *);
/* in c-objc-common.c */
extern int c_disregard_inline_limits (tree);
extern int c_cannot_inline_tree_fn (tree *);
extern bool c_objc_common_init (void);
extern bool c_missing_noreturn_ok_p (tree);
extern tree c_objc_common_truthvalue_conversion (tree expr);
extern bool c_warn_unused_global_decl (tree);
extern void c_initialize_diagnostics (diagnostic_context *);
extern bool c_vla_unspec_p (tree x, tree fn);
#define c_build_type_variant(TYPE, CONST_P, VOLATILE_P) \
c_build_qualified_type ((TYPE), \
((CONST_P) ? TYPE_QUAL_CONST : 0) | \
((VOLATILE_P) ? TYPE_QUAL_VOLATILE : 0))
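/* Illustrative note (not from the original header): for example,
   c_build_type_variant (integer_type_node, 1, 0) yields a const-qualified
   variant of int.  */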
/* in c-typeck.c */
extern int in_alignof;
extern int in_sizeof;
extern int in_typeof;
extern struct c_switch *c_switch_stack;
extern struct c_label_context_se *label_context_stack_se;
extern struct c_label_context_vm *label_context_stack_vm;
extern tree require_complete_type (tree);
extern int same_translation_unit_p (tree, tree);
extern int comptypes (tree, tree);
extern bool c_vla_type_p (tree);
extern bool c_mark_addressable (tree);
extern void c_incomplete_type_error (tree, tree);
extern tree c_type_promotes_to (tree);
extern struct c_expr default_function_array_conversion (struct c_expr);
extern tree composite_type (tree, tree);
extern tree build_component_ref (tree, tree);
extern tree build_array_ref (tree, tree);
extern tree build_external_ref (tree, int, location_t);
extern void pop_maybe_used (bool);
extern struct c_expr c_expr_sizeof_expr (struct c_expr);
extern struct c_expr c_expr_sizeof_type (struct c_type_name *);
extern struct c_expr parser_build_unary_op (enum tree_code, struct c_expr);
extern struct c_expr parser_build_binary_op (enum tree_code, struct c_expr,
struct c_expr);
extern tree build_conditional_expr (tree, tree, tree);
extern tree build_compound_expr (tree, tree);
extern tree c_cast_expr (struct c_type_name *, tree);
extern tree build_c_cast (tree, tree);
extern void store_init_value (tree, tree);
extern void error_init (const char *);
extern void pedwarn_init (const char *);
extern void maybe_warn_string_init (tree, struct c_expr);
extern void start_init (tree, tree, int);
extern void finish_init (void);
extern void really_start_incremental_init (tree);
extern void push_init_level (int);
extern struct c_expr pop_init_level (int);
extern void set_init_index (tree, tree);
extern void set_init_label (tree);
extern void process_init_element (struct c_expr);
extern tree build_compound_literal (tree, tree);
extern tree c_start_case (tree);
extern void c_finish_case (tree);
extern tree build_asm_expr (tree, tree, tree, tree, bool);
extern tree build_asm_stmt (tree, tree);
extern tree c_convert_parm_for_inlining (tree, tree, tree, int);
extern int c_types_compatible_p (tree, tree);
extern tree c_begin_compound_stmt (bool);
extern tree c_end_compound_stmt (tree, bool);
extern void c_finish_if_stmt (location_t, tree, tree, tree, bool);
/* APPLE LOCAL begin for-fsf-4_4 3274130 5295549 */
extern void c_finish_loop (location_t, tree, tree, tree, tree, tree, tree,
bool);
/* APPLE LOCAL end for-fsf-4_4 3274130 5295549 */
extern tree c_begin_stmt_expr (void);
extern tree c_finish_stmt_expr (tree);
extern tree c_process_expr_stmt (tree);
extern tree c_finish_expr_stmt (tree);
extern tree c_finish_return (tree);
extern tree c_finish_bc_stmt (tree *, bool);
extern tree c_finish_goto_label (tree);
extern tree c_finish_goto_ptr (tree);
extern void c_begin_vm_scope (unsigned int);
extern void c_end_vm_scope (unsigned int);
extern tree c_expr_to_decl (tree, bool *, bool *, bool *);
extern tree c_begin_omp_parallel (void);
extern tree c_finish_omp_parallel (tree, tree);
extern tree c_finish_omp_clauses (tree);
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
extern int current_function_returns_value;
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
extern int current_function_returns_null;
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
extern int current_function_returns_abnormally;
/* Nonzero means we are reading code that came from a system header file. */
extern int system_header_p;
/* True means global_bindings_p should return false even if the scope stack
says we are in file scope. */
extern bool c_override_global_bindings_to_false;
/* True means we've initialized exception handling. */
extern bool c_eh_initialized_p;
/* In c-decl.c */
extern void c_finish_incomplete_decl (tree);
extern void c_write_global_declarations (void);
/* APPLE LOCAL radar 5741070 */
extern tree c_return_interface_record_type (tree);
/* In order for the format checking to accept the C frontend
diagnostic framework extensions, you must include this file before
toplev.h, not after. */
#if GCC_VERSION >= 4001
#define ATTRIBUTE_GCC_CDIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m ,n))) ATTRIBUTE_NONNULL(m)
#else
#define ATTRIBUTE_GCC_CDIAG(m, n) ATTRIBUTE_NONNULL(m)
#endif
extern void pedwarn_c90 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2);
extern void pedwarn_c99 (const char *, ...) ATTRIBUTE_GCC_CDIAG(1,2);
#endif /* ! GCC_C_TREE_H */
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default the target must match a particular pixel color exactly.
% However, in many cases two colors may differ by a small amount. The fuzz
% member of image defines how much tolerance is acceptable to consider two
% colors the same. For example, with fuzz set to 10, the color red at
% intensities of 100 and 102 is interpreted as the same color for the
% purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const DrawInfo *draw_info,const PixelInfo *target,
% const ssize_t x_offset,const ssize_t y_offset,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
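%  A minimal usage sketch (illustrative only, not part of the library
%  documentation; it assumes image, draw_info, and exception are already
%  acquired, elides error handling, and seeds the fill from the pixel at
%  the origin):
%
%      PixelInfo target;
%
%      GetPixelInfoPixel(image,GetVirtualPixels(image,0,0,1,1,exception),
%        &target);
%      (void) FloodfillPaintImage(image,draw_info,&target,0,0,MagickFalse,
%        exception);
%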
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
const ssize_t y_offset,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define MaxStacksize 524288UL
#define PushSegmentStack(up,left,right,delta) \
{ \
if (s >= (segment_stack+MaxStacksize)) \
{ \
segment_info=RelinquishVirtualMemory(segment_info); \
image_view=DestroyCacheView(image_view); \
floodplane_view=DestroyCacheView(floodplane_view); \
floodplane_image=DestroyImage(floodplane_image); \
ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
} \
else \
{ \
if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
{ \
s->x1=(double) (left); \
s->y1=(double) (up); \
s->x2=(double) (right); \
s->y2=(double) (delta); \
s++; \
} \
} \
}
CacheView
*floodplane_view,
*image_view;
Image
*floodplane_image;
MagickBooleanType
skip,
status;
MemoryInfo
*segment_info;
PixelInfo
fill_color,
pixel;
SegmentInfo
*s;
SegmentInfo
*segment_stack;
ssize_t
offset,
start,
x1,
x2,
y;
/*
Check boundary conditions.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
return(MagickFalse);
if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if ((image->alpha_trait == UndefinedPixelTrait) &&
(draw_info->fill.alpha_trait != UndefinedPixelTrait))
(void) SetImageAlpha(image,OpaqueAlpha,exception);
/*
Set floodfill state.
*/
floodplane_image=CloneImage(image,0,0,MagickTrue,exception);
if (floodplane_image == (Image *) NULL)
return(MagickFalse);
floodplane_image->alpha_trait=UndefinedPixelTrait;
floodplane_image->colorspace=GRAYColorspace;
(void) QueryColorCompliance("#000",AllCompliance,
&floodplane_image->background_color,exception);
(void) SetImageBackgroundColor(floodplane_image,exception);
segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
if (segment_info == (MemoryInfo *) NULL)
{
floodplane_image=DestroyImage(floodplane_image);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
/*
Push initial segment on stack.
*/
status=MagickTrue;
start=0;
s=segment_stack;
GetPixelInfo(image,&pixel);
image_view=AcquireVirtualCacheView(image,exception);
floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
PushSegmentStack(y_offset,x_offset,x_offset,1);
PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
while (s > segment_stack)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/*
Pop segment off stack.
*/
s--;
x1=(ssize_t) s->x1;
x2=(ssize_t) s->x2;
offset=(ssize_t) s->y2;
y=(ssize_t) s->y1+offset;
/*
Recolor neighboring pixels.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
p+=x1*GetPixelChannels(image);
q+=x1*GetPixelChannels(floodplane_image);
for (x=x1; x >= 0; x--)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
break;
SetPixelGray(floodplane_image,QuantumRange,q);
p-=GetPixelChannels(image);
q-=GetPixelChannels(floodplane_image);
}
if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
break;
skip=x >= x1 ? MagickTrue : MagickFalse;
if (skip == MagickFalse)
{
start=x+1;
if (start < x1)
PushSegmentStack(y,start,x1-1,-offset);
x=x1+1;
}
do
{
if (skip == MagickFalse)
{
if (x < (ssize_t) image->columns)
{
p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
x,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for ( ; x < (ssize_t) image->columns; x++)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
break;
SetPixelGray(floodplane_image,QuantumRange,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(floodplane_image);
}
status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
if (status == MagickFalse)
break;
}
PushSegmentStack(y,start,x-1,offset);
if (x > (x2+1))
PushSegmentStack(y,x2+1,x-1,-offset);
}
skip=MagickFalse;
x++;
if (x <= x2)
{
p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
exception);
q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
break;
for ( ; x <= x2; x++)
{
if (GetPixelGray(floodplane_image,q) != 0)
break;
GetPixelInfoPixel(image,p,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
break;
p+=GetPixelChannels(image);
q+=GetPixelChannels(floodplane_image);
}
}
start=x;
} while (x <= x2);
}
status=MagickTrue;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
/*
Tile fill color onto floodplane.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelGray(floodplane_image,p) != 0)
{
GetFillColor(draw_info,x,y,&fill_color,exception);
SetPixelViaPixelInfo(image,&fill_color,q);
}
p+=GetPixelChannels(floodplane_image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
floodplane_view=DestroyCacheView(floodplane_view);
image_view=DestroyCacheView(image_view);
segment_info=RelinquishVirtualMemory(segment_info);
floodplane_image=DestroyImage(floodplane_image);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transition.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const StopInfo *stops,
% const size_t number_stops,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
% o method: the gradient spread method: pad, reflect, or repeat.
%
% o stops: an array of color stops, each pairing a color with an offset.
%
% o number_stops: the number of color stops.
%
% o exception: return any errors or warnings in this structure.
%
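%  A minimal usage sketch (illustrative only; assumes image and exception are
%  valid and elides error handling) that fills the image with a white-to-black
%  linear gradient:
%
%      StopInfo stops[2];
%
%      (void) QueryColorCompliance("white",AllCompliance,&stops[0].color,
%        exception);
%      stops[0].offset=0.0;
%      (void) QueryColorCompliance("black",AllCompliance,&stops[1].color,
%        exception);
%      stops[1].offset=1.0;
%      (void) GradientImage(image,LinearGradient,PadSpread,stops,2,exception);
%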
*/
MagickExport MagickBooleanType GradientImage(Image *image,
const GradientType type,const SpreadMethod method,const StopInfo *stops,
const size_t number_stops,ExceptionInfo *exception)
{
const char
*artifact;
DrawInfo
*draw_info;
GradientInfo
*gradient;
MagickBooleanType
status;
/*
Set gradient start-stop end points.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(stops != (const StopInfo *) NULL);
assert(number_stops > 0);
draw_info=AcquireDrawInfo();
gradient=(&draw_info->gradient);
gradient->type=type;
gradient->bounding_box.width=image->columns;
gradient->bounding_box.height=image->rows;
artifact=GetImageArtifact(image,"gradient:bounding-box");
if (artifact != (const char *) NULL)
(void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=(double) image->rows-1;
artifact=GetImageArtifact(image,"gradient:direction");
if (artifact != (const char *) NULL)
{
GravityType
direction;
direction=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,artifact);
switch (direction)
{
case NorthWestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case NorthGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case NorthEastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=(double) image->rows-1;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=0.0;
break;
}
case WestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=0.0;
break;
}
case EastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=0.0;
break;
}
case SouthWestGravity:
{
gradient->gradient_vector.x1=(double) image->columns-1;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
case SouthGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=0.0;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
case SouthEastGravity:
{
gradient->gradient_vector.x1=0.0;
gradient->gradient_vector.y1=0.0;
gradient->gradient_vector.x2=(double) image->columns-1;
gradient->gradient_vector.y2=(double) image->rows-1;
break;
}
default:
break;
}
}
artifact=GetImageArtifact(image,"gradient:angle");
if (artifact != (const char *) NULL)
gradient->angle=StringToDouble(artifact,(char **) NULL);
artifact=GetImageArtifact(image,"gradient:vector");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
&gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
&gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
(GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
gradient->gradient_vector.x2=0.0;
gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
artifact=GetImageArtifact(image,"gradient:center");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
&gradient->center.y);
artifact=GetImageArtifact(image,"gradient:angle");
if ((type == LinearGradient) && (artifact != (const char *) NULL))
{
double
sine,
cosine,
distance;
/*
Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
*/
sine=sin((double) DegreesToRadians(gradient->angle-90.0));
cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
distance=fabs((double) (image->columns-1.0)*cosine)+
fabs((double) (image->rows-1.0)*sine);
gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine);
gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine);
gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine);
gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine);
}
gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/
2.0;
gradient->radii.y=gradient->radii.x;
artifact=GetImageArtifact(image,"gradient:extent");
if (artifact != (const char *) NULL)
{
if (LocaleCompare(artifact,"Circle") == 0)
{
gradient->radii.x=(double) MagickMax((image->columns-1.0),
(image->rows-1.0))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Diagonal") == 0)
{
gradient->radii.x=(double) (sqrt((double) (image->columns-1.0)*
(image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Ellipse") == 0)
{
gradient->radii.x=(double) (image->columns-1.0)/2.0;
gradient->radii.y=(double) (image->rows-1.0)/2.0;
}
if (LocaleCompare(artifact,"Maximum") == 0)
{
gradient->radii.x=(double) MagickMax((image->columns-1.0),
(image->rows-1.0))/2.0;
gradient->radii.y=gradient->radii.x;
}
if (LocaleCompare(artifact,"Minimum") == 0)
{
gradient->radii.x=(double) (MagickMin((image->columns-1.0),
(image->rows-1.0)))/2.0;
gradient->radii.y=gradient->radii.x;
}
}
artifact=GetImageArtifact(image,"gradient:radii");
if (artifact != (const char *) NULL)
(void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
&gradient->radii.y);
gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
gradient->spread=method;
/*
Define the gradient to fill between the stops.
*/
gradient->number_stops=number_stops;
gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
sizeof(*gradient->stops));
if (gradient->stops == (StopInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
(void) memcpy(gradient->stops,stops,(size_t) number_stops*sizeof(*stops));
/*
Draw a gradient on the image.
*/
status=DrawGradientImage(image,draw_info,exception);
draw_info=DestroyDrawInfo(draw_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
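%  A minimal usage sketch (illustrative only; assumes image and exception are
%  valid; the caller owns the returned image and must eventually destroy it):
%
%      Image
%        *paint_image;
%
%      paint_image=OilPaintImage(image,3.0,1.0,exception);
%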
*/
static size_t **DestroyHistogramTLS(size_t **histogram)
{
ssize_t
i;
assert(histogram != (size_t **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (histogram[i] != (size_t *) NULL)
histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
histogram=(size_t **) RelinquishMagickMemory(histogram);
return(histogram);
}
static size_t **AcquireHistogramTLS(const size_t count)
{
ssize_t
i;
size_t
**histogram,
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
histogram=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*histogram));
if (histogram == (size_t **) NULL)
return((size_t **) NULL);
(void) memset(histogram,0,number_threads*sizeof(*histogram));
for (i=0; i < (ssize_t) number_threads; i++)
{
histogram[i]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram));
if (histogram[i] == (size_t *) NULL)
return(DestroyHistogramTLS(histogram));
}
return(histogram);
}
MagickExport Image *OilPaintImage(const Image *image,const double radius,
const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"
CacheView
*image_view,
*paint_view;
Image
*linear_image,
*paint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
size_t
**histograms,
width;
ssize_t
center,
y;
/*
Initialize painted image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
width=GetOptimalKernelWidth2D(radius,sigma);
linear_image=CloneImage(image,0,0,MagickTrue,exception);
paint_image=CloneImage(image,0,0,MagickTrue,exception);
if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
{
if (linear_image != (Image *) NULL)
linear_image=DestroyImage(linear_image);
if (paint_image != (Image *) NULL)
paint_image=DestroyImage(paint_image);
return((Image *) NULL);
}
if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
{
linear_image=DestroyImage(linear_image);
paint_image=DestroyImage(paint_image);
return((Image *) NULL);
}
histograms=AcquireHistogramTLS(NumberPaintBins);
if (histograms == (size_t **) NULL)
{
linear_image=DestroyImage(linear_image);
paint_image=DestroyImage(paint_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Oil paint image.
*/
status=MagickTrue;
progress=0;
center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)*
(width/2L)+GetPixelChannels(linear_image)*(width/2L);
image_view=AcquireVirtualCacheView(linear_image,exception);
paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
for (y=0; y < (ssize_t) linear_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
size_t
*histogram;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(width/2L),linear_image->columns+width,width,exception);
q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
histogram=histograms[GetOpenMPThreadId()];
for (x=0; x < (ssize_t) linear_image->columns; x++)
{
ssize_t
i,
u;
size_t
count;
ssize_t
j,
k,
n,
v;
/*
Assign most frequent color.
*/
k=0;
j=0;
count=0;
(void) memset(histogram,0,NumberPaintBins*sizeof(*histogram));
for (v=0; v < (ssize_t) width; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
linear_image,p+GetPixelChannels(linear_image)*(u+k))));
histogram[n]++;
if (histogram[n] > count)
{
j=k+u;
count=histogram[n];
}
}
k+=(ssize_t) (linear_image->columns+width);
}
for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(linear_image,i);
PixelTrait traits = GetPixelChannelTraits(linear_image,channel);
PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel);
if ((traits == UndefinedPixelTrait) ||
(paint_traits == UndefinedPixelTrait))
continue;
if ((paint_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(paint_image,channel,p[center+i],q);
continue;
}
SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+
i],q);
}
p+=GetPixelChannels(linear_image);
q+=GetPixelChannels(paint_image);
}
if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
status=MagickFalse;
if (linear_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(linear_image,OilPaintImageTag,progress,
linear_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
paint_view=DestroyCacheView(paint_view);
image_view=DestroyCacheView(image_view);
histograms=DestroyHistogramTLS(histograms);
linear_image=DestroyImage(linear_image);
if (status == MagickFalse)
paint_image=DestroyImage(paint_image);
return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill argument.
%
% By default the color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines how
% much tolerance is acceptable to consider two colors the same. For example,
% with fuzz set to 10, the color red at intensities of 100 and 102 is
% interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
% const PixelInfo *fill,const MagickBooleanType invert,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
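%  A minimal usage sketch (illustrative only; assumes image and exception are
%  valid) that recolors every red pixel blue:
%
%      PixelInfo fill,target;
%
%      (void) QueryColorCompliance("red",AllCompliance,&target,exception);
%      (void) QueryColorCompliance("blue",AllCompliance,&fill,exception);
%      (void) OpaquePaintImage(image,&target,&fill,MagickFalse,exception);
%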
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define OpaquePaintImageTag "Opaque/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
conform_fill,
conform_target,
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (PixelInfo *) NULL);
assert(fill != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
ConformPixelInfo(image,fill,&conform_fill,exception);
ConformPixelInfo(image,target,&conform_target,exception);
/*
Make image color opaque.
*/
status=MagickTrue;
progress=0;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
{
PixelTrait
traits;
traits=GetPixelChannelTraits(image,RedPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelRed(image,(Quantum) conform_fill.red,q);
traits=GetPixelChannelTraits(image,GreenPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelGreen(image,(Quantum) conform_fill.green,q);
traits=GetPixelChannelTraits(image,BluePixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlue(image,(Quantum) conform_fill.blue,q);
traits=GetPixelChannelTraits(image,BlackPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelBlack(image,(Quantum) conform_fill.black,q);
traits=GetPixelChannelTraits(image,AlphaPixelChannel);
if ((traits & UpdatePixelTrait) != 0)
SetPixelAlpha(image,(Quantum) conform_fill.alpha,q);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,OpaquePaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default the color must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. Fuzz defines how
% much tolerance is acceptable to consider two colors the same. For example,
% with fuzz set to 10, the color red at intensities of 100 and 102 is
% interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const PixelInfo *target,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
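%  A minimal usage sketch (illustrative only; assumes image and exception are
%  valid) that makes every white pixel fully transparent:
%
%      PixelInfo target;
%
%      (void) QueryColorCompliance("white",AllCompliance,&target,exception);
%      (void) TransparentPaintImage(image,&target,TransparentAlpha,
%        MagickFalse,exception);
%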
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
zero;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(target != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
SetPixelAlpha(image,opacity,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is one fuzz value for all the channels, TransparentPaintImage()
% is not suitable for operations like chroma keying, where the tolerance for
% similarity of the color components (RGB) can differ. This method therefore
% takes two target pixels (one low and one high), and every pixel of the
% image whose value lies between these two pixels is made transparent.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
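%  A minimal usage sketch (illustrative only; assumes image and exception are
%  valid, with hypothetical bounds) that keys out a band of greens:
%
%      PixelInfo high,low;
%
%      (void) QueryColorCompliance("#208020",AllCompliance,&low,exception);
%      (void) QueryColorCompliance("#40ff40",AllCompliance,&high,exception);
%      (void) TransparentPaintImageChroma(image,&low,&high,TransparentAlpha,
%        MagickFalse,exception);
%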
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(high != (PixelInfo *) NULL);
assert(low != (PixelInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
/*
Make image color transparent.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
match;
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
GetPixelInfo(image,&pixel);
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
(pixel.green >= low->green) && (pixel.green <= high->green) &&
(pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
MagickFalse;
if (match != invert)
SetPixelAlpha(image,opacity,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
8516.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
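/* Note (derived from the loop body below): the kernel applies a fixed 3x3
   stencil to A, writing B; as a weight matrix (rows correspond to i-1, i,
   i+1):
        0.2   0.5  -0.8
       -0.3   0.6  -0.9
        0.4   0.7   0.1 */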
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
for (i = 1; i < _PB_NI - 1; ++i)
{
#pragma omp target teams distribute simd
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
syr2k.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* syr2k.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "syr2k.h"
/* Array initialization. */
static
void init_array(int n, int m,
DATA_TYPE *alpha,
DATA_TYPE *beta,
DATA_TYPE POLYBENCH_2D(C, N, N, n, n),
DATA_TYPE POLYBENCH_2D(A, N, M, n, m),
DATA_TYPE POLYBENCH_2D(B, N, M, n, m))
{
int i, j;
*alpha = 1.5;
*beta = 1.2;
for (i = 0; i < n; i++)
for (j = 0; j < m; j++)
{
A[i][j] = (DATA_TYPE) ((i * j + 1) % n) / n;
B[i][j] = (DATA_TYPE) ((i * j + 2) % m) / m;
}
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
{
C[i][j] = (DATA_TYPE) ((i * j + 3) % n) / m;
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(C, N, N, n, n))
{
int i, j;
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("C");
for (i = 0; i < n; i++)
for (j = 0; j < n; j++)
{
if ((i * n + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n");
fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, C[i][j]);
}
POLYBENCH_DUMP_END("C");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_syr2k(int n, int m,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(C, N, N, n, n),
DATA_TYPE POLYBENCH_2D(A, N, M, n, m),
DATA_TYPE POLYBENCH_2D(B, N, M, n, m))
{
int i, j, k;
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(n, beta, m, alpha, A, B)
for (i = 0; i < _PB_N; i++)
{
for (j = 0; j <= i; j++)
C[i][j] *= beta;
for (k = 0; k < _PB_M; k++)
{
for (j = 0; j <= i; j++)
{
C[i][j] += A[j][k] * alpha * B[i][k] + B[j][k] * alpha * A[i][k];
}
}
}
}
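/*
  In BLAS terms, the kernel above performs the symmetric rank-2k update
  (an added note; lower triangle of C only):

      C := alpha*A*B^T + alpha*B*A^T + beta*C

  with C an n-by-n symmetric matrix and A, B both n-by-m.
*/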
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE alpha;
DATA_TYPE beta;
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, N, N, n, n);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, N, M, n, m);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, N, M, n, m);
/* Initialize array(s). */
init_array (n, m, &alpha, &beta,
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_syr2k (n, m,
alpha, beta,
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(C)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
quicksort.h | // -*- C++ -*-
// Copyright (C) 2007-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/quicksort.h
 * @brief Implementation of an unbalanced parallel quicksort (in-place).
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Johannes Singler.
#ifndef _GLIBCXX_PARALLEL_QUICKSORT_H
#define _GLIBCXX_PARALLEL_QUICKSORT_H 1
#include <parallel/parallel.h>
#include <parallel/partition.h>
namespace __gnu_parallel
{
/** @brief Unbalanced quicksort divide step.
* @param __begin Begin iterator of subsequence.
* @param __end End iterator of subsequence.
* @param __comp Comparator.
* @param __pivot_rank Desired __rank of the pivot.
* @param __num_samples Choose pivot from that many samples.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
typename std::iterator_traits<_RAIter>::difference_type
__parallel_sort_qs_divide(_RAIter __begin, _RAIter __end,
_Compare __comp, typename std::iterator_traits
<_RAIter>::difference_type __pivot_rank,
typename std::iterator_traits
<_RAIter>::difference_type
__num_samples, _ThreadIndex __num_threads)
{
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
_DifferenceType __n = __end - __begin;
__num_samples = std::min(__num_samples, __n);
// Allocate uninitialized, to avoid default constructor.
_ValueType* __samples = static_cast<_ValueType*>
(::operator new(__num_samples * sizeof(_ValueType)));
for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
{
const unsigned long long __index = static_cast<unsigned long long>
(__s) * __n / __num_samples;
::new(&(__samples[__s])) _ValueType(__begin[__index]);
}
__gnu_sequential::sort(__samples, __samples + __num_samples, __comp);
_ValueType& __pivot = __samples[__pivot_rank * __num_samples / __n];
__gnu_parallel::__binder2nd<_Compare, _ValueType, _ValueType, bool>
__pred(__comp, __pivot);
_DifferenceType __split = __parallel_partition(__begin, __end,
__pred, __num_threads);
for (_DifferenceType __s = 0; __s < __num_samples; ++__s)
__samples[__s].~_ValueType();
::operator delete(__samples);
return __split;
}
/** @brief Unbalanced quicksort conquer step.
* @param __begin Begin iterator of subsequence.
* @param __end End iterator of subsequence.
* @param __comp Comparator.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
void
__parallel_sort_qs_conquer(_RAIter __begin, _RAIter __end,
_Compare __comp,
_ThreadIndex __num_threads)
{
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
if (__num_threads <= 1)
{
__gnu_sequential::sort(__begin, __end, __comp);
return;
}
_DifferenceType __n = __end - __begin, __pivot_rank;
if (__n <= 1)
return;
_ThreadIndex __num_threads_left;
if ((__num_threads % 2) == 1)
__num_threads_left = __num_threads / 2 + 1;
else
__num_threads_left = __num_threads / 2;
__pivot_rank = __n * __num_threads_left / __num_threads;
_DifferenceType __split = __parallel_sort_qs_divide
(__begin, __end, __comp, __pivot_rank,
_Settings::get().sort_qs_num_samples_preset, __num_threads);
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
__parallel_sort_qs_conquer(__begin, __begin + __split,
__comp, __num_threads_left);
#pragma omp section
__parallel_sort_qs_conquer(__begin + __split, __end,
__comp, __num_threads - __num_threads_left);
}
}
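  /** A note on the split above (an added explanation, not part of the
   *  original source): __pivot_rank = __n * __num_threads_left /
   *  __num_threads chooses the pivot rank proportionally to the thread
   *  split, so each of the two parallel sections receives work roughly
   *  matching the number of threads it is given. */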
/** @brief Unbalanced quicksort main call.
* @param __begin Begin iterator of input sequence.
* @param __end End iterator input sequence, ignored.
* @param __comp Comparator.
* @param __num_threads Number of threads that are allowed to work on
* this part.
*/
template<typename _RAIter, typename _Compare>
void
__parallel_sort_qs(_RAIter __begin, _RAIter __end,
_Compare __comp,
_ThreadIndex __num_threads)
{
_GLIBCXX_CALL(__n)
typedef std::iterator_traits<_RAIter> _TraitsType;
typedef typename _TraitsType::value_type _ValueType;
typedef typename _TraitsType::difference_type _DifferenceType;
_DifferenceType __n = __end - __begin;
// At least one element per processor.
if (__num_threads > __n)
__num_threads = static_cast<_ThreadIndex>(__n);
__parallel_sort_qs_conquer(
__begin, __begin + __n, __comp, __num_threads);
}
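  /** A hedged usage sketch (illustration only; __parallel_sort_qs is an
   *  internal entry point, normally reached through the parallel-mode
   *  std::sort dispatch rather than called directly):
   *
   *    std::vector<int> __v(1 << 20);
   *    // ... fill __v ...
   *    __gnu_parallel::__parallel_sort_qs(__v.begin(), __v.end(),
   *                                       std::less<int>(), 4);
   */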
} //namespace __gnu_parallel
#endif /* _GLIBCXX_PARALLEL_QUICKSORT_H */
|
SpatialClassNLLCriterion.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialClassNLLCriterion.c"
#else
#define INITIAL_CHECK \
THArgCheck(THIndexTensor_(nDimensionLegacyAll)(target) == 3, 3, \
"only batches of spatial targets supported (3D tensors)" \
" but got targets of dimension: %d", \
THIndexTensor_(nDimensionLegacyAll)(target)); \
THArgCheck(THTensor_(nDimensionLegacyAll)(input) == 4, 2, \
"only batches of spatial inputs supported (4D tensors), " \
"but got input of dimension: %d", THTensor_(nDimensionLegacyAll)(input)); \
if (weights && THTensor_(nElement)(weights) != THTensor_(size)(input, 1)) { \
THError("weight tensor should be defined either for all or no classes"); \
} \
\
{ \
int64_t input0 = THTensor_(size)(input, 0); \
int64_t input1 = THTensor_(size)(input, 1); \
int64_t input2 = THTensor_(size)(input, 2); \
int64_t input3 = THTensor_(size)(input, 3); \
int64_t target0 = THIndexTensor_(size)(target, 0); \
int64_t target1 = THIndexTensor_(size)(target, 1); \
int64_t target2 = THIndexTensor_(size)(target, 2); \
THAssertMsg(input0 == target0 && input2 == target1 && input3 == target2, \
"size mismatch (got input: %ldx%ldx%ldx%ld, target: %ldx%ldx%ld)", \
input0, input1, input2, input3, target0, target1, target2); \
}
#define GRADOUTPUT_SHAPE_CHECK \
THArgCheck(THTensor_(nDimensionLegacyAll)(gradOutput) == 3, 3, \
"gradOutput must have same dimension as target (3)" \
" but got dimension: %d", \
THTensor_(nDimensionLegacyAll)(gradOutput)); \
{ \
int64_t gradOutput0 = THTensor_(size)(gradOutput, 0); \
int64_t gradOutput1 = THTensor_(size)(gradOutput, 1); \
int64_t gradOutput2 = THTensor_(size)(gradOutput, 2); \
int64_t target0 = THIndexTensor_(size)(target, 0); \
int64_t target1 = THIndexTensor_(size)(target, 1); \
int64_t target2 = THIndexTensor_(size)(target, 2); \
THAssertMsg( \
gradOutput0 == target0 && gradOutput1 == target1 && gradOutput2 == target2, \
"size mismatch (got gradOutput: %ldx%ldx%ld, target: %ldx%ldx%ld)", \
gradOutput0, gradOutput1, gradOutput2, target0, target1, target2); \
}
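/* Added shape summary for the checks above (illustration only): input
   holds (N, C, H, W) per-class scores, target holds (N, H, W) class
   indices, and gradOutput in the non-reduced case must match target's
   (N, H, W) shape. */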
void THNN_(SpatialClassNLLCriterion_updateOutput)(
THNNState *state,
THTensor *input,
THIndexTensor *target,
THTensor *output,
int64_t reduction,
THTensor *weights,
THTensor *total_weight,
int64_t ignore_index)
{
INITIAL_CHECK;
THTensor_(resize1d)(output, 1);
THTensor_(resize1d)(total_weight, 1);
if (reduction == Reduction::None) {
int64_t batch_size = THTensor_(size)(input, 0);
int64_t H = THTensor_(size)(input, 2);
int64_t W = THTensor_(size)(input, 3);
THTensor_(resize3d)(output, batch_size, H, W);
int64_t b, h, w;
#pragma omp parallel for private(b, h, w)
for (b = 0; b < batch_size; b++) {
for (h = 0; h < H; h++) {
for (w = 0; w < W; w++) {
int64_t cur_target = (int64_t)THIndexTensor_(get3d)(target, b, h, w);
if (cur_target == ignore_index) {
THTensor_(fastSet3d)(output, b, h, w, 0.0f);
continue;
}
scalar_t value = THTensor_(fastGet4d)(input, b, cur_target, h, w);
scalar_t weight = weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f;
THTensor_(fastSet3d)(output, b, h, w, -value * weight);
}
}
}
return;
}
input = THTensor_(newContiguous)(input);
target = THIndexTensor_(newContiguous)(target);
weights = weights ? THTensor_(newContiguous)(weights) : NULL;
scalar_t *input_data = input->data<scalar_t>();
THIndex_t *target_data = THIndexTensor_(data)(target);
scalar_t *weights_data = weights ? weights->data<scalar_t>() : NULL;
scalar_t *output_data = output->data<scalar_t>();
scalar_t *total_weight_data = total_weight->data<scalar_t>();
int64_t batch_size = THTensor_(size)(input, 0);
int64_t n_classes = THTensor_(size)(input, 1);
int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
int64_t sample_size = map_size * n_classes;
scalar_t total_weight_acc = 0;
scalar_t output_acc = 0;
for (int b = 0; b < batch_size; b++) {
for (int elem = 0; elem < map_size; elem++) {
int cur_target = target_data[b * map_size + elem];
if (cur_target == ignore_index) continue;
THAssert(cur_target >= 0 && cur_target < n_classes);
scalar_t cur_weight = weights ? weights_data[cur_target] : 1.0f;
total_weight_acc += cur_weight;
output_acc -= input_data[b * sample_size + cur_target * map_size + elem] * cur_weight;
}
}
*total_weight_data = total_weight_acc;
*output_data = output_acc;
if (reduction == Reduction::Mean && *total_weight_data)
*output_data /= *total_weight_data;
c10::raw::intrusive_ptr::decref(input);
THIndexTensor_(free)(target);
if (weights)
c10::raw::intrusive_ptr::decref(weights);
}
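/* Added note on the reduction branch above (illustration only): it
   accumulates

     output = -sum_i w[t_i] * input[b, t_i, h, w]

   over all non-ignored positions, where t_i is the target class at each
   position and w defaults to 1 when no weight tensor is given; for
   Reduction::Mean the sum is then divided by the accumulated weight. */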
void THNN_(SpatialClassNLLCriterion_updateGradInput)(
THNNState *state,
THTensor *input,
THIndexTensor *target,
THTensor *gradOutput,
THTensor *gradInput,
int64_t reduction,
THTensor *weights,
THTensor *total_weight,
int64_t ignore_index)
{
INITIAL_CHECK;
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
THArgCheck(THTensor_(isContiguous)(gradInput), 4,
"gradInput must be contiguous");
THNN_CHECK_SHAPE(input, gradInput);
if (reduction == Reduction::None) {
GRADOUTPUT_SHAPE_CHECK;
int64_t batch_size = THTensor_(size)(input, 0);
int64_t H = THTensor_(size)(input, 2);
int64_t W = THTensor_(size)(input, 3);
int64_t b, h, w;
#pragma omp parallel for private(b, h, w)
for (b = 0; b < batch_size; b++) {
for (h = 0; h < H; h++) {
for (w = 0; w < W; w++) {
int64_t cur_target = (int64_t)THIndexTensor_(get3d)(target, b, h, w);
if (cur_target == ignore_index) {
continue;
}
scalar_t value = -(weights ? THTensor_(fastGetLegacy1dNoScalars)(weights, cur_target) : 1.0f);
scalar_t gradOutput_value = THTensor_(fastGet3d)(gradOutput, b, h, w);
THTensor_(fastSet4d)(gradInput, b, cur_target, h, w, value * gradOutput_value);
}
}
}
return;
}
THNN_CHECK_DIM_SIZE(gradOutput, 1, 0, 1);
scalar_t *total_weight_data = total_weight->data<scalar_t>();
if (*total_weight_data <= 0)
return;
target = THIndexTensor_(newContiguous)(target);
weights = weights ? THTensor_(newContiguous)(weights) : NULL;
THIndex_t *target_data = THIndexTensor_(data)(target);
scalar_t *weights_data = weights ? weights->data<scalar_t>() : NULL;
scalar_t *gradInput_data = gradInput->data<scalar_t>();
int64_t batch_size = THTensor_(size)(input, 0);
int64_t n_classes = THTensor_(size)(input, 1);
int64_t map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3);
int64_t sample_size = map_size * n_classes;
scalar_t normalize = (reduction == Reduction::Mean) ? *total_weight_data : 1.0f;
int b;
#pragma omp parallel for
for (b = 0; b < batch_size; b++) {
int elem;
for (elem = 0; elem < map_size; elem++) {
int cur_target = target_data[b * map_size + elem];
if (cur_target == ignore_index) continue;
THAssert(cur_target >= 0 && cur_target < n_classes);
int index = b * sample_size + cur_target * map_size + elem;
gradInput_data[index] =
-(weights ? weights_data[cur_target] : 1.0f) / normalize * THTensor_(fastGetLegacy1dNoScalars)(gradOutput, 0);
}
}
THIndexTensor_(free)(target);
if (weights)
c10::raw::intrusive_ptr::decref(weights);
}
#undef INITIAL_CHECK
#endif
|
vision.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS IIIII OOO N N %
% V V I SS I O O NN N %
% V V I SSS I O O N N N %
% V V I SS I O O N NN %
% V IIIII SSSSS IIIII OOO N N %
% %
% %
% MagickCore Computer Vision Methods %
% %
% Software Design %
% Cristy %
% September 2014 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/opencl-private.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/vision.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n n e c t e d C o m p o n e n t s I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConnectedComponentsImage() returns the connected-components of the image
% uniquely labeled. The returned connected components image colors member
% defines the number of unique objects. Choose from 4 or 8-way connectivity.
%
% You are responsible for freeing the connected components object resources
% with this statement:
%
% objects = (CCObjectInfo *) RelinquishMagickMemory(objects);
%
% The format of the ConnectedComponentsImage method is:
%
% Image *ConnectedComponentsImage(const Image *image,
% const size_t connectivity,CCObjectInfo **objects,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o connectivity: how many neighbors to visit, choose from 4 or 8.
%
% o objects: return the attributes of each unique object.
%
% o exception: return any errors or warnings in this structure.
%
*/
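#if 0
/*
  A hedged usage sketch (illustration only, not part of MagickCore):
  label the connected components of an image with 4-way connectivity,
  then release the object list as documented above.
*/
static Image *ExampleConnectedComponents(const Image *image,
  ExceptionInfo *exception)
{
  CCObjectInfo
    *objects;

  Image
    *component_image;

  component_image=ConnectedComponentsImage(image,4,&objects,exception);
  if (component_image == (Image *) NULL)
    return((Image *) NULL);
  /* inspect objects[i].area, objects[i].bounding_box, ... here */
  objects=(CCObjectInfo *) RelinquishMagickMemory(objects);
  return(component_image);
}
#endif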
static int CCObjectInfoCompare(const void *x,const void *y)
{
CCObjectInfo
*p,
*q;
p=(CCObjectInfo *) x;
q=(CCObjectInfo *) y;
return((int) (q->area-(ssize_t) p->area));
}
static void PerimeterThreshold(const Image *component_image,
CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
RectangleInfo
bounding_box;
size_t
pattern[4] = { 1, 0, 0, 0 };
ssize_t
y;
/*
Compute perimeter of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
    for (y=(-1); y < (ssize_t) bounding_box.height; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
bounding_box.y+y,bounding_box.width+2,2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
      for (x=(-1); x < (ssize_t) bounding_box.width; x++)
{
Quantum
pixels[4];
ssize_t
v;
size_t
foreground;
/*
An Algorithm for Calculating Objects’ Shape Features in Binary
Images, Lifeng He, Yuyan Chao.
*/
foreground=0;
for (v=0; v < 2; v++)
{
ssize_t
u;
for (u=0; u < 2; u++)
{
ssize_t
offset;
offset=v*(bounding_box.width+2)*
GetPixelChannels(component_image)+u*
GetPixelChannels(component_image);
pixels[2*v+u]=GetPixelIndex(component_image,p+offset);
if ((ssize_t) pixels[2*v+u] == i)
foreground++;
}
}
if (foreground == 1)
pattern[1]++;
else
if (foreground == 2)
{
if ((((ssize_t) pixels[0] == i) &&
((ssize_t) pixels[3] == i)) ||
(((ssize_t) pixels[1] == i) &&
((ssize_t) pixels[2] == i)))
pattern[0]++; /* diagonal */
else
pattern[2]++;
}
else
if (foreground == 3)
pattern[3]++;
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[metric_index]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+
MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5);
}
}
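/*
  A note on the weights above (an added explanation of the He-Chao
  estimator, not in the original source): each 2x2 window contributes to
  the perimeter according to how many of its pixels belong to the object:
  1/sqrt(2) for one pixel (pattern[1]) or three pixels (pattern[3]), 1.0
  for an adjacent pair (pattern[2]), and sqrt(2) for a diagonal pair
  (pattern[0]).
*/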
static void CircularityThreshold(const Image *component_image,
CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
RectangleInfo
bounding_box;
size_t
pattern[4] = { 1, 0, 0, 0 };
ssize_t
y;
/*
      Compute the circularity of each object (perimeter first).
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=(-1); y < (ssize_t) bounding_box.height; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1,
bounding_box.y+y,bounding_box.width+2,2,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=(-1); x < (ssize_t) bounding_box.width; x++)
{
Quantum
pixels[4];
ssize_t
v;
size_t
foreground;
/*
An Algorithm for Calculating Objects’ Shape Features in Binary
Images, Lifeng He, Yuyan Chao.
*/
foreground=0;
for (v=0; v < 2; v++)
{
ssize_t
u;
for (u=0; u < 2; u++)
{
ssize_t
offset;
offset=v*(bounding_box.width+2)*
GetPixelChannels(component_image)+u*
GetPixelChannels(component_image);
pixels[2*v+u]=GetPixelIndex(component_image,p+offset);
if ((ssize_t) pixels[2*v+u] == i)
foreground++;
}
}
if (foreground == 1)
pattern[1]++;
else
if (foreground == 2)
{
if ((((ssize_t) pixels[0] == i) &&
((ssize_t) pixels[3] == i)) ||
(((ssize_t) pixels[1] == i) &&
((ssize_t) pixels[2] == i)))
pattern[0]++; /* diagonal */
else
pattern[2]++;
}
else
if (foreground == 3)
pattern[3]++;
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[metric_index]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+
MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5);
object[i].metric[metric_index]=4.0*MagickPI*object[i].area/
(object[i].metric[metric_index]*object[i].metric[metric_index]);
}
}
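/*
  An added note: the final metric above is the isoperimetric quotient
  4*pi*area/perimeter^2, which is 1.0 for a perfect disk and tends toward
  0.0 for thin, elongated objects.
*/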
static void MajorAxisThreshold(const Image *component_image,
CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
double
M00 = 0.0,
M01 = 0.0,
M02 = 0.0,
M10 = 0.0,
M11 = 0.0,
M20 = 0.0;
PointInfo
centroid = { 0.0, 0.0 };
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
y;
/*
Compute ellipse major axis of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M00++;
M10+=x;
M01+=y;
}
p+=GetPixelChannels(component_image);
}
}
centroid.x=M10*PerceptibleReciprocal(M00);
centroid.y=M01*PerceptibleReciprocal(M00);
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M11+=(x-centroid.x)*(y-centroid.y);
M20+=(x-centroid.x)*(x-centroid.x);
M02+=(y-centroid.y)*(y-centroid.y);
}
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[metric_index]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+
sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
}
}
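/*
  An added note: the expression above is the standard image-ellipse
  result. With central second moments M20, M02, and M11, the major-axis
  length is

    a = sqrt((2/M00)*((M20+M02)+sqrt(4*M11^2+(M20-M02)^2)))

  and the minor axis (next function) uses the same formula with a minus
  sign before the inner square root.
*/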
static void MinorAxisThreshold(const Image *component_image,
CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
double
M00 = 0.0,
M01 = 0.0,
M02 = 0.0,
M10 = 0.0,
M11 = 0.0,
M20 = 0.0;
PointInfo
centroid = { 0.0, 0.0 };
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
y;
/*
      Compute ellipse minor axis of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M00++;
M10+=x;
M01+=y;
}
p+=GetPixelChannels(component_image);
}
}
centroid.x=M10*PerceptibleReciprocal(M00);
centroid.y=M01*PerceptibleReciprocal(M00);
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M11+=(x-centroid.x)*(y-centroid.y);
M20+=(x-centroid.x)*(x-centroid.x);
M02+=(y-centroid.y)*(y-centroid.y);
}
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[metric_index]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)-
sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
}
}
static void EccentricityThreshold(const Image *component_image,
CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
double
M00 = 0.0,
M01 = 0.0,
M02 = 0.0,
M10 = 0.0,
M11 = 0.0,
M20 = 0.0;
PointInfo
centroid = { 0.0, 0.0 },
ellipse_axis = { 0.0, 0.0 };
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
y;
/*
Compute eccentricity of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M00++;
M10+=x;
M01+=y;
}
p+=GetPixelChannels(component_image);
}
}
centroid.x=M10*PerceptibleReciprocal(M00);
centroid.y=M01*PerceptibleReciprocal(M00);
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M11+=(x-centroid.x)*(y-centroid.y);
M20+=(x-centroid.x)*(x-centroid.x);
M02+=(y-centroid.y)*(y-centroid.y);
}
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
ellipse_axis.x=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+
sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
ellipse_axis.y=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)-
sqrt(4.0*M11*M11+(M20-M02)*(M20-M02))));
object[i].metric[metric_index]=sqrt(1.0-(ellipse_axis.y*ellipse_axis.y*
PerceptibleReciprocal(ellipse_axis.x*ellipse_axis.x)));
}
}
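/*
  An added note: with major and minor axes a and b as computed above, the
  eccentricity is e = sqrt(1-(b/a)^2): 0.0 for a circle, approaching 1.0
  for a degenerate, line-like object.
*/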
static void AngleThreshold(const Image *component_image,
CCObjectInfo *object,const ssize_t metric_index,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(status) \
magick_number_threads(component_image,component_image,component_image->colors,1)
#endif
for (i=0; i < (ssize_t) component_image->colors; i++)
{
CacheView
*component_view;
double
M00 = 0.0,
M01 = 0.0,
M02 = 0.0,
M10 = 0.0,
M11 = 0.0,
M20 = 0.0;
PointInfo
centroid = { 0.0, 0.0 };
RectangleInfo
bounding_box;
const Quantum
*magick_restrict p;
ssize_t
x;
ssize_t
y;
/*
Compute ellipse angle of each object.
*/
if (status == MagickFalse)
continue;
component_view=AcquireAuthenticCacheView(component_image,exception);
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M00++;
M10+=x;
M01+=y;
}
p+=GetPixelChannels(component_image);
}
}
centroid.x=M10*PerceptibleReciprocal(M00);
centroid.y=M01*PerceptibleReciprocal(M00);
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,p) == i)
{
M11+=(x-centroid.x)*(y-centroid.y);
M20+=(x-centroid.x)*(x-centroid.x);
M02+=(y-centroid.y)*(y-centroid.y);
}
p+=GetPixelChannels(component_image);
}
}
component_view=DestroyCacheView(component_view);
object[i].metric[metric_index]=RadiansToDegrees(1.0/2.0*atan(2.0*M11*
PerceptibleReciprocal(M20-M02)));
    /*
      Quadrant disambiguation (an assumed fix: fabs() can never be
      negative, so the original comparisons against 0.0 could never
      select these branches; MagickEpsilon tests the intended
      "moment is zero" condition).
    */
    if (fabs(M11) < MagickEpsilon)
      {
        if ((fabs(M20-M02) >= MagickEpsilon) && ((M20-M02) < 0.0))
          object[i].metric[metric_index]+=90.0;
      }
    else
      if (M11 < 0.0)
        {
          if (fabs(M20-M02) >= MagickEpsilon)
            {
              if ((M20-M02) < 0.0)
                object[i].metric[metric_index]+=90.0;
              else
                object[i].metric[metric_index]+=180.0;
            }
        }
      else
        if ((fabs(M20-M02) >= MagickEpsilon) && ((M20-M02) < 0.0))
          object[i].metric[metric_index]+=90.0;
}
}
MagickExport Image *ConnectedComponentsImage(const Image *image,
const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception)
{
#define ConnectedComponentsImageTag "ConnectedComponents/Image"
CacheView
*component_view,
*image_view,
*object_view;
CCObjectInfo
*object;
char
*c;
const char
*artifact,
*metrics[CCMaxMetrics];
double
max_threshold,
min_threshold;
Image
*component_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MatrixInfo
*equivalences;
ssize_t
i;
size_t
size;
ssize_t
background_id,
connect4[2][2] = { { -1, 0 }, { 0, -1 } },
connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } },
dx,
dy,
first,
last,
n,
step,
y;
/*
Initialize connected components image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (objects != (CCObjectInfo **) NULL)
*objects=(CCObjectInfo *) NULL;
component_image=CloneImage(image,0,0,MagickTrue,exception);
if (component_image == (Image *) NULL)
return((Image *) NULL);
component_image->depth=MAGICKCORE_QUANTUM_DEPTH;
if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse)
{
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Initialize connected components equivalences.
*/
size=image->columns*image->rows;
if (image->columns != (size/image->rows))
{
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception);
if (equivalences == (MatrixInfo *) NULL)
{
component_image=DestroyImage(component_image);
return((Image *) NULL);
}
for (n=0; n < (ssize_t) (image->columns*image->rows); n++)
(void) SetMatrixElement(equivalences,n,0,&n);
object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object));
if (object == (CCObjectInfo *) NULL)
{
equivalences=DestroyMatrixInfo(equivalences);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memset(object,0,MaxColormapSize*sizeof(*object));
for (i=0; i < (ssize_t) MaxColormapSize; i++)
{
object[i].id=i;
object[i].bounding_box.x=(ssize_t) image->columns;
object[i].bounding_box.y=(ssize_t) image->rows;
GetPixelInfo(image,&object[i].color);
}
/*
Find connected components.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++)
{
if (status == MagickFalse)
continue;
dx=connectivity > 4 ? connect8[n][1] : connect4[n][1];
dy=connectivity > 4 ? connect8[n][0] : connect4[n][0];
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
p+=GetPixelChannels(image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel,
target;
ssize_t
neighbor_offset,
obj,
offset,
ox,
oy,
root;
/*
Is neighbor an authentic pixel and a different color than the pixel?
*/
GetPixelInfoPixel(image,p,&pixel);
if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) ||
((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows))
{
p+=GetPixelChannels(image);
continue;
}
neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx*
GetPixelChannels(image);
GetPixelInfoPixel(image,p+neighbor_offset,&target);
if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
{
p+=GetPixelChannels(image);
continue;
}
/*
Resolve this equivalence.
*/
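          /*
            An added note: this is a union-find step. Chase parent links
            to the roots of both pixels, link the larger root under the
            smaller, then rewrite the entry links toward the new root so
            later lookups stay short.
          */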
offset=y*image->columns+x;
neighbor_offset=dy*image->columns+dx;
ox=offset;
status=GetMatrixElement(equivalences,ox,0,&obj);
while (obj != ox)
{
ox=obj;
status=GetMatrixElement(equivalences,ox,0,&obj);
}
oy=offset+neighbor_offset;
status=GetMatrixElement(equivalences,oy,0,&obj);
while (obj != oy)
{
oy=obj;
status=GetMatrixElement(equivalences,oy,0,&obj);
}
if (ox < oy)
{
status=SetMatrixElement(equivalences,oy,0,&ox);
root=ox;
}
else
{
status=SetMatrixElement(equivalences,ox,0,&oy);
root=oy;
}
ox=offset;
status=GetMatrixElement(equivalences,ox,0,&obj);
while (obj != root)
{
status=GetMatrixElement(equivalences,ox,0,&obj);
status=SetMatrixElement(equivalences,ox,0,&root);
}
oy=offset+neighbor_offset;
status=GetMatrixElement(equivalences,oy,0,&obj);
while (obj != root)
{
status=GetMatrixElement(equivalences,oy,0,&obj);
status=SetMatrixElement(equivalences,oy,0,&root);
}
status=SetMatrixElement(equivalences,y*image->columns+x,0,&root);
p+=GetPixelChannels(image);
}
}
}
/*
Label connected components.
*/
n=0;
component_view=AcquireAuthenticCacheView(component_image,exception);
for (y=0; y < (ssize_t) component_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) component_image->columns; x++)
{
ssize_t
id,
offset;
offset=y*image->columns+x;
status=GetMatrixElement(equivalences,offset,0,&id);
if (id != offset)
status=GetMatrixElement(equivalences,id,0,&id);
else
{
id=n++;
if (id >= (ssize_t) MaxColormapSize)
break;
}
status=SetMatrixElement(equivalences,offset,0,&id);
if (x < object[id].bounding_box.x)
object[id].bounding_box.x=x;
if (x >= (ssize_t) object[id].bounding_box.width)
object[id].bounding_box.width=(size_t) x;
if (y < object[id].bounding_box.y)
object[id].bounding_box.y=y;
if (y >= (ssize_t) object[id].bounding_box.height)
object[id].bounding_box.height=(size_t) y;
object[id].color.red+=QuantumScale*GetPixelRed(image,p);
object[id].color.green+=QuantumScale*GetPixelGreen(image,p);
object[id].color.blue+=QuantumScale*GetPixelBlue(image,p);
if (image->alpha_trait != UndefinedPixelTrait)
object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p);
if (image->colorspace == CMYKColorspace)
object[id].color.black+=QuantumScale*GetPixelBlack(image,p);
object[id].centroid.x+=x;
object[id].centroid.y+=y;
object[id].area++;
SetPixelIndex(component_image,(Quantum) id,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(component_image);
}
if (n > (ssize_t) MaxColormapSize)
break;
if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
progress++;
proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
component_view=DestroyCacheView(component_view);
image_view=DestroyCacheView(image_view);
equivalences=DestroyMatrixInfo(equivalences);
if (n > (ssize_t) MaxColormapSize)
{
object=(CCObjectInfo *) RelinquishMagickMemory(object);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"TooManyObjects");
}
background_id=0;
min_threshold=0.0;
max_threshold=0.0;
component_image->colors=(size_t) n;
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width-=(object[i].bounding_box.x-1);
object[i].bounding_box.height-=(object[i].bounding_box.y-1);
object[i].color.red/=(QuantumScale*object[i].area);
object[i].color.green/=(QuantumScale*object[i].area);
object[i].color.blue/=(QuantumScale*object[i].area);
if (image->alpha_trait != UndefinedPixelTrait)
object[i].color.alpha/=(QuantumScale*object[i].area);
if (image->colorspace == CMYKColorspace)
object[i].color.black/=(QuantumScale*object[i].area);
object[i].centroid.x/=object[i].area;
object[i].centroid.y/=object[i].area;
max_threshold+=object[i].area;
if (object[i].area > object[background_id].area)
background_id=i;
}
max_threshold+=MagickEpsilon;
n=(-1);
artifact=GetImageArtifact(image,"connected-components:background-id");
if (artifact != (const char *) NULL)
background_id=(ssize_t) StringToLong(artifact);
artifact=GetImageArtifact(image,"connected-components:area-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max area threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].area < min_threshold) ||
(object[i].area >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:keep-colors");
if (artifact != (const char *) NULL)
{
const char
*p;
/*
Keep selected objects based on color, merge others.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
object[i].merge=MagickTrue;
for (p=artifact; ; )
{
char
color[MagickPathExtent];
PixelInfo
pixel;
const char
*q;
for (q=p; *q != '\0'; q++)
if (*q == ';')
break;
(void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
MagickPathExtent));
(void) QueryColorCompliance(color,AllCompliance,&pixel,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse)
object[i].merge=MagickFalse;
if (*q == '\0')
break;
p=q+1;
}
}
artifact=GetImageArtifact(image,"connected-components:keep-ids");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"connected-components:keep");
if (artifact != (const char *) NULL)
{
/*
Keep selected objects based on id, merge others.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
object[i].merge=MagickTrue;
for (c=(char *) artifact; *c != '\0'; )
{
while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
c++;
first=(ssize_t) strtol(c,&c,10);
if (first < 0)
first+=(ssize_t) component_image->colors;
last=first;
while (isspace((int) ((unsigned char) *c)) != 0)
c++;
if (*c == '-')
{
last=(ssize_t) strtol(c+1,&c,10);
if (last < 0)
last+=(ssize_t) component_image->colors;
}
step=(ssize_t) (first > last ? -1 : 1);
for ( ; first != (last+step); first+=step)
object[first].merge=MagickFalse;
}
}
artifact=GetImageArtifact(image,"connected-components:keep-top");
if (artifact != (const char *) NULL)
{
CCObjectInfo
*top_objects;
ssize_t
top_ids;
/*
Keep top objects.
*/
top_ids=(ssize_t) StringToLong(artifact);
top_objects=(CCObjectInfo *) AcquireQuantumMemory(component_image->colors,
sizeof(*top_objects));
if (top_objects == (CCObjectInfo *) NULL)
{
object=(CCObjectInfo *) RelinquishMagickMemory(object);
component_image=DestroyImage(component_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memcpy(top_objects,object,component_image->colors*sizeof(*object));
qsort((void *) top_objects,component_image->colors,sizeof(*top_objects),
CCObjectInfoCompare);
for (i=top_ids+1; i < (ssize_t) component_image->colors; i++)
object[top_objects[i].id].merge=MagickTrue;
top_objects=(CCObjectInfo *) RelinquishMagickMemory(top_objects);
}
artifact=GetImageArtifact(image,"connected-components:remove-colors");
if (artifact != (const char *) NULL)
{
const char
*p;
/*
Remove selected objects based on color, keep others.
*/
for (p=artifact; ; )
{
char
color[MagickPathExtent];
PixelInfo
pixel;
const char
*q;
for (q=p; *q != '\0'; q++)
if (*q == ';')
break;
(void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1,
MagickPathExtent));
(void) QueryColorCompliance(color,AllCompliance,&pixel,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse)
object[i].merge=MagickTrue;
if (*q == '\0')
break;
p=q+1;
}
}
artifact=GetImageArtifact(image,"connected-components:remove-ids");
if (artifact == (const char *) NULL)
artifact=GetImageArtifact(image,"connected-components:remove");
if (artifact != (const char *) NULL)
for (c=(char *) artifact; *c != '\0'; )
{
/*
Remove selected objects based on id, keep others.
*/
while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ','))
c++;
first=(ssize_t) strtol(c,&c,10);
if (first < 0)
first+=(ssize_t) component_image->colors;
last=first;
while (isspace((int) ((unsigned char) *c)) != 0)
c++;
if (*c == '-')
{
last=(ssize_t) strtol(c+1,&c,10);
if (last < 0)
last+=(ssize_t) component_image->colors;
}
step=(ssize_t) (first > last ? -1 : 1);
for ( ; first != (last+step); first+=step)
object[first].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:perimeter-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max perimeter threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="perimeter";
      PerimeterThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:circularity-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max circularity threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="circularity";
      CircularityThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:diameter-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max diameter threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="diameter";
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].metric[n]=ceil(sqrt(4.0*object[i].area/MagickPI)-0.5);
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
}
artifact=GetImageArtifact(image,"connected-components:major-axis-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse major threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="major-axis";
MajorAxisThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:minor-axis-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse minor threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="minor-axis";
MinorAxisThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:eccentricity-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max eccentricity threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="eccentricy";
EccentricityThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
artifact=GetImageArtifact(image,"connected-components:angle-threshold");
if (artifact != (const char *) NULL)
{
/*
Merge any object not within the min and max ellipse angle threshold.
*/
(void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold);
metrics[++n]="angle";
AngleThreshold(component_image,object,n,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
if (((object[i].metric[n] < min_threshold) ||
(object[i].metric[n] >= max_threshold)) && (i != background_id))
object[i].merge=MagickTrue;
}
/*
Merge any object not within the min and max area threshold.
*/
component_view=AcquireAuthenticCacheView(component_image,exception);
object_view=AcquireVirtualCacheView(component_image,exception);
for (i=0; i < (ssize_t) component_image->colors; i++)
{
RectangleInfo
bounding_box;
size_t
id;
ssize_t
j;
if (status == MagickFalse)
continue;
if ((object[i].merge == MagickFalse) || (i == background_id))
continue; /* keep object */
/*
Merge this object.
*/
for (j=0; j < (ssize_t) component_image->colors; j++)
object[j].census=0;
bounding_box=object[i].bounding_box;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
size_t
k;
if (status == MagickFalse)
continue;
j=(ssize_t) GetPixelIndex(component_image,p);
if (j == i)
            for (k=0; k < (size_t) (connectivity > 4 ? 4 : 2); k++)
{
const Quantum
*q;
/*
Compute area of adjacent objects.
*/
if (status == MagickFalse)
continue;
dx=connectivity > 4 ? connect8[k][1] : connect4[k][1];
dy=connectivity > 4 ? connect8[k][0] : connect4[k][0];
q=GetCacheViewVirtualPixels(object_view,bounding_box.x+x+dx,
bounding_box.y+y+dy,1,1,exception);
if (q == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
j=(ssize_t) GetPixelIndex(component_image,q);
if (j != i)
object[j].census++;
}
p+=GetPixelChannels(component_image);
}
}
/*
Merge with object of greatest adjacent area.
*/
id=0;
for (j=1; j < (ssize_t) component_image->colors; j++)
if (object[j].census > object[id].census)
id=(size_t) j;
object[i].area=0.0;
for (y=0; y < (ssize_t) bounding_box.height; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(component_view,bounding_box.x,
bounding_box.y+y,bounding_box.width,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) bounding_box.width; x++)
{
if ((ssize_t) GetPixelIndex(component_image,q) == i)
SetPixelIndex(component_image,(Quantum) id,q);
q+=GetPixelChannels(component_image);
}
if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse)
status=MagickFalse;
}
}
object_view=DestroyCacheView(object_view);
component_view=DestroyCacheView(component_view);
artifact=GetImageArtifact(image,"connected-components:mean-color");
if (IsStringTrue(artifact) != MagickFalse)
{
/*
Replace object with mean color.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
component_image->colormap[i]=object[i].color;
}
(void) SyncImage(component_image,exception);
artifact=GetImageArtifact(image,"connected-components:verbose");
if ((IsStringTrue(artifact) != MagickFalse) ||
(objects != (CCObjectInfo **) NULL))
{
/*
Report statistics on each unique object.
*/
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width=0;
object[i].bounding_box.height=0;
object[i].bounding_box.x=(ssize_t) component_image->columns;
object[i].bounding_box.y=(ssize_t) component_image->rows;
object[i].centroid.x=0;
object[i].centroid.y=0;
object[i].census=object[i].area == 0.0 ? 0.0 : 1.0;
object[i].area=0;
}
component_view=AcquireVirtualCacheView(component_image,exception);
for (y=0; y < (ssize_t) component_image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns,
1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) component_image->columns; x++)
{
size_t
id;
id=(size_t) GetPixelIndex(component_image,p);
if (x < object[id].bounding_box.x)
object[id].bounding_box.x=x;
if (x > (ssize_t) object[id].bounding_box.width)
object[id].bounding_box.width=(size_t) x;
if (y < object[id].bounding_box.y)
object[id].bounding_box.y=y;
if (y > (ssize_t) object[id].bounding_box.height)
object[id].bounding_box.height=(size_t) y;
object[id].centroid.x+=x;
object[id].centroid.y+=y;
object[id].area++;
p+=GetPixelChannels(component_image);
}
}
for (i=0; i < (ssize_t) component_image->colors; i++)
{
object[i].bounding_box.width-=(object[i].bounding_box.x-1);
object[i].bounding_box.height-=(object[i].bounding_box.y-1);
object[i].centroid.x=object[i].centroid.x/object[i].area;
object[i].centroid.y=object[i].centroid.y/object[i].area;
}
component_view=DestroyCacheView(component_view);
qsort((void *) object,component_image->colors,sizeof(*object),
CCObjectInfoCompare);
if (objects == (CCObjectInfo **) NULL)
{
ssize_t
j;
artifact=GetImageArtifact(image,
"connected-components:exclude-header");
if (IsStringTrue(artifact) == MagickFalse)
{
(void) fprintf(stdout,"Objects (");
artifact=GetImageArtifact(image,
"connected-components:exclude-ids");
if (IsStringTrue(artifact) == MagickFalse)
(void) fprintf(stdout,"id: ");
(void) fprintf(stdout,"bounding-box centroid area mean-color");
for (j=0; j <= n; j++)
(void) fprintf(stdout," %s",metrics[j]);
(void) fprintf(stdout,"):\n");
}
for (i=0; i < (ssize_t) component_image->colors; i++)
if (object[i].census > 0.0)
{
char
mean_color[MagickPathExtent];
GetColorTuple(&object[i].color,MagickFalse,mean_color);
(void) fprintf(stdout," ");
artifact=GetImageArtifact(image,
"connected-components:exclude-ids");
if (IsStringTrue(artifact) == MagickFalse)
(void) fprintf(stdout,"%.20g: ",(double) object[i].id);
(void) fprintf(stdout,
"%.20gx%.20g%+.20g%+.20g %.1f,%.1f %.*g %s",(double)
object[i].bounding_box.width,(double)
object[i].bounding_box.height,(double)
object[i].bounding_box.x,(double) object[i].bounding_box.y,
object[i].centroid.x,object[i].centroid.y,
GetMagickPrecision(),(double) object[i].area,mean_color);
for (j=0; j <= n; j++)
(void) fprintf(stdout," %.*g",GetMagickPrecision(),
object[i].metric[j]);
(void) fprintf(stdout,"\n");
}
}
}
if (objects == (CCObjectInfo **) NULL)
object=(CCObjectInfo *) RelinquishMagickMemory(object);
else
*objects=object;
return(component_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralImage() returns the summed-area table of the image: each output
% pixel holds the sum of all pixel values above and to the left of it,
% inclusive.
%
% The format of the IntegralImage method is:
%
% Image *IntegralImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
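/*
  An added sketch of the summed-area recurrence computed per channel:

      I(x,y) = p(x,y) + I(x-1,y) + I(x,y-1) - I(x-1,y-1)

  which is exactly what the per-pixel loop below accumulates, with the
  out-of-range terms treated as zero on the first row and column.
*/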
MagickExport Image *IntegralImage(const Image *image,ExceptionInfo *exception)
{
#define IntegralImageTag "Integral/Image"
CacheView
*image_view,
*integral_view;
Image
*integral_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize integral image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
integral_image=CloneImage(image,0,0,MagickTrue,exception);
if (integral_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
integral_image=DestroyImage(integral_image);
return((Image *) NULL);
}
/*
Calculate the sum of values (pixel values) in the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(integral_image,exception);
integral_view=AcquireAuthenticCacheView(integral_image,exception);
for (y=0; y < (ssize_t) integral_image->rows; y++)
{
const Quantum
*magick_restrict p;
MagickBooleanType
sync;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(integral_view,0,y-1,integral_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(integral_view,0,y,integral_image->columns,1,
exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) integral_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(integral_image); i++)
{
double
sum;
PixelTrait traits = GetPixelChannelTraits(integral_image,
(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
sum=(double) q[i];
if (x > 0)
sum+=(q-GetPixelChannels(integral_image))[i];
if (y > 0)
sum+=p[i];
if ((x > 0) && (y > 0))
sum-=(p-GetPixelChannels(integral_image))[i];
q[i]=ClampToQuantum(sum);
}
p+=GetPixelChannels(integral_image);
q+=GetPixelChannels(integral_image);
}
sync=SyncCacheViewAuthenticPixels(integral_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
progress++;
proceed=SetImageProgress(integral_image,IntegralImageTag,progress,
integral_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
integral_view=DestroyCacheView(integral_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
integral_image=DestroyImage(integral_image);
return(integral_image);
}
|
templatemath.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* templatemath.h
*
* Created on: Jan 1, 2016
* Author: agibsonccc
*/
#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_
#include <dll.h>
#include <pointercast.h>
#include <platformmath.h>
#include <DataTypeUtils.h>
#define BFLOAT16_MAX_VALUE 3.38953139E38  // largest finite bfloat16 (0x7F7F)
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
#define FLOAT_MIN_NORMAL 1.17549435e-38
#ifndef M_E
#define M_E 2.71828182845904523536
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace nd4j {
#ifdef __CUDACC__
#endif
namespace math {
template<typename T>
math_def inline T nd4j_abs(T value);
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2);
template<typename T>
math_def inline T nd4j_max(T val1, T val2);
template<typename T>
math_def inline T nd4j_min(T val1, T val2);
template <typename T>
math_def inline bool nd4j_eq(T val1, T val2, double eps);
template<typename T>
math_def inline T nd4j_re(T val1, T val2);
template<typename T, typename Z>
math_def inline Z nd4j_rint(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_copysign(T val1, T val2);
template <typename T, typename Z>
math_def inline Z nd4j_softplus(T val);
//#ifndef __CUDACC__
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length);
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_ceil(T val1);
template<typename T>
math_def inline bool nd4j_isnan(T val1);
template<typename T>
math_def inline bool nd4j_isinf(T val1);
template<typename T>
math_def inline bool nd4j_isfin(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_cos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_cosh(T val);
template<typename X, typename Z>
math_def inline Z nd4j_exp(X val);
template<typename T, typename Z>
math_def inline Z nd4j_floor(T val);
template<typename X, typename Z>
math_def inline Z nd4j_log(X val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2);
template<typename T, typename Z>
math_def inline Z nd4j_round(T val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X num, Y denom);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X num, Y denom);
template<typename T, typename Z>
math_def inline Z nd4j_erf(T num);
template<typename T, typename Z>
math_def inline Z nd4j_erfc(T num);
template<typename T, typename Z>
math_def inline Z nd4j_sigmoid(T val) {
return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val));
}
template<typename T, typename Z>
math_def inline Z nd4j_elu(T val, T alpha) {
if (val >= (T) 0.f)
return val;
return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f));
}
template<typename T, typename Z>
math_def inline Z nd4j_leakyrelu(T val,T alpha) {
if (val < (T) 0.0f)
return alpha * val;
else
return val;
}
template<typename T, typename Z>
math_def inline Z nd4j_eluderivative(T val, T alpha) {
if (val >= static_cast<T>(0.0f))
return static_cast<Z>(1.0f);
return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val);
//return val >= 0.0 ? 1.0 : nd4j_exp(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_sin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sinh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_softplus(T val) {
return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
}
template<typename T, typename Z>
math_def inline Z nd4j_softsign(T val) {
return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_sqrt(X val);
template<typename X, typename Z>
math_def inline Z nd4j_tanh(X val);
template<typename T, typename Z>
math_def inline Z nd4j_tan(T val);
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2);
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2) {
return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
}
template<typename T, typename Z>
math_def inline Z nd4j_tan(T tval) {
return p_tan<Z>(static_cast<Z>(tval));
}
template<typename T, typename Z>
math_def inline Z nd4j_tanhderivative(T val) {
Z tanh = nd4j_tanh<T,Z>(val);
return (Z) 1.0f - tanh * tanh;
}
template <typename T, typename Z>
math_def inline Z nd4j_sigmoidderivative(T val) {
Z sigmoid = nd4j_sigmoid<T,Z>(val);
return sigmoid * ((Z) 1.0f - sigmoid);
}
template<typename T, typename Z>
math_def inline Z nd4j_softsignderivative(T val) {
T y = (T) 1.0f + nd4j_abs(val);
return (Z) 1.0f / (y * y);
}
template<typename T, typename Z>
math_def inline Z nd4j_sgn(T val) {
return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f;
}
template<typename T, typename Z>
math_def inline Z nd4j_sign(T val) {
return nd4j_sgn<T, Z>(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_signum(T val) {
return nd4j_sgn<T, Z>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_gamma(X a);
template<typename X, typename Z>
math_def inline Z nd4j_lgamma(X x);
//#ifndef __CUDACC__
/*
template<>
math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
float16 dot = (float16) 0.0f;
// TODO: since we can't use simd on unions, we might use something else here.
for(int e = 0; e < length; e++) {
dot += x[e] * y[e];
}
return dot;
}
*/
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length) {
Z dot = (Z)0.0f;
for(int e = 0; e < length; e++) {
dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]);
}
return dot;
}
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_acos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sech(T val);
template<typename T, typename Z>
math_def inline Z nd4j_acosh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val) {
//Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
}
template<typename T, typename Z>
math_def inline Z nd4j_atan(T val);
template<typename T, typename Z>
math_def inline Z nd4j_atanh(T val);
template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
if (value < (float16) 0.f) {
return float16(__hneg(value.data));
} else
return value;
#else
return (float16) fabsf((float) value);
#endif
}
template<>
math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
return (bfloat16) fabsf((float) value);
}
template<>
math_def inline float nd4j_abs<float>(float value) {
return fabsf(value);
}
template<>
math_def inline double nd4j_abs<double>(double value) {
return fabs(value);
}
template<>
math_def inline int nd4j_abs<int>(int value) {
return abs(value);
}
template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
return llabs(value);
}
template<>
math_def inline bool nd4j_abs<bool>(bool value) {
return value;
}
template<>
math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
return value;
}
template<>
math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
return value;
}
template<>
math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
return value;
}
template<>
math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
return value;
}
template<>
math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
return value < 0 ? -value : value;
}
template<>
math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
return value < 0 ? -value : value;
}
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
return *(value.data.getXP()) == 0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
return value == bfloat16::nan(); //0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<float>(float value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<double>(double value) {
return value != value;
}
template<>
math_def inline bool nd4j_isnan<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<bool>(bool value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
return isinf(value);
#else
return std::isinf(value);
#endif
//return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<int>(int value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<int16_t>(int16_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<int8_t>(int8_t value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<bool>(bool value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) {
return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) {
return false;
}
template<typename T>
math_def inline bool nd4j_isfin(T value) {
return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value);
}
template<>
math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) {
return (float16) copysignf((float) val1, (float) val2);
}
template<>
math_def inline float nd4j_copysign<float>(float val1, float val2) {
return copysignf(val1, val2);
}
template<>
math_def inline double nd4j_copysign<double>(double val1, double val2) {
return copysign(val1, val2);
}
template<>
math_def inline int nd4j_copysign<int>(int val1, int val2) {
if (val2 < 0) return -(nd4j_abs<int>(val1));
else return nd4j_abs<int>(val1);
}
template<>
math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1));
else return nd4j_abs<Nd4jLong>(val1);
}
template<>
math_def inline bool nd4j_max(bool val1, bool val2) {
return val1 || val2;
}
template<typename T>
math_def inline T nd4j_max(T val1, T val2) {
return val1 > val2 ? val1 : val2;
}
template<>
math_def inline bool nd4j_min(bool val1, bool val2) {
return val1 && val2;
}
template<typename T>
math_def inline T nd4j_min(T val1, T val2) {
return val1 < val2 ? val1 : val2;
}
template <typename T>
math_def inline bool nd4j_eq(T d1, T d2, double eps) {
if (nd4j::math::nd4j_isinf<T>(d1) && nd4j::math::nd4j_isinf<T>(d2)) {
if (d1 > 0 && d2 > 0)
return true;
else if (d1 < 0 && d2 < 0)
return true;
else
return false;
}
auto diff = static_cast<double>(nd4j::math::nd4j_abs<T>(d1 - d2));
// works well except in the range of very large numbers
if (diff <= eps)
return true;
// Knuth approach
// works well except in the range of very small numbers
if (diff <= nd4j::math::nd4j_max<double>(nd4j::math::nd4j_abs<double>(static_cast<double>(d1)), nd4j::math::nd4j_abs<double>(static_cast<double>(d2))) * eps)
return true;
return false;
}
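// Illustrative example (editorial note, not from the original source): the two
// tests above are complementary. With eps = 1e-5, d1 = 1e-9 vs d2 = 2e-9 passes
// only the absolute test (diff = 1e-9 <= eps), while d1 = 1e9 vs d2 = 1e9 + 1e3
// passes only the Knuth relative test: diff = 1e3 > eps, but
// diff <= max(|d1|, |d2|) * eps = 1e4.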
template <typename X, typename Z>
math_def inline Z nd4j_ceil(X val) {
return static_cast<Z>(p_ceil<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_round(X val) {
return static_cast<Z>(p_round<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_asin(X val) {
return p_asin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atan(X val) {
return p_atan<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atanh(X val) {
return p_atanh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cosh(X val) {
return p_cosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_rint(X val) {
return p_rint<X>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_sinh(X val) {
return p_sinh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_acos(X val) {
return p_acos<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_sech(X val) {
return static_cast<Z>(1) / nd4j_cosh<X,Z>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_acosh(X val) {
return p_acosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cos(X val) {
return p_cos<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_exp(X val) {
return p_exp<X>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_floor(X val) {
return static_cast<Z>(p_floor<X>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_log(X val) {
return static_cast<Z>(p_log<X>(val));
}
/**
* This function is a special case: it must return a floating-point value, and the Y argument may itself be floating point
* @tparam X
* @tparam Y
* @tparam Z
* @param val
* @param val2
* @return
*/
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2) {
return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
/**
* LogGamma(a) - floating-point extension of ln(n!)
**/
template <typename X, typename Z>
math_def inline Z nd4j_lgamma(X x) {
// if (x <= X(0.0))
// {
// std::stringstream os;
// os << "Logarithm of Gamma has sence only for positive values, but " << x << " was given.";
// throw std::invalid_argument( os.str() );
// }
if (x < X(12.0)) {
return nd4j_log<Z,Z>(nd4j_gamma<X,Z>(x));
}
// Abramowitz and Stegun 6.1.41
// Asymptotic series should be good to at least 11 or 12 figures
// For error analysis, see Whittaker and Watson
// A Course in Modern Analysis (1927), page 252
static const double c[8] = {
1.0/12.0,
-1.0/360.0,
1.0/1260.0,
-1.0/1680.0,
1.0/1188.0,
-691.0/360360.0,
1.0/156.0,
-3617.0/122400.0
};
double z = 1.0 / ((double) x * (double) x);
double sum = c[7];
for (int i = 6; i >= 0; i--) {
sum *= z;
sum += c[i];
}
double series = sum / (double) x;
static const double halfLogTwoPi = 0.91893853320467274178032973640562;
return Z((double(x) - 0.5) * nd4j_log<X,double>(x) - double(x) + halfLogTwoPi + series);
}
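// Sanity check (illustrative, not part of the original header): since
// Gamma(n + 1) = n!, LogGamma extends ln(n!), e.g.
//   nd4j_lgamma<double, double>(6.0)  ~ ln(5!)  = ln(120) ~ 4.7875  (direct branch, x < 12)
//   nd4j_lgamma<double, double>(13.0) ~ ln(12!) ~ 19.9872           (asymptotic branch)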
template<typename T>
math_def inline T nd4j_re(T val1, T val2) {
if (val1 == (T) 0.0f && val2 == (T) 0.0f)
return (T) 0.0f;
return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X val, Y val2) {
return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X val, Y val2) {
return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Z>
math_def inline Z nd4j_sin(X val) {
return p_sin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_sqrt(X val) {
return p_sqrt<Z>(static_cast<Z>(val));
}
template <typename X>
math_def inline X neg_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(2.0f);
X e = static_cast<X>(M_E);
auto p = nd4j::math::nd4j_pow<X, X, X>(e, val * t);
return (p - o)/ (p + o);
}
template <typename X>
math_def inline X pos_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(-2.0f);
X e = static_cast<X>(M_E);
auto p = nd4j::math::nd4j_pow<X, X, X>(e, val * t);
return (o - p) / (o + p);
}
template <typename X, typename Z>
math_def inline Z nd4j_tanh(X val) {
return val <= 0 ? neg_tanh(val) : pos_tanh(val);
//return p_tanh<Z>(static_cast<Z>(val));
}
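// Explanatory note (editorial, not from the original source): both helpers
// evaluate tanh(x) = (e^{2x} - 1) / (e^{2x} + 1); the sign split keeps the
// exponent argument non-positive, so the intermediate power cannot overflow
// for large |x|.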
template <typename X, typename Z>
math_def inline Z nd4j_erf(X val) {
return p_erf<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_erfc(X val) {
return p_erfc<Z>(static_cast<Z>(val));
}
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
T temp = val1; val1=val2; val2=temp;
}
template <typename X, typename Z>
math_def inline Z nd4j_gamma(X a) {
// nd4j_lgamma<X,Z>(a);
// return (Z)std::tgamma(a);
// Split the function domain into three intervals:
// (0, 0.001), [0.001, 12), and (12, infinity)
///////////////////////////////////////////////////////////////////////////
// First interval: (0, 0.001)
//
// For small a, 1/Gamma(a) has power series a + gamma a^2 - ...
// So in this range, 1/Gamma(a) = a + gamma a^2 with error on the order of a^3.
// The relative error over this interval is less than 6e-7.
const double eulerGamma = 0.577215664901532860606512090; // Euler's gamma constant
if (a < X(0.001))
return Z(1.0 / ((double)a * (1.0 + eulerGamma * (double)a)));
///////////////////////////////////////////////////////////////////////////
// Second interval: [0.001, 12)
if (a < X(12.0)) {
// The algorithm directly approximates gamma over (1,2) and uses
// reduction identities to reduce other arguments to this interval.
double y = (double)a;
int n = 0;
bool argWasLessThanOne = y < 1.0;
// Add or subtract integers as necessary to bring y into (1,2)
// Will correct for this below
if (argWasLessThanOne) {
y += 1.0;
}
else {
n = static_cast<int>(floor(y)) - 1; // will use n later
y -= n;
}
// numerator coefficients for approximation over the interval (1,2)
static const double p[] = {
-1.71618513886549492533811E+0,
2.47656508055759199108314E+1,
-3.79804256470945635097577E+2,
6.29331155312818442661052E+2,
8.66966202790413211295064E+2,
-3.14512729688483675254357E+4,
-3.61444134186911729807069E+4,
6.64561438202405440627855E+4
};
// denominator coefficients for approximation over the interval (1,2)
static const double q[] = {
-3.08402300119738975254353E+1,
3.15350626979604161529144E+2,
-1.01515636749021914166146E+3,
-3.10777167157231109440444E+3,
2.25381184209801510330112E+4,
4.75584627752788110767815E+3,
-1.34659959864969306392456E+5,
-1.15132259675553483497211E+5
};
double num = 0.0;
double den = 1.0;
double z = y - 1;
for (auto i = 0; i < 8; i++) {
num = (num + p[i]) * z;
den = den * z + q[i];
}
double result = num / den + 1.0;
// Apply correction if argument was not initially in (1,2)
if (argWasLessThanOne) {
// Use identity gamma(z) = gamma(z+1)/z
// The variable "result" now holds gamma of the original y + 1
// Thus we use y-1 to get back the original y.
result /= (y - 1.0);
}
else {
// Use the identity gamma(z+n) = z*(z+1)* ... *(z+n-1)*gamma(z)
for (auto i = 0; i < n; i++)
result *= y++;
}
return Z(result);
}
///////////////////////////////////////////////////////////////////////////
// Third interval: [12, infinity)
if (a > 171.624) {
// Correct answer too large to display. Force +infinity.
return Z(DOUBLE_MAX_VALUE);
// return DataTypeUtils::infOrMax<Z>();
}
return nd4j::math::nd4j_exp<Z,Z>(nd4j::math::nd4j_lgamma<X,Z>(a));
}
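// Illustrative checks for the three branches above (editorial note, not part
// of the original source):
//   nd4j_gamma<double, double>(0.0005) ~ 1999.42    (small-argument series)
//   nd4j_gamma<double, double>(0.5)    ~ 1.77245    (rational approximation; sqrt(pi))
//   nd4j_gamma<double, double>(15.0)   ~ 8.71783e10 (exp(lgamma) branch; 14!)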
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igamma(X a, Y x) {
Z aim = nd4j_pow<Y, X, Z>(x, a) / (nd4j_exp<Y, Z>(x) * nd4j_gamma<X, Z>(a));
auto sum = Z(0.);
auto denom = Z(1.);
if (a <= X(0.000001))
//throw std::runtime_error("Cannot calculate gamma for a zero val.");
return Z(0);
for (int i = 0; Z(1./denom) > Z(1.0e-12); i++) {
denom *= (a + i);
sum += nd4j_pow<Y, int, Z>(x, i) / denom;
}
return aim * sum;
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igammac(X a, Y x) {
return Z(1.) - nd4j_igamma<X, Y, Z>(a, x);
}
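// Illustrative note (editorial assumption drawn from the series above, not
// from the source): nd4j_igamma approximates the lower regularized incomplete
// gamma P(a, x) and nd4j_igammac its complement Q(a, x) = 1 - P(a, x).  Since
// P(1, x) = 1 - exp(-x):
//   nd4j_igamma<double, double, double>(1., 1.)  ~ 0.63212
//   nd4j_igammac<double, double, double>(1., 1.) ~ 0.36788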
#ifdef __CUDACC__
namespace atomics {
template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMin(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMax(T* address, T val);
template <>
inline __device__ int32_t nd4j_atomicMin<int32_t>(int32_t* address, int32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMin<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ float nd4j_atomicMin<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
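// Pattern note (editorial comment, not from the original source): CUDA lacks
// native atomicMin/Max/Mul for several of these types, so they are emulated
// with a compare-and-swap loop: read the word, compute the desired result,
// and publish it with atomicCAS; if another thread modified the word in the
// meantime, atomicCAS returns the fresh contents and the loop retries.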
template <>
inline __device__ double nd4j_atomicMin<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ uint64_t nd4j_atomicMin<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ Nd4jLong nd4j_atomicMin<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ int16_t nd4j_atomicMin<int16_t>(int16_t* address, int16_t val) {
// Note: this and the other sub-word specializations below perform a plain
// read-modify-write through a temporary; unlike the CAS loops above they are
// not atomic with respect to concurrent writers of the same word.
int32_t temp = *address;
*address = (int16_t) math::nd4j_min<int32_t>(temp, (int32_t) val);
return *address;
}
template <>
inline __device__ bfloat16 nd4j_atomicMin<bfloat16>(bfloat16* address, bfloat16 val) {
return bfloat16(nd4j_atomicMin<int16_t>(&address->_data, val._data));
}
template <>
inline __device__ float16 nd4j_atomicMin<float16>(float16* address, float16 val) {
return float16(nd4j_atomicMin<int16_t>(reinterpret_cast<int16_t*>(&address->data), (int16_t)val.data));
}
template <>
inline __device__ int32_t nd4j_atomicMax<int32_t>(int32_t* address, int32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMax<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ double nd4j_atomicMax<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ float nd4j_atomicMax<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_max(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ uint8_t nd4j_atomicMin<uint8_t>(uint8_t* address, uint8_t val) {
uint32_t temp = *address;
*address = (uint8_t) math::nd4j_min<uint32_t>(temp, (uint32_t) val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMin<int8_t>(int8_t* address, int8_t val) {
int32_t temp = *address;
*address = (int8_t) math::nd4j_min<int32_t>(temp, (int32_t) val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMin<uint16_t>(uint16_t* address, uint16_t val) {
uint32_t temp = *address;
*address = (uint16_t) math::nd4j_min<uint32_t>(temp, (uint32_t) val);
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicMax<uint8_t>(uint8_t* address, uint8_t val) {
uint32_t temp = *address;
*address = (uint8_t) math::nd4j_max<uint32_t>(temp, (uint32_t) val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMax<int8_t>(int8_t* address, int8_t val) {
int32_t temp = *address;
*address = (int8_t) math::nd4j_max<int32_t>(temp, (int32_t) val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMax<uint16_t>(uint16_t* address, uint16_t val) {
uint32_t temp = *address;
*address = (uint16_t) math::nd4j_max<uint32_t>(temp, (uint32_t) val);
return *address;
}
template <>
inline __device__ int16_t nd4j_atomicMax<int16_t>(int16_t* address, int16_t val) {
int32_t temp = *address;
*address = (int16_t) math::nd4j_max<int32_t>(temp, (int32_t) val);
return *address;
}
template <>
inline __device__ float16 nd4j_atomicMax<float16>(float16* address, float16 val) {
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = nd4j_max((float16) old.B.H, val);
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = nd4j_max((float16) old.B.L, val);
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ bfloat16 nd4j_atomicMax<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
long addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = nd4j_max(old.B.H, val);
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = nd4j_max(old.B.L, val);
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ uint64_t nd4j_atomicMax<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMax((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_max((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ Nd4jLong nd4j_atomicMax<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long)nd4j_max(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
}
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ Nd4jLong nd4j_atomicAdd<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ long nd4j_atomicAdd<long>(long* address, long val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
// return atomicAdd(address, val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ uint32_t nd4j_atomicAdd<uint32_t>(uint32_t* address, uint32_t val) {
return atomicAdd(address, val);
}
template <>
inline __device__ uint64_t nd4j_atomicAdd<uint64_t>(uint64_t* address, uint64_t val) {
// unsigned long long* address_as_ull = (unsigned long long int *) address;
//
//// return atomicAdd(address, val);
// unsigned long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed, val + assumed);
// } while (assumed != old);
// return old;
return (uint64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
#if __CUDA_ARCH__ >= 700 && defined(CUDA_10)
return float16(atomicAdd(reinterpret_cast<__half*>(address), val.data));
#else
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = ((float16) old.B.H) + val;
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = ((float16) old.B.L) + val;
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
#endif
}
template <>
inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
auto addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = old.B.H + val;
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = old.B.L + val;
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ int16_t nd4j_atomicAdd<int16_t>(int16_t* address, int16_t val) {
return nd4j_atomicAdd((bfloat16*)address, (bfloat16)val);
}
template <>
inline __device__ uint16_t nd4j_atomicAdd<uint16_t>(uint16_t* address, uint16_t val) {
return nd4j_atomicAdd((bfloat16*)address, (bfloat16)val);
}
template <>
inline __device__ int8_t nd4j_atomicAdd<int8_t>(int8_t* address, int8_t val) {
int res = *address;
res += (int) val;
*address = (int8_t) res;
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicAdd<uint8_t>(uint8_t* address, uint8_t val) {
int res = *address;
res += (int) val;
*address = (uint8_t) res;
return *address;
}
template <>
inline __device__ bool nd4j_atomicAdd<bool>(bool* address, bool val) {
*address += (val);
return *address;
}
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
return nd4j_atomicAdd<double>(address, -val);
}
template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val *
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
return nd4j_atomicMul<double>(address, 1./val);
}
template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
return atomicAdd(address,val);
}
//template <>
//inline __device__ int nd4j_atomicAdd<int>(int* address, int val) {
// return atomicAdd(address, val);
//}
template <>
inline __device__ int32_t nd4j_atomicAdd<int32_t>(int32_t* address, int32_t val) {
return (int32_t)atomicAdd((int*)address, (int)val);
}
template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
return nd4j_atomicAdd<float>(address, -val);
}
template <>
inline __device__ float16 nd4j_atomicSub<float16>(float16* address, float16 val) {
return nd4j_atomicAdd<float16>(address, -val);
}
template <>
inline __device__ bfloat16 nd4j_atomicSub<bfloat16>(bfloat16* address, bfloat16 val) {
return nd4j_atomicAdd<bfloat16>(address, -val);
}
template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
int* address_as_ull =
( int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(val *
__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ int8_t nd4j_atomicMul<int8_t>(int8_t* address, int8_t val) {
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, mul, new_;
old = *base_address;
do {
assumed = old;
mul = val * (int8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
new_ = __byte_perm(old, mul, sel);
if (new_ == old)
break;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return (int8_t)old;
}
template <>
inline __device__ unsigned char nd4j_atomicMul<unsigned char>(unsigned char* address, unsigned char val) {
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, mul, new_;
old = *base_address;
do {
assumed = old;
mul = val * (uint8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
new_ = __byte_perm(old, mul, sel);
if (new_ == old)
break;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return (uint8_t)old;
}
template <>
inline __device__ int16_t nd4j_atomicMul<int16_t>(int16_t* address, int16_t val) {
unsigned int shift = ((size_t)address & 2) * 8;
unsigned int mask = 0xFFFFu << shift;
unsigned int *base_address = (unsigned int *)((size_t)address & ~(size_t)2);
unsigned int old = *base_address, assumed, fresh;
do {
assumed = old;
// multiply only our 16-bit half, leaving the neighboring half-word intact
int16_t res = (int16_t)((assumed >> shift) & 0xFFFFu) * val;
fresh = (assumed & ~mask) | (((unsigned int)(unsigned short)res << shift) & mask);
old = atomicCAS(base_address, assumed, fresh);
} while (assumed != old);
return (int16_t)((old >> shift) & 0xFFFFu);
}
template <>
inline __device__ uint16_t nd4j_atomicMul<uint16_t>(uint16_t* address, uint16_t val) {
unsigned int shift = ((size_t)address & 2) * 8;
unsigned int mask = 0xFFFFu << shift;
unsigned int *base_address = (unsigned int *)((size_t)address & ~(size_t)2);
unsigned int old = *base_address, assumed, fresh;
do {
assumed = old;
// multiply only our 16-bit half, leaving the neighboring half-word intact
uint16_t res = (uint16_t)((assumed >> shift) & 0xFFFFu) * val;
fresh = (assumed & ~mask) | (((unsigned int)res << shift) & mask);
old = atomicCAS(base_address, assumed, fresh);
} while (assumed != old);
return (uint16_t)((old >> shift) & 0xFFFFu);
}
template <>
inline __device__ int nd4j_atomicMul<int>(int* address, int val) {
int* res_address = address;
int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ unsigned int nd4j_atomicMul<unsigned int>(unsigned int* address, unsigned int val) {
unsigned int* res_address = address;
unsigned int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ int64_t nd4j_atomicMul<int64_t>(int64_t* address, int64_t val) {
unsigned long long int* res_address = (unsigned long long int*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (int64_t)old;
}
template <>
inline __device__ uint64_t nd4j_atomicMul<uint64_t>(uint64_t* address, uint64_t val) {
unsigned long long int* res_address = (unsigned long long int*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (uint64_t)old;
}
//template <>
//inline __device__ unsigned long long nd4j_atomicMul<unsigned long long>(unsigned long long* address, unsigned long long val) {
// unsigned long long int* res_address = address;
// unsigned long long int old = *res_address, assumed;
// do {
// assumed = old;
// old = atomicCAS(res_address, assumed, val * assumed);
// } while (assumed != old);
// return old;
//}
#if !defined(_WIN32) && !defined(_WIN64)
template <>
inline __device__ Nd4jLong nd4j_atomicMul<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* res_address = (unsigned long long*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (Nd4jLong)old;
}
#endif
template <>
inline __device__ bfloat16 nd4j_atomicMul<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
long addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = old.B.H * val;
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = old.B.L * val;
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ float16 nd4j_atomicMul<float16>(float16* address, float16 val) {
auto address_as_ull = (int*) address;
long addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = ((float16) old.B.H) * val;
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = ((float16) old.B.L) * val;
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ float nd4j_atomicDiv<float>(float* address, float val) {
int* address_as_ull =
(int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(__int_as_float(assumed) / val ));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ float16 nd4j_atomicDiv<float16>(float16* address, float16 val) {
// As with the double specialization above, division is implemented as
// multiplication by the reciprocal.
return nd4j_atomicMul<float16>(address, float16(1.f / (float) val));
}
template <>
inline __device__ bfloat16 nd4j_atomicDiv<bfloat16>(bfloat16* address, bfloat16 val) {
return nd4j_atomicMul<bfloat16>(address, bfloat16(1.f / (float) val));
}
}
#endif
}
}
#ifdef _OPENMP
#ifndef MAX_FLOAT
#define MAX_FLOAT 1e37
#endif
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_abs(omp_in) + nd4j::math::nd4j_abs(omp_out))\
initializer (omp_priv=0)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in * omp_out)\
initializer (omp_priv=1)
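// Usage sketch (illustrative, not part of the original header; `buffer` and
// `n` are hypothetical names): these custom reductions are consumed like the
// built-in ones, e.g.
//
//   float acc = 0.f;
//   #pragma omp parallel for reduction(sumT : acc)
//   for (int e = 0; e < n; e++)
//       acc += buffer[e];
//
// Each thread starts from the declared initializer and the partial results
// are merged with the declared combiner.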
#endif
#endif /* TEMPLATEMATH_H_ */
|
residual_based_bdf_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "includes/checks.h"
#include "utilities/time_discretization.h"
#include "solving_strategies/schemes/residual_based_implicit_time_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFScheme
* @ingroup KratosCore
* @brief BDF integration scheme (for dynamic problems)
* @details The \f$ n \f$-th order Backward Differentiation Formula (BDF) method is an \f$ n \f$-step, \f$ n \f$-th order accurate method.
* This scheme is designed to solve a system of the type:
*\f[
* \mathbf{M} \frac{d^2(u_{n0})}{dt^2} + \mathbf{D} \frac{d(u_{n0})}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
* \f]
*
* If we call:
*
* - Second derivative:
* -# \f$ \ddot{u}_{ni} \f$ the second derivative at the step i
* - First derivative:
* -# \f$ \dot{u}_{ni} \f$ the first derivative at the step i
* - Variable:
* -# \f$ u_{ni} \f$ the variable at the step i
*
* Then we assume:
* \f[ \frac{d^2(u_{n0})}{dt^2} \Big|_{t_{n0}} = \sum_i c_i \dot{u}_{ni} \f]
* \f[ \frac{d(u_{n0})}{dt} \Big|_{t_{n0}} = \sum_i c_i u_{ni} \f]
* with for order 2 (BDF2):
* -# \f$ c_0 = \frac{1.5}{dt} \f$
* -# \f$ c_1 = \frac{-2.0}{dt} \f$
* -# \f$ c_2 = \frac{0.5}{dt} \f$
*
* The LHS and RHS can be defined as:
* \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \frac{d(\dot{u}_{n0})}{dt} - \mathbf{D} \frac{d(u_{n0})}{dt} - \mathbf{K} u_{n0} \f]
* and
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2 \mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
* @note This implies that elements are expected to be written in terms
* of a variable with two time derivatives
* <a href="https://mediatum.ub.tum.de/doc/1223319/80942.pdf">Main reference</a>
* @todo Create a BibTeX file https://www.stack.nl/~dimitri/doxygen/manual/commands.html#cmdcite
* @author Vicente Mataix Ferrandiz
*/
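/*
 * Worked example (editorial illustration, not part of the original header):
 * for BDF2 with a constant time step dt, the coefficients above give the
 * first time derivative as
 *
 *   dot(u)_{n0} = (1.5 * u_{n0} - 2.0 * u_{n1} + 0.5 * u_{n2}) / dt
 *
 * and applying the same stencil to the velocities yields the second
 * derivative used in the dynamic terms.
 */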
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFScheme
: public ResidualBasedImplicitTimeScheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;
typedef typename ImplicitBaseType::TDataType TDataType;
typedef typename ImplicitBaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename ImplicitBaseType::TSystemMatrixType TSystemMatrixType;
typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::NodesContainerType NodesArrayType;
/// Definition of epsilon
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The BDF method
* @param Order The integration order
* @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and its derivatives
*/
explicit ResidualBasedBDFScheme(const std::size_t Order = 2)
:ImplicitBaseType(),
mOrder(Order),
mpBDFUtility(Kratos::make_unique<TimeDiscretization::BDF>(Order))
{
// Allocate auxiliary memory
const std::size_t num_threads = OpenMPUtils::GetNumThreads();
mVector.dotun0.resize(num_threads);
mVector.dot2un0.resize(num_threads);
// Doing a minimal check
KRATOS_ERROR_IF(mOrder < 1) << "ERROR:: Not possible to compute a BDF of order less than 1" << std::endl;
// We resize the BDF coefficients
if (mBDF.size() != (mOrder + 1))
mBDF.resize(mOrder + 1);
}
/** Copy Constructor.
*/
explicit ResidualBasedBDFScheme(ResidualBasedBDFScheme& rOther)
:ImplicitBaseType(rOther)
,mOrder(rOther.mOrder)
,mBDF(rOther.mBDF)
,mVector(rOther.mVector)
,mpBDFUtility(nullptr)
{
Kratos::unique_ptr<TimeDiscretization::BDF> auxiliar_pointer = Kratos::make_unique<TimeDiscretization::BDF>(mOrder);
mpBDFUtility.swap(auxiliar_pointer);
}
/**
* Clone
*/
BaseTypePointer Clone() override
{
return BaseTypePointer( new ResidualBasedBDFScheme(*this) );
}
/** Destructor.
*/
~ResidualBasedBDFScheme
() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution
* @details Incremental update within newton iteration. It updates the state variables at the end of the time step
* \f[ u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u\f]
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
// Update of displacement (by DOF)
mpDofUpdater->UpdateDofs(rDofSet, rDx);
UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);
KRATOS_CATCH( "" );
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step x = xold + vold * Dt
* @param rModelPart The model of the problem to solve
* @param rDofSet set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
KRATOS_ERROR << "Calling base BDF class" << std::endl;
KRATOS_CATCH( "" );
}
/**
* @brief It initializes the time step solution; only needed if the time step solution is restarted
* @param rModelPart The model of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
* @todo I cannot find the formula for the higher orders with variable time step. I tried to deduce by myself but the result was very unstable
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
ImplicitBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
mpBDFUtility->ComputeAndSaveBDFCoefficients(r_current_process_info);
mBDF = r_current_process_info[BDF_COEFFICIENTS];
KRATOS_WARNING_IF("ResidualBasedBDFScheme", mOrder > 2)
<< "For higher orders than 2 the time step is assumed to be constant.\n";
KRATOS_CATCH( "" );
}
/**
* @brief This function is designed to be called once to perform all the checks needed on the input provided.
* @details Checks can be "expensive" as the function is designed to catch user's errors.
* @param rModelPart The model of the problem to solve
* @return Zero means all ok
*/
int Check(const ModelPart& rModelPart) const override
{
KRATOS_TRY;
const int err = ImplicitBaseType::Check(rModelPart);
if(err!=0) return err;
// Verify the buffer size: a BDF scheme of order m requires m + 1 steps in the buffer
KRATOS_ERROR_IF(rModelPart.GetBufferSize() < mOrder + 1) << "Insufficient buffer size. Buffer size should be at least " << mOrder + 1 << ". Current size is " << rModelPart.GetBufferSize() << std::endl;
KRATOS_CATCH( "" );
return 0;
}
/// Free memory allocated by this class.
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "base_bdf_scheme",
"integration_order" : 2
})");
// Getting base class default parameters
const Parameters base_default_parameters = ImplicitBaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBDFScheme";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
struct GeneralVectors
{
std::vector< Vector > dotun0; /// First derivative
std::vector< Vector > dot2un0; /// Second derivative
};
const std::size_t mOrder; /// The integration order
Vector mBDF; /// The BDF coefficients
GeneralVectors mVector; /// The structure containing the derivatives
Kratos::unique_ptr<TimeDiscretization::BDF> mpBDFUtility; /// Utility to compute BDF coefficients
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Performing the update of the derivatives
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
inline void UpdateDerivatives(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
)
{
// Updating time derivatives (nodally for efficiency)
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
// Getting first node iterator
const auto it_node_begin = rModelPart.Nodes().begin();
#pragma omp parallel for
for(int i = 0; i< num_nodes; ++i) {
auto it_node = it_node_begin + i;
UpdateFirstDerivative(it_node);
UpdateSecondDerivative(it_node);
}
}
/**
* @brief Updating first time derivative (velocity)
* @param itNode the node iterator
*/
virtual inline void UpdateFirstDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief Updating second time derivative (acceleration)
* @param itNode the node iterator
*/
virtual inline void UpdateSecondDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief It adds the dynamic LHS contribution of the elements
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2\mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
* @param rLHS_Contribution The dynamic contribution for the LHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToLHS(
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
// Adding mass contribution to the dynamic stiffness
if (rM.size1() != 0) { // if M matrix declared
noalias(rLHS_Contribution) += rM * std::pow(mBDF[0], 2);
}
// Adding damping contribution
if (rD.size1() != 0) { // if D matrix declared
noalias(rLHS_Contribution) += rD * mBDF[0];
}
}
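// Worked example (illustrative, not from the original source): for BDF2 with
// dt = 0.1, c_0 = mBDF[0] = 1.5 / dt = 15, so a unit mass matrix contributes
// 15^2 = 225 and a unit damping matrix contributes 15 to the dynamic LHS.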
/**
* @brief It adds the dynamic RHS contribution of the objects
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param rObject The object to compute
* @param rRHS_Contribution The dynamic contribution for the RHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
template <class TObjectType>
void TemplateAddDynamicsToRHS(
TObjectType& rObject,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
)
{
const std::size_t this_thread = OpenMPUtils::ThisThread();
const auto& r_const_obj_ref = rObject;
// Adding inertia contribution
if (rM.size1() != 0) {
r_const_obj_ref.GetSecondDerivativesVector(mVector.dot2un0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rM, mVector.dot2un0[this_thread]);
}
// Adding damping contribution
if (rD.size1() != 0) {
r_const_obj_ref.GetFirstDerivativesVector(mVector.dotun0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rD, mVector.dotun0[this_thread]);
}
}
/**
* @brief It adds the dynamic RHS contribution of the elements
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param rElement The element to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Element& rElement,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
TemplateAddDynamicsToRHS<Element>(rElement, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
/**
* @brief It adds the dynamic RHS contribution of the condition
* \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \ddot{u}_{n0} - \mathbf{D} \dot{u}_{n0} - \mathbf{K} u_{n0} \f]
* @param rCondition The condition to compute
* @param RHS_Contribution The dynamic contribution for the RHS
* @param D The damping matrix
* @param M The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToRHS(
Condition& rCondition,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
TemplateAddDynamicsToRHS<Condition>(rCondition, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Utility class to perform the update after solving the system, will be different in MPI runs.
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBDFScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_SCHEME defined */
|
hybrid.c | // mpicc -g -fopenmp hybrid.c -o hybrid.out -O3 && mpirun -np 4 hybrid.out 20 100 4
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>
#include "config.h"
#include <omp.h>
#define ROOT 0
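/* Distributed Floyd-Warshall: the n x n distance matrix is block-distributed
 * by rows across MPI ranks (n is assumed divisible by the process count), and
 * each rank relaxes its rows with OpenMP threads. */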
void showDistances(int matrix[], int n);
void populateMatrix(int matrix[], int n, int density, int rank, int processes);
void gatherResult(int matrix[], int n, int rank, int processes);
void castKRow(int matrix[], int n, int section, int kRow[], int k, int rank);
void floydWarshall(int matrix[], int n, int rank, int processes, int threads);
int main(int argc, char* argv[])
{
struct timespec start, end;
long long accum;
int n, density, threads;
int* matrix;
int processes, rank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &processes);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == ROOT)
{
if(argc <= 3)
{
n = DEFAULT;
density = 100;
threads = omp_get_max_threads();
}
else
{
n = atoi(argv[1]);
density = atoi(argv[2]);
threads = atoi(argv[3]);
}
}
MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&threads, 1, MPI_INT, 0, MPI_COMM_WORLD); /* threads is read by every rank in floydWarshall */
matrix = malloc(n * n / processes * sizeof(int));
populateMatrix(matrix, n, density, rank, processes);
if (rank == ROOT)
{
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
}
floydWarshall(matrix, n, rank, processes, threads);
gatherResult(matrix, n, rank, processes);
free(matrix);
if (rank == ROOT)
{
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
accum = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
printf("[HYBRID] Total elapsed time %lld us\n", accum);
}
MPI_Finalize();
return 0;
}
void populateMatrix(int matrix[], int n, int density, int rank, int processes) {
int i, j, value;
int* temp_mat = NULL;
if (rank == ROOT) {
srand(42);
temp_mat = malloc(n*n*sizeof(int));
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++){
if(i == j)
{
temp_mat[i*n+j] = 0;
}
else
{
value = 1 + rand() % MAX;
if(value > density)
{
temp_mat[i*n+j] = INF;
}
else
{
temp_mat[i*n+j] = value;
}
}
}
}
int split = n * n/processes;
MPI_Scatter(temp_mat, split, MPI_INT, matrix, split, MPI_INT, 0, MPI_COMM_WORLD);
printf("*** Adjacency matrix:\n");
showDistances(temp_mat, n);
free(temp_mat);
}
else
{
int split = n * n/processes;
MPI_Scatter(temp_mat, split, MPI_INT, matrix, split, MPI_INT, 0, MPI_COMM_WORLD);
}
}
void showDistances(int matrix[], int n)
{
if(PRINTABLE)
{
int i, j;
printf(" ");
for(i = 0; i < n; i++)
{
printf("[%d] ", i);
}
printf("\n");
for(i = 0; i < n; i++) {
printf("[%d]", i);
for(j = 0; j < n; j++)
{
if(matrix[i * n + j] == INF)
{
printf(" inf");
}
else
{
printf("%5d", matrix[i * n + j]);
}
}
printf("\n");
}
printf("\n");
}
}
void gatherResult(int matrix[], int n, int rank, int processes)
{
int* temp_mat = NULL;
if (rank == ROOT) {
temp_mat = malloc(n * n * sizeof(int));
int split = n * n / processes;
MPI_Gather(matrix, split, MPI_INT, temp_mat, split, MPI_INT, 0, MPI_COMM_WORLD);
printf("The solution is:\n");
showDistances(temp_mat, n);
free(temp_mat);
}
else
{
int split = n * n / processes;
MPI_Gather(matrix, split, MPI_INT, temp_mat, split, MPI_INT, 0, MPI_COMM_WORLD);
}
}
void floydWarshall(int matrix[], int n, int rank, int processes, int threads)
{
int k, i, j, temp;
int* kRow = malloc(n*sizeof(int));
int section = n / processes;
for (k = 0; k < n; k++)
{
castKRow(matrix, n, section, kRow, k, rank);
#pragma omp parallel for num_threads(threads) shared(matrix, kRow, section, n, k) private(i, j, temp) schedule(dynamic)
for (i = 0; i < section; i++)
{
for (j = 0; j < n; j++)
{
temp = matrix[i * n + k] + kRow[j];
if (temp < matrix[i * n + j])
{
matrix[i * n + j] = temp;
}
}
}
}
free(kRow);
}
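/*
 * Broadcast pivot row k to every rank. Rows are block-distributed, so with
 * section = n / processes rows per rank, global row k lives on rank
 * k / section at local row k % section. Illustration (hypothetical sizes):
 * n = 8 with 4 processes gives section = 2, so row 5 is local row 1 of rank 2.
 */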
void castKRow(int matrix[], int n, int section, int kRow[], int k, int rank)
{
int j;
int localK = k % section;
int competenceHead;
competenceHead = k / section;
if (rank == competenceHead)
{
for (j = 0; j < n; j++)
{
kRow[j] = matrix[localK * n + j];
}
}
MPI_Bcast(kRow, n, MPI_INT, competenceHead, MPI_COMM_WORLD);
} |
GB_unop__abs_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_int16_int16)
// op(A') function: GB (_unop_tran__abs_int16_int16)
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = GB_IABS (aij)
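// Usage sketch (an illustration, not part of the generated file): this kernel
// typically backs GrB_apply with the corresponding built-in operator, e.g.
//   GrB_Matrix_apply (C, NULL, NULL, GrB_ABS_INT16, A, NULL) ;
// assuming C and A are int16 matrices of matching dimensions.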
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = GB_IABS (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__abs_int16_int16)
(
int16_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
int16_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
int16_t z = aij ;
Cx [p] = GB_IABS (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__abs_int16_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__ainv_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_int32_int32)
// op(A') function: GB (_unop_tran__ainv_int32_int32)
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
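// (when this flag is 1, the apply phase below collapses to a single GB_memcpy)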
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_int32_int32)
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = -z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_int32_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
common.h | #ifndef DEPTH_SEGMENTATION_COMMON_H_
#define DEPTH_SEGMENTATION_COMMON_H_
#ifdef _OPENMP
#include <omp.h>
#endif
#include <set>
#include <iostream>
#include <string>
#include <vector>
#include <glog/logging.h>
#include <opencv2/highgui.hpp>
#include <opencv2/rgbd.hpp>
#include <opencv2/viz/vizcore.hpp>
namespace depth_segmentation {
struct Segment {
std::vector<cv::Vec3f> points;
std::vector<cv::Vec3f> normals;
std::vector<cv::Vec3f> original_colors;
std::set<size_t> label;
std::set<size_t> instance_label;
std::set<size_t> semantic_label;
};
const static std::string kDebugWindowName = "DebugImages";
constexpr bool kUseTracker = false;
enum class SurfaceNormalEstimationMethod {
kFals = cv::rgbd::RgbdNormals::RGBD_NORMALS_METHOD_FALS,
kLinemod = cv::rgbd::RgbdNormals::RGBD_NORMALS_METHOD_LINEMOD,
kSri = cv::rgbd::RgbdNormals::RGBD_NORMALS_METHOD_SRI,
kDepthWindowFilter = 3,
};
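// kDepthWindowFilter is a custom method implemented in this file
// (computeOwnNormals); the value 3 simply continues the OpenCV enum, whose
// FALS/LINEMOD/SRI methods are assumed to map to 0, 1 and 2.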
struct SurfaceNormalParams {
SurfaceNormalParams() {
CHECK_EQ(window_size % 2u, 1u);
CHECK_GT(window_size, 1u);
if (method != SurfaceNormalEstimationMethod::kDepthWindowFilter) {
CHECK_LT(window_size, 8u);
}
}
size_t window_size = 13u;
SurfaceNormalEstimationMethod method =
SurfaceNormalEstimationMethod::kDepthWindowFilter;
bool display = true;
double distance_factor_threshold = 0.04;
};
struct MaxDistanceMapParams {
MaxDistanceMapParams() { CHECK_EQ(window_size % 2u, 1u); }
bool use_max_distance = true;
size_t window_size = 1u;
bool display = true;
bool exclude_nan_as_max_distance = false;
bool ignore_nan_coordinates = false; // TODO(ff): This probably doesn't make
// a lot of sense -> consider removing
// it.
bool use_threshold = true;
double noise_thresholding_factor = 10.0;
double sensor_noise_param_1st_order = 0.0012; // From Nguyen et al. (2012)
double sensor_noise_param_2nd_order = 0.0019; // From Nguyen et al. (2012)
double sensor_noise_param_3rd_order = 0.0001; // From Nguyen et al. (2012)
double sensor_min_distance = 0.02;
};
struct DepthDiscontinuityMapParams {
DepthDiscontinuityMapParams() { CHECK_EQ(kernel_size % 2u, 1u); }
bool use_discontinuity = true;
size_t kernel_size = 3u;
double discontinuity_ratio = 0.01;
bool display = true;
};
struct MinConvexityMapParams {
MinConvexityMapParams() { CHECK_EQ(window_size % 2u, 1u); }
bool use_min_convexity = true;
size_t morphological_opening_size = 1u;
size_t window_size = 5u;
size_t step_size = 1u;
bool display = true;
bool use_morphological_opening = true;
bool use_threshold = true;
double threshold = 0.97;
double mask_threshold = -0.0005;
};
struct FinalEdgeMapParams {
size_t morphological_opening_size = 1u;
size_t morphological_closing_size = 1u;
bool use_morphological_opening = true;
bool use_morphological_closing = true;
bool display = true;
};
enum class LabelMapMethod {
kFloodFill = 0,
kContour = 1,
};
struct LabelMapParams {
LabelMapMethod method = LabelMapMethod::kContour;
size_t min_size = 500u;
bool use_inpaint = false;
size_t inpaint_method = 0u;
bool display = true;
};
struct SemanticInstanceSegmentationParams {
bool enable = false;
float overlap_threshold = 0.8f;
};
struct IsNan {
template <class T>
bool operator()(T const& p) const {
return std::isnan(p);
}
};
struct IsNotNan {
template <class T>
bool operator()(T const& p) const {
return !std::isnan(p);
}
};
struct Params {
bool dilate_depth_image = false;
size_t dilation_size = 1u;
FinalEdgeMapParams final_edge;
LabelMapParams label;
DepthDiscontinuityMapParams depth_discontinuity;
MaxDistanceMapParams max_distance;
MinConvexityMapParams min_convexity;
SurfaceNormalParams normals;
SemanticInstanceSegmentationParams semantic_instance_segmentation;
bool visualize_segmented_scene = false;
};
inline void visualizeDepthMap(const cv::Mat& depth_map, cv::viz::Viz3d* viz_3d) {
CHECK(!depth_map.empty());
CHECK_EQ(depth_map.type(), CV_32FC3);
CHECK_NOTNULL(viz_3d);
viz_3d->setBackgroundColor(cv::viz::Color::gray());
viz_3d->showWidget("cloud",
cv::viz::WCloud(depth_map, cv::viz::Color::red()));
viz_3d->showWidget("coo", cv::viz::WCoordinateSystem(1.5));
viz_3d->spinOnce(0, true);
}
inline void visualizeDepthMapWithNormals(const cv::Mat& depth_map,
const cv::Mat& normals,
cv::viz::Viz3d* viz_3d) {
CHECK(!depth_map.empty());
CHECK_EQ(depth_map.type(), CV_32FC3);
CHECK(!normals.empty());
CHECK_EQ(normals.type(), CV_32FC3);
CHECK_EQ(depth_map.size(), normals.size());
CHECK_NOTNULL(viz_3d);
viz_3d->setBackgroundColor(cv::viz::Color::gray());
viz_3d->showWidget("cloud",
cv::viz::WCloud(depth_map, cv::viz::Color::red()));
viz_3d->showWidget("normals",
cv::viz::WCloudNormals(depth_map, normals, 50, 0.02f,
cv::viz::Color::green()));
viz_3d->showWidget("coo", cv::viz::WCoordinateSystem(1.5));
viz_3d->spinOnce(0, true);
}
inline void computeCovariance(const cv::Mat& neighborhood, const cv::Vec3f& mean,
const size_t neighborhood_size, cv::Mat* covariance) {
CHECK(!neighborhood.empty());
CHECK_EQ(neighborhood.rows, 3u);
CHECK_GT(neighborhood_size, 0u);
CHECK_LE(neighborhood_size, neighborhood.cols);
CHECK_NOTNULL(covariance);
*covariance = cv::Mat::zeros(3, 3, CV_32F);
for (size_t i = 0u; i < neighborhood_size; ++i) {
cv::Vec3f point;
for (size_t row = 0u; row < neighborhood.rows; ++row) {
point[row] = neighborhood.at<float>(row, i) - mean[row];
}
covariance->at<float>(0, 0) += point[0] * point[0];
covariance->at<float>(0, 1) += point[0] * point[1];
covariance->at<float>(0, 2) += point[0] * point[2];
covariance->at<float>(1, 1) += point[1] * point[1];
covariance->at<float>(1, 2) += point[1] * point[2];
covariance->at<float>(2, 2) += point[2] * point[2];
}
// Assign the symmetric elements of the covariance matrix.
covariance->at<float>(1, 0) = covariance->at<float>(0, 1);
covariance->at<float>(2, 0) = covariance->at<float>(0, 2);
covariance->at<float>(2, 1) = covariance->at<float>(1, 2);
}
inline size_t findNeighborhood(const cv::Mat& depth_map, const size_t window_size,
const float max_distance, const size_t x,
const size_t y, cv::Mat* neighborhood,
cv::Vec3f* mean) {
CHECK(!depth_map.empty());
CHECK_GT(window_size, 0u);
CHECK_EQ(window_size % 2u, 1u);
CHECK_GE(max_distance, 0.0f);
CHECK_GE(x, 0u);
CHECK_GE(y, 0u);
CHECK_LT(x, depth_map.cols);
CHECK_LT(y, depth_map.rows);
CHECK_NOTNULL(neighborhood);
CHECK_NOTNULL(mean);
size_t neighborhood_size = 0u;
*neighborhood = cv::Mat::zeros(3, window_size * window_size, CV_32FC1);
cv::Vec3f mid_point = depth_map.at<cv::Vec3f>(y, x);
for (size_t y_idx = 0u; y_idx < window_size; ++y_idx) {
const int y_filter_idx = static_cast<int>(y + y_idx) - static_cast<int>(window_size / 2u);
if (y_filter_idx < 0 || y_filter_idx >= depth_map.rows) {
continue;
}
CHECK_GE(y_filter_idx, 0u);
CHECK_LT(y_filter_idx, depth_map.rows);
for (size_t x_idx = 0u; x_idx < window_size; ++x_idx) {
const int x_filter_idx = static_cast<int>(x + x_idx) - static_cast<int>(window_size / 2u);
if (x_filter_idx < 0 || x_filter_idx >= depth_map.cols) {
continue;
}
CHECK_GE(x_filter_idx, 0u);
CHECK_LT(x_filter_idx, depth_map.cols);
cv::Vec3f filter_point =
depth_map.at<cv::Vec3f>(y_filter_idx, x_filter_idx);
// Compute Euclidean distance between filter_point and mid_point.
const cv::Vec3f difference = mid_point - filter_point;
const float euclidean_dist = cv::sqrt(difference.dot(difference));
if (euclidean_dist < max_distance) {
// Add the filter_point to neighborhood set.
for (size_t coordinate = 0u; coordinate < 3u; ++coordinate) {
neighborhood->at<float>(coordinate, neighborhood_size) =
filter_point[coordinate];
}
++neighborhood_size;
*mean += filter_point;
}
}
}
CHECK_GE(neighborhood_size, 1u);
CHECK_LE(neighborhood_size, window_size * window_size);
*mean /= static_cast<float>(neighborhood_size);
return neighborhood_size;
}
// \brief Compute point normals of a depth image.
//
// Compute the point normals by looking at a neighborhood around each pixel.
// We're taking a standard squared kernel, where we discard points that are too
// far away from the center point (by evaluating the Euclidean distance).
//
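// The normal follows from a local PCA plane fit: the eigenvector of the
// neighborhood covariance matrix with the smallest eigenvalue is (up to sign)
// the surface normal at the center point.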
inline void computeOwnNormals(const SurfaceNormalParams& params,
const cv::Mat& depth_map, cv::Mat* normals) {
CHECK(!depth_map.empty());
CHECK_EQ(depth_map.type(), CV_32FC3);
CHECK_NOTNULL(normals);
CHECK_EQ(depth_map.size(), normals->size());
cv::Mat neighborhood =
cv::Mat::zeros(3, params.window_size * params.window_size, CV_32FC1);
cv::Mat eigenvalues;
cv::Mat eigenvectors;
cv::Mat covariance(3, 3, CV_32FC1);
covariance = cv::Mat::zeros(3, 3, CV_32FC1);
cv::Vec3f mean;
cv::Vec3f mid_point;
clock_t start_time, end_time;
constexpr float float_nan = std::numeric_limits<float>::quiet_NaN();
start_time = clock();
// NOTE: clock() reports process CPU time, which sums over all OpenMP threads;
// omp_get_wtime() would report wall-clock time instead.
#pragma omp parallel for private(neighborhood, eigenvalues, eigenvectors, \
covariance, mean, mid_point)
for (size_t y = 0u; y < depth_map.rows; ++y) {
for (size_t x = 0u; x < depth_map.cols; ++x) {
mid_point = depth_map.at<cv::Vec3f>(y, x);
// Skip point if z value is nan.
if (cvIsNaN(mid_point[0]) || cvIsNaN(mid_point[1]) ||
cvIsNaN(mid_point[2]) || (mid_point[2] == 0.0)) {
normals->at<cv::Vec3f>(y, x) =
cv::Vec3f(float_nan, float_nan, float_nan);
continue;
}
const float max_distance =
params.distance_factor_threshold * mid_point[2];
mean = cv::Vec3f(0.0f, 0.0f, 0.0f);
const size_t neighborhood_size =
findNeighborhood(depth_map, params.window_size, max_distance, x, y,
&neighborhood, &mean);
if (neighborhood_size > 1u) {
computeCovariance(neighborhood, mean, neighborhood_size, &covariance);
// Compute Eigen vectors.
cv::eigen(covariance, eigenvalues, eigenvectors);
// Get the Eigenvector corresponding to the smallest Eigenvalue.
constexpr size_t n_th_eigenvector = 2u;
for (size_t coordinate = 0u; coordinate < 3u; ++coordinate) {
normals->at<cv::Vec3f>(y, x)[coordinate] =
eigenvectors.at<float>(n_th_eigenvector, coordinate);
}
// Re-Orient normals to point towards camera.
if (normals->at<cv::Vec3f>(y, x)[2] > 0.0f) {
normals->at<cv::Vec3f>(y, x) = -normals->at<cv::Vec3f>(y, x);
}
} else {
normals->at<cv::Vec3f>(y, x) =
cv::Vec3f(float_nan, float_nan, float_nan);
}
}
}
end_time = clock();
std::cout << "computeOwnNormals loop CPU time = "
<< static_cast<double>(end_time - start_time) / CLOCKS_PER_SEC << " s" << std::endl;
}
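// Minimal usage sketch (illustrative assumption: depth_map is a CV_32FC3
// image of 3D points):
//   depth_segmentation::SurfaceNormalParams params;
//   cv::Mat normals(depth_map.size(), CV_32FC3);
//   depth_segmentation::computeOwnNormals(params, depth_map, &normals);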
} // namespace depth_segmentation
#endif // DEPTH_SEGMENTATION_COMMON_H_ |
openmp_demo2.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
int main(int argc, char const *argv[])
{
int n = 1000;
int *a = NULL;
a = (int*) malloc(n * sizeof(int));
int i;
for (i=n; i--;) {
a[i] = i;
}
long long sum = 0;
long long sum2 = 0;
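/* Each thread accumulates private partial sums; OpenMP combines them into
 * sum and sum2 when the worksharing loop below completes. */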
#pragma omp parallel
{
#pragma omp for reduction(+:sum) reduction(+:sum2)
for (i = 0; i < n; i++) {
sum += a[i];
sum2 += a[i] * a[i];
}
}
double mean = sum / (double) n;
double var = (sum2 - 2 * mean * sum + n * (mean*mean)) / n;
double stdev = sqrt(var);
printf("sum: %.2lld\n", sum);
printf("sum2: %.2lld\n", sum2);
printf("mean: %.2lf\n", mean);
printf("var: %.2lf\n", var);
printf("stdev: %.2lf\n", stdev);
free(a);
return 0;
}
|
omp_bar_bench.c | /*
* Copyright (c) 2013-2016, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
*/
#define _GNU_SOURCE /* for cpu_set_t, CPU_SET and pthread_setaffinity_np */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <omp.h>
#include "measurement_framework.h"
#include "placement.h"
#include <sched.h>
#include <pthread.h>
/*-----------------------------------------------------------*/
/* barrierKernel */
/* */
/* Main kernel for the barrier benchmark. All OpenMP threads */
/* are pinned to cores and repeatedly synchronise on an OMP */
/* BARRIER while the TSC measures the cost of each barrier. */
/*-----------------------------------------------------------*/
int barrierKernel(int totalReps, int fill){
int repIter;
struct sk_measurement * mes;
char outname[512];
int i;
if (fill) {
sprintf(outname, "barriers_kernel_omp_fill%d", \
omp_get_max_threads());
} else {
sprintf(outname, "barriers_kernel_omp_rr%d", \
omp_get_max_threads());
}
mes = (struct sk_measurement*) malloc(sizeof(struct sk_measurement)*omp_get_max_threads());
for(i=0; i<omp_get_max_threads(); i++){
uint64_t *buf = (uint64_t*) malloc(sizeof(uint64_t)*totalReps);
sk_m_init(&(mes[i]), totalReps, outname, buf);
}
uint32_t* cores = placement(omp_get_max_threads(), fill);
/* Open the parallel region */
#pragma omp parallel default(none) \
private(repIter) \
shared(totalReps,mes,cores)//,comm)
{
int tid =omp_get_thread_num();
//set affinity
cpu_set_t mask;
CPU_ZERO(&mask);
CPU_SET(cores[tid],&mask);
pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&mask);
for (repIter=0; repIter<totalReps; repIter++){
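// Two warm-up barriers align all threads; the TSC is then reset so that
// only the cost of the measured barrier below is captured.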
#pragma omp barrier
#pragma omp barrier
sk_m_restart_tsc(&(mes[tid]));
{
#pragma omp barrier
}
sk_m_add(&(mes[tid]));
}
}
for(i=0; i<omp_get_max_threads(); i++)
sk_m_print(&(mes[i]),i);
return 0;
}
int runBarrier(int totalReps, int fill, bool sync){
int repIter;
struct sk_measurement * mes;
char outname[512];
int i;
if (sync) {
sprintf(outname, "barriers_ompsync_fill%d", omp_get_max_threads());
} else {
sprintf(outname, "barriers_omp_fill%d", omp_get_max_threads());
}
mes = (struct sk_measurement*) malloc(sizeof(struct sk_measurement)*omp_get_max_threads());
for(i=0; i<omp_get_max_threads(); i++){
uint64_t *buf = (uint64_t*) malloc(sizeof(uint64_t)*totalReps);
sk_m_init(&(mes[i]), totalReps, outname, buf);
}
uint32_t* cores = placement(omp_get_max_threads(), fill);
/* Open the parallel region */
#pragma omp parallel default(none) \
private(repIter) \
shared(totalReps,mes,cores,sync)//,comm)
{
int tid =omp_get_thread_num();
//set affinity
cpu_set_t mask;
CPU_ZERO(&mask);
CPU_SET(cores[tid],&mask);
pthread_setaffinity_np(pthread_self(),sizeof(cpu_set_t),&mask);
for (repIter=0; repIter<totalReps; repIter++){
if (sync){
// In case of additional synchronization:
// Execute two more barriers before each measured barrier
#pragma omp barrier
#pragma omp barrier
// .. and reset the TSC to measure the cost of the next barrier ..
sk_m_restart_tsc(&(mes[tid]));
}
{
#pragma omp barrier
}
sk_m_add(&(mes[tid]));
}
}
for(i=0; i<omp_get_max_threads(); i++)
sk_m_print(&(mes[i]),i);
return 0;
}
int barrierDriver(int totalReps){
/* initialise repsToDo to defaultReps */
int repsToDo = totalReps;
printf("Entering\n");fflush(stdout);
/* perform warm-up for benchmark */
/* Initialise the benchmark */
barrierKernel(repsToDo, 1);
barrierKernel(repsToDo, 0);
return 0;
}
int main(int argc, char *argv[]){
int totalReps = 1000;
if (argc != 2) {
printf("Calling Barrier driver\n");
barrierDriver(totalReps);
} else {
printf("Running barrier benchmark\n");
runBarrier(totalReps, 1, 0);
runBarrier(totalReps, 1, 1);
}
return 0;
}
|