|
|
|
|
|
|
|
|
|
|
|
|
|
|
#include <inttypes.h>
|
|
|
#include <opencv2/dnn/shape_utils.hpp>
|
|
|
#include "../precomp.hpp"
|
|
|
#include "../ie_ngraph.hpp"
|
|
|
#include "layers_common.hpp"
|
|
|
#include "cpu_kernels/fast_gemm.hpp"
|
|
|
|
|
|
namespace cv
|
|
|
{
|
|
|
namespace dnn
|
|
|
{
|
|
|
|
|
|
static bool IsTransposeReshapeForEinsum(const std::vector<size_t>& perm,
|
|
|
std::vector<int> input_dims,
|
|
|
MatShape& new_shape) {
|
|
|
|
|
|
|
|
|
size_t last_permuted_axis = 0;
|
|
|
for (size_t i = 0; i < perm.size(); ++i) {
|
|
|
if (input_dims[perm[i]] == 1)
|
|
|
continue;
|
|
|
if (perm[i] < last_permuted_axis)
|
|
|
return false;
|
|
|
last_permuted_axis = perm[i];
|
|
|
}
|
|
|
new_shape.assign(input_dims.begin(), input_dims.end());
|
|
|
for (size_t i = 0; i < perm.size(); ++i) {
|
|
|
new_shape[i] = input_dims[perm[i]];
|
|
|
}
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
|
|
|
// Transposes `input` according to `permutation`, interpreting the data through
// `input_shape_override` (the Mat is reshaped to that shape first if its rank
// differs). Returns a new Mat holding the transposed data.
//
// Fixes: `permutation` was passed by value (full vector copy per call) — now a
// const reference; the rank assertion compared int against size_t.
static Mat Transpose(
    const Mat& input,
    const MatShape& input_shape_override,
    const std::vector<size_t>& permutation)
{
    int input_rank = input_shape_override.size();
    CV_Assert(input_rank == static_cast<int>(permutation.size()));

    // The caller may pass a Mat whose dims differ from the override shape
    // (e.g. a homogenized view); reshape so transposeND sees the right rank.
    bool reshape = input.dims != input_rank;

    Mat input_reshaped;
    if(reshape){
        input_reshaped = input.reshape(1, input_shape_override.size(), input_shape_override.data());
    }

    MatShape outputDims;
    outputDims.reserve(input_rank);
    for (const auto& dim : permutation)
        outputDims.emplace_back(input_shape_override[dim]);

    Mat output;
    // transposeND expects the axis order as int-based MatShape.
    MatShape order(permutation.begin(), permutation.end());

    cv::transposeND((reshape ? input_reshaped : input), order, output);
    return output;
}
|
|
|
|
|
|
|
|
|
// Reports whether `permutation` is anything other than the identity mapping,
// i.e. whether applying it actually requires moving data.
bool IsTransposeRequired(size_t input_rank, const std::vector<size_t>& permutation) {
    CV_Assert(input_rank == permutation.size());

    // A scalar (rank 0) can never need a transpose.
    if (input_rank == 0){
        return false;
    }

    // The permutation is a no-op exactly when every axis maps to itself.
    for (size_t axis = 0; axis < input_rank; ++axis) {
        if (permutation[axis] != axis)
            return true;
    }
    return false;
}
|
|
|
|
|
|
|
|
|
// Decides whether the diagonal axes (dim1, dim2) must first be moved to the
// two innermost positions before the diagonal can be extracted.
bool IsTransposeRequiredForDiagonal(int dim1, int dim2, int rank) {
    // With rank 2 the only two axes are already innermost.
    if (rank == 2)
        return false;

    // No move needed when the pair already occupies the last two positions
    // (in either order).
    const int inner = rank - 1;
    const int outer = rank - 2;
    const bool alreadyInnermost = (dim1 == inner && dim2 == outer) ||
                                  (dim1 == outer && dim2 == inner);
    return !alreadyInnermost;
}
|
|
|
|
|
|
template <typename T>
|
|
|
Mat DiagonalDataAssignment(Mat input) {
|
|
|
|
|
|
int rank = input.dims;
|
|
|
CV_Assert(rank >= 2);
|
|
|
CV_Assert(input.size[rank - 1] == input.size[rank - 2]);
|
|
|
MatShape original_dims = shape(input);
|
|
|
|
|
|
if (rank > 3){
|
|
|
|
|
|
int collapsed_size = 1;
|
|
|
for (int i = 0; i < rank - 2; ++i) {
|
|
|
collapsed_size *= input.size[i];
|
|
|
}
|
|
|
std::vector<int> reshaped_dims = {collapsed_size, input.size[rank - 2], input.size[rank - 1]};
|
|
|
input = input.reshape(1, reshaped_dims);
|
|
|
}
|
|
|
|
|
|
|
|
|
int total_slices = input.size[0];
|
|
|
|
|
|
original_dims[rank - 1] = 1;
|
|
|
Mat output = Mat(original_dims, input.type());
|
|
|
|
|
|
int inner_stride = input.size[input.dims - 1];
|
|
|
auto inputPtr = input.ptr<T>();
|
|
|
auto outputPtr = output.ptr<T>();
|
|
|
for (int slice = 0; slice < total_slices; ++slice) {
|
|
|
for (int j = 0; j < inner_stride; ++j) {
|
|
|
|
|
|
outputPtr[slice * inner_stride + j] = inputPtr[slice * inner_stride * inner_stride + j * inner_stride + j];
|
|
|
}
|
|
|
}
|
|
|
return output;
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Extracts the diagonal over the two innermost axes of `input`.
// `preserve_innermost_dim_val` selects which of the two axes is expected to
// collapse to 1 in the result (true -> axis rank-2, false -> axis rank-1).
// Returns the diagonal tensor; errors out if its shape disagrees with the
// expectation.
Mat DiagonalInnermostDims(const Mat& input, bool preserve_innermost_dim_val) {
    const MatShape input_dims = shape(input);
    int rank = input_dims.size();

    // Diagonal extraction requires the two innermost axes to be square.
    CV_CheckEQ(input.size[rank - 1], input.size[rank - 2],
    "innermost dims should have the same dim value to parse the diagonal elements");

    // Expected output shape: one of the two innermost axes collapses to 1.
    MatShape output_dims = input_dims;
    if (preserve_innermost_dim_val) {
        output_dims[rank - 2] = 1;
    } else {
        output_dims[rank - 1] = 1;
    }

    // NOTE(review): DiagonalDataAssignment always collapses axis rank-1, while
    // the expectation above collapses rank-2 when preserve_innermost_dim_val is
    // true — presumably that path relies on the preceding transpose making the
    // shapes agree; verify against the callers in Diagonal().
    Mat output = DiagonalDataAssignment<float>(input);

    if (output_dims != shape(output)){
        CV_Error(Error::StsError, "Output shape does not match with calculated shape");
    }
    return output;
}
|
|
|
|
|
|
// Extracts the diagonal of `input` along axes dim1 and dim2 (which must be
// distinct and of equal size). The result drops the higher of the two axes,
// mirroring numpy-style einsum diagonal semantics ("ii->i").
Mat Diagonal(const Mat& input, int dim1, int dim2)
{
    const MatShape input_dims = shape(input);
    int rank = input_dims.size();

    // Validate: rank >= 2, distinct axes, and equal extents along them.
    if (!(rank >= 2 && dim1 != dim2 && input_dims[dim1] == input_dims[dim2])){
        // Build a human-readable "d0 d1 d2 ..." shape string for the error.
        std::string input_dims_str = std::accumulate(std::next(input_dims.begin()), input_dims.end(), std::to_string(input_dims[0]),
                                    [](const std::string& a, int b) {
                                        return a + ' ' + std::to_string(b);
                                    });
        CV_Error(Error::StsError, cv::format("Cannot parse the diagonal elements along dims %d and %d for input shape %s",dim1, dim2, input_dims_str.c_str()));
    }

    int first_dim = std::min(dim1, dim2);
    int second_dim = std::max(dim1, dim2);

    Mat output;
    bool preserve_innermost_dim_val = false;

    // If the diagonal axes are not already the two innermost, move them there,
    // take the diagonal, then undo the move.
    bool is_transpose_required = IsTransposeRequiredForDiagonal(dim1, dim2, rank);
    if (is_transpose_required)
    {
        std::vector<size_t> permutation(rank, 0);
        int first_dim_axis = -1;  // final position of first_dim after the transpose

        // Place first_dim at rank-2 unless second_dim already sits there, in
        // which case first_dim goes to rank-1 and the innermost value is the
        // one to preserve.
        if (first_dim == rank - 2) {
            permutation[rank - 2] = first_dim;
            first_dim_axis = rank - 2;
        } else {
            if (second_dim != rank - 2) {
                permutation[rank - 2] = first_dim;
                first_dim_axis = rank - 2;
            } else {
                permutation[rank - 1] = first_dim;
                first_dim_axis = rank - 1;
                preserve_innermost_dim_val = true;
            }
        }

        // second_dim takes whichever innermost slot first_dim did not.
        if (first_dim_axis != rank - 1) {
            permutation[rank - 1] = second_dim;
        } else {
            permutation[rank - 2] = second_dim;
        }

        // All remaining axes keep their relative order in the leading slots.
        size_t iter = 0;
        for (int i = 0; i < rank; ++i) {
            if (i != first_dim && i != second_dim) {
                permutation[iter++] = i;
            }
        }

        Mat transposed = Transpose(input, input_dims, permutation);

        output = DiagonalInnermostDims(transposed, preserve_innermost_dim_val);

        // Invert the permutation to restore the original axis order.
        iter = 0;
        std::vector<size_t> reverse_permutation(rank, 0);
        for (const auto& perm : permutation) {
            reverse_permutation[perm] = iter++;
        }

        output = Transpose(output, shape(output), reverse_permutation);
    } else {
        // Axes are already innermost: take the diagonal directly.
        output = DiagonalInnermostDims(input, preserve_innermost_dim_val);
    }

    // Drop the (now size-1) higher diagonal axis from the result shape.
    MatShape output_dims = shape(output);

    auto iter = output_dims.begin() + second_dim;
    output_dims.erase(iter);
    output = output.reshape(1, output_dims);
    return output;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Maps an einsum subscript label to a dense table index:
// 'a'..'z' -> 0..25, 'A'..'Z' -> 26..51, anything else -> -1.
// The result indexes the fixed 52-entry letter2count/letter2index tables
// (numOfLetters == 52), so it must stay within [0, 52).
//
// Fix: the uppercase branch returned static_cast<int>('z') + ch - 'A'
// (122..147), overflowing those tables; uppercase letters must follow the
// 26 lowercase slots, i.e. 26 + (ch - 'A').
int letterToIndex(const char ch) {
    if (ch >= 'a' && ch <= 'z') {
        return static_cast<int>(ch) - 'a';
    }

    if (ch >= 'A' && ch <= 'Z') {
        return 26 + static_cast<int>(ch) - 'A';
    }

    // Not a valid subscript label.
    return -1;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// CPU implementation of the Einsum layer. The equation is parsed once at
// construction time; forward() then reduces/contracts the operands pairwise.
class LayerEinsumImpl CV_FINAL : public EinsumLayer
{
private:
    // Internal Reduce(SUM) layer, (re)created by reduceSum() per call.
    Ptr<ReduceLayer> reduce;
public:
    // Number of input tensors declared by the params.
    int numInputs;

    // Input shapes as declared at graph-construction time.
    std::vector<MatShape> einsumInpShapes;

    // Per-input preprocessed (diagonalized/transposed/homogenized) tensors;
    // an entry is empty when the raw input can be used directly.
    std::vector<Mat> preProcessedInputs;

    // Per-input shapes padded to numLetterIndices axes (size 1 where absent).
    std::vector<MatShape> homogenizedInputDims;

    // Final output shape derived from the equation.
    MatShape einsumOutDims;

    // Equation pieces: left of "->", right of "->", and the raw string.
    String lhs_eq, rhs_eq, equation;

    // One comma-separated LHS token per input operand.
    std::vector<String> lhs_eq_tokens;

    // True when the equation spells out the output explicitly via "->".
    bool explicitEquation = false;

    // For each input: its axes mapped to global subscript indices.
    std::vector<std::vector<int>> inputSubscriptIndices;

    // For each subscript index: the last input that uses it (-1 if it
    // appears in the output and must be preserved).
    std::vector<int> subscriptIndicesToLastInput;

    // For each subscript index: its dimension value.
    std::vector<int> subscriptIndicesToDimValue;

    // For each subscript index: its axis in the output (-1 if reduced).
    std::vector<int> subscriptIndicesToOutputIndices;

    // 26 lowercase + 26 uppercase subscript labels.
    static const size_t numOfLetters = 52;

    // Occurrence count per letter across all inputs.
    std::array<int, numOfLetters> letter2count;

    // Letter -> global subscript index (-1 when unused).
    std::array<int, numOfLetters> letter2index;

    // Total number of distinct subscript indices (letters + ellipsis dims).
    int numLetterIndices = 0;

    // Number of dimensions covered by "..." (0 when no ellipsis is used).
    size_t numOfEllipsisDims = 0;

    // GEMM backend options for batched matmul.
    FastGemmOpt opt;

    void parseEquation(String equation);
    void processEquation(const std::vector<MatShape>& inputs);
    void processBroadcastedDims();
    void validateOutputSubscript();
    void calculateOutputShape();
    void preProcessInputs(InputArrayOfArrays& inputs);
    Mat reduceSum(Mat& src, MatShape& reduceAxis);
    Mat FinalizeOutput(const Mat& candidateOuput, const MatShape& ordered_subscript_indices_in_candidate);
    Mat pairwiseOperandProcess(
        const Mat& left,
        const MatShape& leftShapeOverride,
        const Mat& right,
        const MatShape& rightShapeOverride,
        const MatShape& reduceDims,
        bool isFinalPair
    );
    Mat batchwiseMatMul(
        const Mat& input1,
        const MatShape& input1ShapeOverride,
        const Mat& input2,
        const MatShape& input2ShapeOverride
    );

    // Parses params ("equation", "inputSize", "outputSize", "inputShapesN")
    // and runs the full equation-analysis pipeline up front.
    LayerEinsumImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        equation = params.get<String>("equation");
        int outputSize = params.get<int>("outputSize");
        numInputs = params.get<int>("inputSize");

        CV_CheckEQ(outputSize, 1, "Einsum layer should only have one output");

        // Collect the declared shape of each input.
        // NOTE(review): the inner loop variable shadows the outer `i`.
        for (int i=0; i < numInputs; i++){
            auto param = params.get("inputShapes" + cv::format("%d", i));
            int inputDims = param.size();
            std::vector<int> shape;
            for (int i = 0; i < inputDims; ++i)
                shape.emplace_back(param.get<int>(i));
            einsumInpShapes.emplace_back(shape);
        }

        opt.init();

        inputSubscriptIndices.reserve(numInputs);

        // 10 is a heuristic reserve; vectors grow as needed.
        subscriptIndicesToLastInput.reserve(10);
        subscriptIndicesToDimValue.reserve(10);

        letter2count.fill(0);
        letter2index.fill(-1);

        // Pipeline: tokenize -> map letters/ellipsis -> broadcast handling ->
        // output validation -> output shape.
        parseEquation(equation);

        processEquation(einsumInpShapes);
        processBroadcastedDims();

        validateOutputSubscript();
        calculateOutputShape();
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
    }

    // Output shape was fully determined at construction; just verify the
    // runtime inputs match the declared ones.
    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        CV_UNUSED(internals);

        CV_CheckEQ(static_cast<int>(inputs.size()), numInputs,
        "Number of inputs in forward and inputs during graph constructions do not match");
        for (int i = 0; i < numInputs; i++)
        {
            if (inputs[i] != einsumInpShapes[i])
                CV_Error(Error::StsAssert, "Passed input shapes do not match with parsed input shapes!");
        }

        outputs.clear();
        outputs.emplace_back(einsumOutDims);
        return true;

    }

    // Evaluates the einsum: preprocess each operand, reduce/contract them
    // left-to-right, then reshape to the final output dims.
    void forward(InputArrayOfArrays inputs_arr,
                 OutputArrayOfArrays outputs_arr,
                 OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        // fp16 is handled via the generic fallback (convert, run, convert back).
        if (inputs_arr.depth() == CV_16F)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        preProcessInputs(inputs_arr);

        std::vector<cv::Mat> rawInputs, outputs;
        inputs_arr.getMatVector(rawInputs);
        outputs_arr.getMatVector(outputs);
        Mat result;

        // Stage 1: handle the first operand — sum out every subscript whose
        // last occurrence is in input 0.
        {
            MatShape reducedDims;
            MatShape preservedDims;
            MatShape preservedShape;

            reducedDims.reserve(numLetterIndices);
            preservedDims.reserve(numLetterIndices);

            for (size_t i = 0; i < numLetterIndices; ++i) {
                if (subscriptIndicesToLastInput[i] == 0) {
                    reducedDims.push_back(i);
                } else {
                    preservedDims.push_back(i);
                }
            }

            if (reducedDims.size() != 0)
            {
                result = reduceSum((!preProcessedInputs[0].empty() ? preProcessedInputs[0] : rawInputs[0]), reducedDims);
            } else {
                // Nothing to reduce; use the preprocessed tensor when one exists.
                if (!preProcessedInputs[0].empty())
                {
                    result = preProcessedInputs[0];
                }
            }

            // Single-operand equations finish here (transpose to output order).
            if (numInputs == 1) {
                result = FinalizeOutput(!result.empty() ? result : rawInputs[0], preservedDims);
            }
        }

        // Stage 2: fold the remaining operands in pairwise, reducing each
        // subscript at its last occurrence.
        {
            bool isFinalPair = false;

            for (int input = 1; input < numInputs; ++input) {
                MatShape reducedDims;
                reducedDims.reserve(numLetterIndices);
                for (int dim = 0; dim < numLetterIndices; ++dim)
                {
                    if (subscriptIndicesToLastInput[dim] == input)
                    {
                        reducedDims.push_back(dim);
                    }
                }

                if (input == numInputs - 1)
                    isFinalPair = true;

                // Snapshot the running result's shape as the left override.
                MatShape tmpResult;
                for (int i = 0; i < result.size.dims(); i++)
                    tmpResult.emplace_back(result.size[i]);

                result = pairwiseOperandProcess(!result.empty() ? result : rawInputs[0],
                                                !result.empty() ? tmpResult : homogenizedInputDims[0],
                                                !preProcessedInputs[input].empty() ? preProcessedInputs[input] : rawInputs[input],
                                                homogenizedInputDims[input],
                                                reducedDims,
                                                isFinalPair);
            }
        }

        // Sanity check: the element count must match before the final reshape.
        size_t reqProd = std::accumulate(einsumOutDims.begin(), einsumOutDims.end(), 1, std::multiplies<int>());
        MatShape realOutputDims = shape(result);
        size_t realProd = std::accumulate(realOutputDims.begin(), realOutputDims.end(), 1, std::multiplies<int>());

        CV_CheckEQ(reqProd, realProd, "Real output can not be shaped in to required output");

        result = result.reshape(1, einsumOutDims.size(), einsumOutDims.data());
        result.copyTo(outputs[0]);
    }

#ifdef HAVE_DNN_NGRAPH
    // OpenVINO backend: delegate directly to the ngraph Einsum op.
    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >&,
                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE {
        ov::OutputVector inputs(nodes.size());
        for (size_t i = 0; i < nodes.size(); ++i) {
            inputs[i] = nodes[i].dynamicCast<InfEngineNgraphNode>()->node;
        }
        auto einsum = std::make_shared<ov::op::v7::Einsum>(inputs, equation);
        return new InfEngineNgraphNode(einsum);
    }
#endif

};
|
|
|
|
|
|
// Sums `src` over the axes listed in `reduceAxis` by delegating to an
// internal Reduce(SUM) layer instance (stored in `reduce`).
Mat LayerEinsumImpl::reduceSum(Mat& src, MatShape& reduceAxis)
{
    // Configure a SUM reduction over the requested axes.
    LayerParams reduceParams;
    reduceParams.set("reduce", "SUM");
    const int axisCount = reduceAxis.size();
    reduceParams.set("axes", DictValue::arrayInt(&reduceAxis[0], axisCount));
    reduce = ReduceLayer::create(reduceParams);

    // Let the Reduce layer compute the shape of the reduced tensor.
    std::vector<MatShape> inShapes{shape(src)};
    std::vector<MatShape> outShapes, scratchShapes;
    reduce->getMemoryShapes(inShapes, 1, outShapes, scratchShapes);

    Mat reduced(outShapes[0], CV_32F);

    std::vector<Mat> layerInputs;
    std::vector<Mat> layerOutputs;
    std::vector<Mat> layerInternals;
    layerInputs.emplace_back(src);
    layerOutputs.emplace_back(reduced);

    reduce->forward(layerInputs, layerOutputs, layerInternals);
    return layerOutputs[0];
}
|
|
|
|
|
|
// Prepares every input operand for pairwise contraction:
//  - repeated subscripts within one operand are collapsed via Diagonal(),
//  - axes are transposed into global subscript-index order,
//  - the tensor is reshaped to the homogenized rank (numLetterIndices axes,
//    size 1 where the subscript is absent).
// Fills preProcessedInputs (empty Mat when the raw input needs no work) and
// homogenizedInputDims, one entry per input.
void LayerEinsumImpl::preProcessInputs(InputArrayOfArrays& inputs_arr)
{
    std::vector<cv::Mat> inputs;
    inputs_arr.getMatVector(inputs);

    preProcessedInputs.reserve(inputs.size());
    homogenizedInputDims.reserve(inputs.size());

    int inputIter = 0;
    for(const Mat& input : inputs)
    {
        Mat preprocessed;

        // Raw (non-homogenized) shape of this input.
        MatShape input_dims = shape(input);

        const auto& currSubscriptIndices = inputSubscriptIndices[inputIter];

        CV_CheckEQ(input_dims.size(), currSubscriptIndices.size(),
            "Rank of the input must match number of subscript labels corresponding to the input");

        // subscript index -> axis position within this (preprocessed) input,
        // or -1 if the subscript does not occur here.
        std::vector<int> subscriptIndicesToInputIndex(numLetterIndices, -1);

        // Homogenized shape: one slot per subscript index, defaulting to 1.
        MatShape homogenizedInputDims_(numLetterIndices, 1);

        int dimIndexInIreprocessedInput = 0;
        int dimIndexInOriginalInput = 0;

        for (const auto& subscriptIndex : currSubscriptIndices)
        {
            if(subscriptIndicesToInputIndex[subscriptIndex] == -1){
                // First occurrence of this subscript in the operand.
                subscriptIndicesToInputIndex[subscriptIndex] = dimIndexInIreprocessedInput++;
                homogenizedInputDims_[subscriptIndex] = input_dims[dimIndexInOriginalInput];
            } else {
                // Repeated subscript (e.g. "ii"): collapse the pair of axes
                // into their diagonal.
                preprocessed = Diagonal(
                    !preprocessed.empty() ? preprocessed : inputs[inputIter],
                    subscriptIndicesToInputIndex[subscriptIndex],
                    dimIndexInIreprocessedInput);
            }
            ++dimIndexInOriginalInput;
        }

        // Axis order that sorts the operand's axes by subscript index.
        std::vector<size_t> permutation;
        for(auto& d : subscriptIndicesToInputIndex)
        {
            if (d != -1)
                permutation.emplace_back(d);
        }

        if (IsTransposeRequired(
            !preprocessed.empty() ? preprocessed.size.dims() : inputs[inputIter].size.dims(),
            permutation))
        {
            // Bring the axes into global subscript order.
            preprocessed = Transpose(
                !preprocessed.empty() ? preprocessed : inputs[inputIter],
                !preprocessed.empty() ? shape(preprocessed) : shape(inputs[inputIter]),
                permutation);
        }

        // Pad the rank up to numLetterIndices with size-1 axes.
        if (!preprocessed.empty())
        {
            preprocessed = preprocessed.reshape(1, homogenizedInputDims_.size(), homogenizedInputDims_.data());
        }

        preProcessedInputs.emplace_back(preprocessed);
        homogenizedInputDims.emplace_back(homogenizedInputDims_);
        ++inputIter;
    }
}
|
|
|
|
|
|
// Splits the equation into its left-hand side, an optional explicit output
// ("->" form, recorded in rhs_eq/explicitEquation), and one comma-separated
// LHS token per input operand (lhs_eq_tokens).
void LayerEinsumImpl::parseEquation(String equation)
{
    // Whitespace carries no meaning in einsum equations; strip it up front.
    equation.erase(std::remove_if(equation.begin(), equation.end(), ::isspace), equation.end());

    // An explicit equation spells out the output subscript after "->".
    const std::size_t arrowPos = equation.find("->");
    if (arrowPos == std::string::npos)
    {
        // Implicit form: the output subscript is inferred later.
        lhs_eq = equation;
    }
    else
    {
        lhs_eq = equation.substr(0, arrowPos);
        rhs_eq = equation.substr(arrowPos + 2);
        explicitEquation = true;
    }

    // One token per input operand, separated by commas.
    std::stringstream tokenStream(lhs_eq);
    std::string token;
    while (std::getline(tokenStream, token, ','))
        lhs_eq_tokens.emplace_back(token);
}
|
|
|
|
|
|
|
|
|
// Walks the (explicit) output subscript rhs_eq, appending each referenced
// dimension to einsumOutDims, marking its subscript as "kept" (last input -1)
// and recording its output axis in subscriptIndicesToOutputIndices.
// Ellipsis dims ("...") are expanded in-place when the third '.' is seen.
void LayerEinsumImpl::calculateOutputShape()
{
    // State for recognising a "..." run; '.' outside a completed run is an error.
    bool middleOfEllipsis = false;
    int ellipsisCharCount = 0;

    subscriptIndicesToOutputIndices.resize(numLetterIndices, -1);

    // Guards against a letter appearing twice in the output subscript.
    std::array<int, numOfLetters> outputLetterToCount;
    outputLetterToCount.fill(0);

    int outputDimCounter = 0;
    for (auto letter : rhs_eq)
    {
        if(letter == '.')
        {
            middleOfEllipsis = true;

            // NOTE(review): the count is never reset, so this presumably
            // relies on at most one ellipsis being present — confirm.
            if (++ellipsisCharCount > 3) {
                CV_Error(Error::StsError, "Found a '.' not part of an ellipsis in the output subscript provided");
            }

            if (ellipsisCharCount == 3) {
                middleOfEllipsis = false;
                // Broadcast dims occupy subscript indices [0, numOfEllipsisDims).
                for (size_t i = 0; i < numOfEllipsisDims; ++i) {
                    einsumOutDims.emplace_back(subscriptIndicesToDimValue[i]);

                    // -1 marks the subscript as preserved (never reduced).
                    subscriptIndicesToLastInput[i] = -1;
                    subscriptIndicesToOutputIndices[i] = outputDimCounter++;
                }
            }
        } else {
            CV_CheckEQ(middleOfEllipsis, false,
                "Encountered '.' character that is not part of output subscript");

            auto letterIndex = letterToIndex(letter);

            CV_CheckNE(letterIndex, -1,
                "The only permissible subscript labels are lowercase letters (a-z) and uppercase letters (A-Z).");
            CV_CheckEQ(outputLetterToCount[letterIndex], 0,
                "Output subscript constains repeated letters");

            ++outputLetterToCount[letterIndex];
            auto mappedIndex = letter2index[letterIndex];

            CV_CheckNE(mappedIndex, -1,
                "Output subscript has letters that were not encountered in the inputs");

            einsumOutDims.emplace_back(subscriptIndicesToDimValue[mappedIndex]);

            // Output subscripts are never reduced; record their output axis.
            subscriptIndicesToLastInput[mappedIndex] = -1;
            subscriptIndicesToOutputIndices[mappedIndex] = outputDimCounter++;
        }
    }
}
|
|
|
|
|
|
void LayerEinsumImpl::validateOutputSubscript()
|
|
|
{
|
|
|
|
|
|
|
|
|
if(explicitEquation)
|
|
|
{
|
|
|
|
|
|
if(numOfEllipsisDims > 0)
|
|
|
{
|
|
|
if(rhs_eq.find("...") == std::string::npos)
|
|
|
{
|
|
|
CV_Error(Error::StsError,
|
|
|
"Provided output subscript does not include ellipsis while Inputs subscrits constain ellipsis");
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
|
|
|
void LayerEinsumImpl::processBroadcastedDims()
|
|
|
{
|
|
|
|
|
|
if (numOfEllipsisDims > 0)
|
|
|
{
|
|
|
|
|
|
|
|
|
numLetterIndices += numOfEllipsisDims;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
for (size_t i = 0; i < numOfLetters; ++i){
|
|
|
if (letter2count[i] != -1){
|
|
|
letter2index[i] += numOfEllipsisDims;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
std::vector<int> tempIndex2LastInput(numLetterIndices, -1);
|
|
|
for (int i = 0; i < subscriptIndicesToLastInput.size(); ++i){
|
|
|
tempIndex2LastInput[i + numOfEllipsisDims] = subscriptIndicesToLastInput[i];
|
|
|
}
|
|
|
subscriptIndicesToLastInput = std::move(tempIndex2LastInput);
|
|
|
|
|
|
std::vector<int> tempIndexToDimValue(numLetterIndices, -1);
|
|
|
for (int i = 0; i < subscriptIndicesToDimValue.size(); ++i){
|
|
|
tempIndexToDimValue[i + numOfEllipsisDims] = subscriptIndicesToDimValue[i];
|
|
|
}
|
|
|
subscriptIndicesToDimValue = std::move(tempIndexToDimValue);
|
|
|
|
|
|
for (size_t i = 0; i < inputSubscriptIndices.size(); ++i)
|
|
|
{
|
|
|
auto& currentInputDimIndicesToSubscriptIndices = inputSubscriptIndices[i];
|
|
|
std::vector<int> tempCurrentInputDimIndicesToSubscriptIndices;
|
|
|
tempCurrentInputDimIndicesToSubscriptIndices.reserve(currentInputDimIndicesToSubscriptIndices.size());
|
|
|
|
|
|
|
|
|
const auto& dims = einsumInpShapes[i];
|
|
|
auto rank = dims.size();
|
|
|
|
|
|
size_t dimIter = 0;
|
|
|
size_t numBroadcastedIndices = 0;
|
|
|
while (dimIter < currentInputDimIndicesToSubscriptIndices.size())
|
|
|
{
|
|
|
auto value = currentInputDimIndicesToSubscriptIndices[dimIter];
|
|
|
if (value == numOfLetters)
|
|
|
{
|
|
|
|
|
|
CV_Assert(numBroadcastedIndices < numOfEllipsisDims);
|
|
|
tempCurrentInputDimIndicesToSubscriptIndices.push_back(static_cast<int>(numBroadcastedIndices));
|
|
|
subscriptIndicesToLastInput[numBroadcastedIndices] = i;
|
|
|
|
|
|
|
|
|
if (subscriptIndicesToDimValue[numBroadcastedIndices] == -1)
|
|
|
{
|
|
|
subscriptIndicesToDimValue[numBroadcastedIndices] = dims[dimIter];
|
|
|
} else {
|
|
|
|
|
|
if (subscriptIndicesToDimValue[numBroadcastedIndices] != dims[dimIter])
|
|
|
{
|
|
|
|
|
|
if (subscriptIndicesToDimValue[numBroadcastedIndices] == 1)
|
|
|
{
|
|
|
subscriptIndicesToDimValue[numBroadcastedIndices] = dims[dimIter];
|
|
|
} else {
|
|
|
CV_CheckEQ(dims[dimIter], 1, "The broadcasted dimensions of the inputs are incompatible");
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
++numBroadcastedIndices;
|
|
|
} else {
|
|
|
tempCurrentInputDimIndicesToSubscriptIndices.push_back(value + static_cast<int>(numOfEllipsisDims));
|
|
|
}
|
|
|
++dimIter;
|
|
|
}
|
|
|
|
|
|
CV_Assert(dimIter == rank);
|
|
|
currentInputDimIndicesToSubscriptIndices = std::move(tempCurrentInputDimIndicesToSubscriptIndices);
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Parses each LHS token against its input's shape, building:
//  - letter2index / letter2count (global letter bookkeeping),
//  - subscriptIndicesToDimValue / subscriptIndicesToLastInput,
//  - inputSubscriptIndices (axis -> subscript index per input; ellipsis dims
//    temporarily marked with the sentinel value numOfLetters),
//  - numOfEllipsisDims (must be consistent across all inputs that use "...").
// Dim-value mismatches for a shared letter are tolerated only when one side
// is 1 (broadcasting); otherwise an error is raised.
void LayerEinsumImpl::processEquation(const std::vector<MatShape>& inputs)
{

    int num_input_tensors = inputs.size();
    CV_CheckEQ(static_cast<int>(lhs_eq_tokens.size()), num_input_tensors,
        "Number of input tensors does not match the number of subscripts in the input equation");

    int inputIdx = 0;
    for (const auto& token : lhs_eq_tokens)
    {
        const MatShape shape = inputs[inputIdx];
        size_t rank = shape.size();
        size_t dim_count = 0;  // input axes consumed so far by this token

        std::vector<int> currTokenIndices;
        currTokenIndices.reserve(rank);

        // State for recognising a "..." run within this token.
        bool middleOfellipsis = false;
        int ellipsisCharCount = 0;
        for (auto letter : token)
        {
            if (letter == '.')
            {
                middleOfellipsis = true;

                // NOTE(review): the count is never reset, so this presumably
                // relies on at most one ellipsis per token — confirm.
                if (++ellipsisCharCount > 3)
                {
                    CV_Error(Error::StsError, cv::format("Found a '.' not part of an ellipsis in input: %d", inputIdx));
                }

                if (ellipsisCharCount == 3)
                {
                    middleOfellipsis = false;

                    // Dims covered by "..." = rank minus explicit letters
                    // (token length minus the three '.' characters).
                    int currentNumOfEllipsisDims = static_cast<int>(rank) - token.length() + 3;
                    CV_CheckGE(currentNumOfEllipsisDims, 0,
                        "Einsum subscripts string contains too many subscript labels when compared to the rank of the input");

                    if (currentNumOfEllipsisDims != 0)
                    {
                        // All inputs using "..." must agree on its width.
                        if (numOfEllipsisDims != 0){
                            CV_CheckEQ(numOfEllipsisDims, static_cast<size_t>(currentNumOfEllipsisDims),
                                "Ellipsis must indicate a fixed number of dimensions across all inputs");
                        } else {
                            numOfEllipsisDims = static_cast<size_t>(currentNumOfEllipsisDims);
                        }

                        // Mark ellipsis axes with the sentinel numOfLetters;
                        // processBroadcastedDims() resolves them later.
                        for (size_t i = 0; i < numOfEllipsisDims; ++i){
                            currTokenIndices.push_back(numOfLetters);
                        }

                        dim_count += numOfEllipsisDims;
                    }
                }
            } else {
                if (middleOfellipsis){
                    CV_Error(Error::StsAssert,
                        cv::format(
                            "Encountered '.' character that is not part of an ellipsis in the input: [%d]",
                            inputIdx));
                }

                int letterIdx = letterToIndex(letter);
                CV_CheckNE(letterIdx, -1,
                    "The only permissible subscript labels are lowercase letters (a-z) and uppercase letters (A-Z).");

                int dimValue = shape[dim_count];

                // First global occurrence of this letter: allocate a new
                // subscript index and record its dim value / owning input.
                if(letter2count[letterIdx] == 0){
                    letter2index[letterIdx] = numLetterIndices++;
                    subscriptIndicesToDimValue.push_back(dimValue);
                    subscriptIndicesToLastInput.push_back(inputIdx);

                } else {
                    // Seen before: update the last input using it and check
                    // the dim values are compatible (equal, or one is 1).
                    auto mappedIndx = letter2index[letterIdx];
                    subscriptIndicesToLastInput[mappedIndx] = inputIdx;

                    if (subscriptIndicesToDimValue[mappedIndx] != dimValue) {
                        if (dimValue != 1) {
                            CV_Error(Error::StsError, cv::format("Einsum operands can not be broadcasted."
                                                                "Check input shapes/equation passed."
                                                                "Input shape of operand [%d]", inputIdx) +
                                                                cv::format(" is incompatible in the dimention [%zu].", static_cast<size_t>(dim_count)));
                        }
                    }
                }
                ++letter2count[letterIdx];
                currTokenIndices.push_back(letter2index[letterIdx]);

                CV_CheckLE(++dim_count, rank,
                    "The Einsum subscripts string has an excessive number of subscript labels compared to the rank of the input.");
            }
        }

        // Without an ellipsis, the token must label every axis exactly.
        CV_Assert(!(numOfEllipsisDims == 0 && dim_count != rank)
                && "The Einsum subscripts string does not contain required amount of subscript labels and no ellipsis is provided in the input.");

        inputSubscriptIndices.emplace_back(std::move(currTokenIndices));
        ++inputIdx;
    }
}
|
|
|
|
|
|
// Reorders the axes of `candidateOutput` into the final output axis order.
// `ordered_subscript_indices_in_candidate` lists, per candidate axis, the
// subscript index it carries; axes whose subscript does not appear in the
// output must already be reduced to size 1.
Mat LayerEinsumImpl::FinalizeOutput(
    const Mat& candidateOutput,
    const MatShape& ordered_subscript_indices_in_candidate)
{
    const std::vector<int>& subscript_indices_to_output_indices = subscriptIndicesToOutputIndices;
    const auto output_dims = einsumOutDims;

    MatShape output_shape = output_dims;
    const auto output_rank = output_dims.size();

    // Actual shape of the candidate, straight from the Mat header.
    const MatShape candidate_output_dims = MatShape(candidateOutput.size.p, candidateOutput.size.p + candidateOutput.dims);
    const int candidate_output_rank = candidate_output_dims.size();

    // Candidate shape restricted to the axes that survive into the output.
    MatShape candidate_output_shape_without_reduced_dims;
    candidate_output_shape_without_reduced_dims.reserve(candidate_output_rank);

    // output axis -> candidate axis (i.e. the permutation to apply).
    std::vector<size_t> output_permutation;
    output_permutation.resize(output_rank, 0);
    size_t output_iter = 0;

    for (size_t iter = 0, end = ordered_subscript_indices_in_candidate.size(); iter < end; ++iter)
    {
        auto output_index = subscript_indices_to_output_indices[ordered_subscript_indices_in_candidate[iter]];

        if (output_index != -1)
        {
            // This candidate axis maps to output axis `output_index`.
            output_permutation[output_index] = output_iter++;
            candidate_output_shape_without_reduced_dims.push_back(candidate_output_dims[iter]);
        } else {
            // Reduced subscripts must have been summed down to extent 1.
            CV_CheckEQ(candidate_output_dims[iter], 1,
                "Not all dimensions to be reduced have been reduced in the candidate output. Candidate output dims: ");
        }
    }

    // Only transpose when the permutation actually moves data.
    if (IsTransposeRequired(candidate_output_shape_without_reduced_dims.size(), output_permutation))
    {
        auto candidate_output_transposed = Transpose(
            candidateOutput,
            candidate_output_shape_without_reduced_dims,
            output_permutation);
        return candidate_output_transposed;
    }
    return candidateOutput;
}
|
|
|
|
|
|
// Processes one pair of einsum operands: classifies every axis of the two
// (equal-rank, broadcast-aligned) operands, sum-reduces axes that appear in
// only one operand but must be reduced, transposes both operands into a
// canonical [batch, M, K] x [batch, K, N] layout, multiplies them with
// batchwiseMatMul, and restores the expected axis order of the result.
//
// Parameters:
//   left/right                        - the two operand tensors.
//   leftShapeOverride/rightShapeOverride - shapes to interpret the operands
//                                       with (must cover the same total
//                                       element count as the tensors).
//   reduceDims                        - sorted axis indices to be reduced
//                                       (summed away) by this pairwise step.
//   isFinalPair                       - true when this is the last pairwise
//                                       contraction; the result is then
//                                       finalized via FinalizeOutput instead
//                                       of being permuted back for the next
//                                       pairwise step.
// Returns: the contracted tensor (reduced axes kept as size-1 dims).
Mat LayerEinsumImpl::pairwiseOperandProcess(
    const Mat& left,
    const MatShape& leftShapeOverride,
    const Mat& right,
    const MatShape& rightShapeOverride,
    const MatShape& reduceDims,
    bool isFinalPair
)
{
    // The shape overrides may reinterpret the tensors, but must preserve the
    // element count.
    size_t matDimSize = left.total();
    size_t overrideDimSize = total(leftShapeOverride);

    CV_CheckEQ(matDimSize, overrideDimSize, "Override dims are not compatible with left tensor shape");

    matDimSize = right.total();
    overrideDimSize = total(rightShapeOverride);

    CV_CheckEQ(matDimSize, overrideDimSize, "Override dims are not compatible with right tensor shape");

    const auto& leftDims = leftShapeOverride;
    const auto& rightDims = rightShapeOverride;

    int leftRank = static_cast<int>(leftDims.size());
    int rightRank = static_cast<int>(rightDims.size());

    // Working copies; stay empty until a reduction/transpose actually
    // materializes a new tensor.
    Mat currentLeft;
    Mat currentRight;

    CV_CheckEQ(leftRank, rightRank, "Ranks of pair-wise operands must be equal");

    // Axis classification:
    //   lro - axes present (dim > 1) in both operands and NOT reduced
    //         (shared "batch" axes of the matmul);
    //   lo  - axes present only in the left operand (rows, M);
    //   ro  - axes present only in the right operand (cols, N);
    // reduced axes present in both become the contraction axis (K).
    std::vector<size_t> lro;
    lro.reserve(5);

    std::vector<size_t> lo;
    lo.reserve(5);

    std::vector<size_t> ro;
    ro.reserve(5);

    // Flattened extents of each axis group.
    int lro_size = 1;
    int lo_size = 1;
    int ro_size = 1;
    int reduced_size = 1;

    size_t reduceDimsIter = 0;
    size_t reduceDimsSize = reduceDims.size();

    for (int i = 0; i < leftRank; ++i)
    {
        int leftDim = leftDims[i];
        int rightDim = rightDims[i];

        // A dim of 1 counts as "absent" (broadcastable).
        bool hasLeftDim = leftDim > 1;
        bool hasRightDim = rightDim > 1;

        if (reduceDimsIter < reduceDimsSize && reduceDims[reduceDimsIter] == i)
        {
            // Axis i must be reduced in this pairwise step.
            ++reduceDimsIter;
            if (hasLeftDim && hasRightDim){
                // Present in both: it is contracted by the matmul itself.
                CV_CheckEQ(leftDim, rightDim, "Einsum op: Input dimensions must be equal along an axis to be reduced across all inputs");
                reduced_size *= leftDim;

            } else if (hasLeftDim){
                // Present only on the left: sum it away up-front.
                Mat tensorToReduce = !currentLeft.empty() ? currentLeft : left;
                MatShape shapeToReduce = !currentLeft.empty() ? shape(currentLeft) : leftDims;
                currentLeft = reduceSum(tensorToReduce, shapeToReduce);

            } else if (hasRightDim){
                // Present only on the right: sum it away up-front.
                Mat tensorToReduce = !currentRight.empty() ? currentRight : right;
                MatShape shapeToReduce = !currentRight.empty() ? shape(currentRight) : rightDims;
                // BUGFIX: the reduced right operand must be stored in
                // currentRight (was incorrectly assigned to currentLeft).
                currentRight = reduceSum(tensorToReduce, shapeToReduce);
            }

        } else {
            // Axis i survives this pairwise step.
            if (hasLeftDim && hasRightDim){
                CV_CheckEQ(leftDim, rightDim, "Input shapes do not align");
                lro.push_back(i);
                lro_size *= leftDim;

            } else if (hasLeftDim) {
                lo.push_back(i);
                lo_size *= leftDim;

            } else {
                ro.push_back(i);
                ro_size *= rightDim;
            }
        }
    }

    // Permute the left operand into [lro..., lo..., reduceDims..., ro...]
    // so it can be viewed as [lro_size, lo_size, reduced_size].
    MatShape reshaped_dims;
    std::vector<size_t> left_permutation;
    left_permutation.reserve(lro.size() + lo.size() + reduceDims.size() + ro.size());
    left_permutation.insert(left_permutation.end(), lro.begin(), lro.end());
    left_permutation.insert(left_permutation.end(), lo.begin(), lo.end());

    for (auto& a : reduceDims)
    {
        left_permutation.push_back(a);
    }
    left_permutation.insert(left_permutation.end(), ro.begin(), ro.end());

    if (IsTransposeRequired(!currentLeft.empty() ? currentLeft.dims : leftDims.size(),
                            left_permutation))
    {
        // If the permutation only moves size-1 axes, a plain reshape is
        // equivalent to the transpose and avoids a data copy.
        if (!currentLeft.empty() && IsTransposeReshapeForEinsum(left_permutation,
                                                                shape(currentLeft),
                                                                reshaped_dims))
        {
            currentLeft = currentLeft.reshape(1, reshaped_dims.size(), reshaped_dims.data());
        } else {
            currentLeft = Transpose(!currentLeft.empty() ? currentLeft: left,
                                    !currentLeft.empty() ? shape(currentLeft) : leftDims,
                                    left_permutation);
        }
    }

    // Permute the right operand into [lro..., reduceDims..., ro..., lo...]
    // so it can be viewed as [lro_size, reduced_size, ro_size].
    std::vector<size_t> right_permutation;
    right_permutation.reserve(lro.size() + lo.size() + reduceDims.size() + ro.size());
    right_permutation.insert(right_permutation.end(), lro.begin(), lro.end());

    for (auto& a : reduceDims) {
        right_permutation.push_back(a);
    }
    right_permutation.insert(right_permutation.end(), ro.begin(), ro.end());
    right_permutation.insert(right_permutation.end(), lo.begin(), lo.end());

    if (IsTransposeRequired(!currentRight.empty() ? currentRight.dims: rightDims.size(),
                            right_permutation))
    {
        if (!currentRight.empty() && IsTransposeReshapeForEinsum(right_permutation,
                                                                 shape(currentRight),
                                                                 reshaped_dims))
        {
            currentRight = currentRight.reshape(1, reshaped_dims.size(), reshaped_dims.data());
        } else {
            currentRight = Transpose(!currentRight.empty() ? currentRight : right,
                                     !currentRight.empty() ? shape(currentRight) : rightDims,
                                     right_permutation);
        }
    }

    // Shape of the matmul result, expressed axis-by-axis in the canonical
    // order [lro..., lo..., reduced(=1)..., ro...].
    MatShape outputDims;
    outputDims.reserve(lro.size() + lo.size() + reduceDims.size() + ro.size());
    for (size_t i = 0; i < lro.size(); ++i)
    {
        outputDims.emplace_back(leftDims[lro[i]]);
    }

    for (size_t i = 0; i < lo.size(); ++i)
    {
        outputDims.emplace_back(leftDims[lo[i]]);
    }

    // Reduced axes are kept as explicit size-1 dims.
    for (size_t i = 0; i < reduceDims.size(); ++i)
    {
        outputDims.emplace_back(1);
    }

    for (size_t i = 0; i < ro.size(); ++i) {
        outputDims.emplace_back(rightDims[ro[i]]);
    }

    MatShape currentSubscriptOrder;

    // For intermediate pairs, compute the inverse permutation that puts the
    // result's axes back into their original positions; for the final pair,
    // record the subscript order that FinalizeOutput should map from.
    std::vector<size_t> outputPermutation;
    if (!isFinalPair) {
        outputPermutation.resize(lro.size() + lo.size() + reduceDims.size() + ro.size(), 0);
        size_t iter = 0;
        for (size_t i = 0; i < lro.size(); ++i)
        {
            outputPermutation[lro[i]] = iter++;
        }

        for (size_t i = 0; i < lo.size(); ++i)
        {
            outputPermutation[lo[i]] = iter++;
        }

        for (size_t i = 0; i < reduceDims.size(); ++i)
        {
            outputPermutation[reduceDims[i]] = iter++;
        }

        for (size_t i = 0; i < ro.size(); ++i)
        {
            outputPermutation[ro[i]] = iter++;
        }

    } else {
        currentSubscriptOrder.reserve(lro.size() + lo.size() + reduceDims.size() + ro.size());
        currentSubscriptOrder.insert(currentSubscriptOrder.end(), lro.begin(), lro.end());
        currentSubscriptOrder.insert(currentSubscriptOrder.end(), lo.begin(), lo.end());
        currentSubscriptOrder.insert(currentSubscriptOrder.end(), reduceDims.begin(), reduceDims.end());
        currentSubscriptOrder.insert(currentSubscriptOrder.end(), ro.begin(), ro.end());
    }

    // Contract: [lro, M, K] x [lro, K, N] -> [lro, M, N].
    Mat output = batchwiseMatMul(
        !currentLeft.empty() ? currentLeft : left,
        MatShape({static_cast<int>(lro_size), static_cast<int>(lo_size), static_cast<int>(reduced_size)}),
        !currentRight.empty() ? currentRight : right,
        MatShape({static_cast<int>(lro_size), static_cast<int>(reduced_size), static_cast<int>(ro_size)})
    );

    // Unflatten back to the per-axis canonical shape.
    output = output.reshape(1, outputDims.size(), outputDims.data());

    if (!isFinalPair)
    {
        if (IsTransposeRequired(outputDims.size(), outputPermutation))
        {
            if (IsTransposeReshapeForEinsum(outputPermutation,
                                            outputDims,
                                            reshaped_dims))
            {
                // Transpose only moves size-1 axes -> reshape suffices.
                output = output.reshape(1, reshaped_dims.size(), reshaped_dims.data());
            }
            else {
                output = Transpose(
                    output,
                    outputDims,
                    outputPermutation);
            }
        }
    } else {
        // Last pair: map the result to the einsum output subscript order.
        output = FinalizeOutput(output, currentSubscriptOrder);
    }
    return output;
}
|
|
|
|
|
|
// Batched matrix multiplication for einsum contractions.
// Interprets input1 as [B, M, K] and input2 as [B, K, N] (per the shape
// overrides) and returns their product. With B > 1 the work is delegated to
// fastGemmBatch; with B == 1 a single fastGemm call is used and the result
// is returned reshaped to [1, M, N].
Mat LayerEinsumImpl::batchwiseMatMul(
    const Mat& input1,
    const MatShape& input1ShapeOverride,
    const Mat& input2,
    const MatShape& input2ShapeOverride)
{
    // Validate that the two operands form a legal batched matmul.
    CV_CheckType(input1.type(), input2.type(), "Data types of the inputs must match for MatMul");
    CV_CheckEQ(input1ShapeOverride.size(), (size_t) 3, "Only 1 batch dimension is allowed for MatMul");
    CV_CheckEQ(input2ShapeOverride.size(), (size_t) 3, "Only 1 batch dimension is allowed for MatMul");
    CV_CheckEQ((size_t) input1ShapeOverride[0], (size_t) input2ShapeOverride[0], "Batch dimension should match for MatMul;");
    CV_CheckEQ((size_t) input1ShapeOverride[2], (size_t) input2ShapeOverride[1], "Incompatible matrix dimensions for matMul");

    const int numBatches = input1ShapeOverride[0];
    const int M = input1ShapeOverride[1];
    const int K = input1ShapeOverride[2];
    const int N = input2ShapeOverride[2];

    // Views of the operands, reshaped on demand below.
    Mat lhs = input1;
    Mat rhs = input2;
    Mat result;

    if (numBatches <= 1)
    {
        // Single-matrix path: make both operands plain 2D, multiply once.
        const bool lhsNeedsReshape = input1.dims > 2 || input1.size[0] != M || input1.size[1] != K;
        if (lhsNeedsReshape)
        {
            int lhsShape[] = {M, K};
            lhs = input1.reshape(1, 2, lhsShape);
        }

        const bool rhsNeedsReshape = input2.dims > 2 || input2.size[0] != K || input2.size[1] != N;
        if (rhsNeedsReshape)
        {
            int rhsShape[] = {K, N};
            rhs = input2.reshape(1, 2, rhsShape);
        }

        result = Mat(M, N, lhs.type());
        fastGemm(false, false, 1.0, lhs, rhs, 0.0, result, opt);

        // Keep the batch axis explicit in the returned shape.
        result = result.reshape(1, {1, M, N});
    }
    else
    {
        // Batched path: align both operands to their 3D override shapes and
        // let fastGemmBatch iterate over the batch dimension.
        result = Mat({numBatches, M, N}, input1.type());

        rhs = rhs.reshape(1, input2ShapeOverride);
        lhs = lhs.reshape(1, input1ShapeOverride);

        fastGemmBatch(false, false, 1.0, lhs, rhs, 0.0, result, opt);
    }
    return result;
}
|
|
|
// Factory: builds the einsum layer implementation from the parsed params.
Ptr<EinsumLayer> EinsumLayer::create(const LayerParams& params)
{
    Ptr<EinsumLayer> layer = makePtr<LayerEinsumImpl>(params);
    return layer;
}
|
|
|
|
|
|
}}
|
|
|
|