| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
null | pytorch-main/caffe2/operators/accuracy_op.h |
#ifndef CAFFE2_OPERATORS_ACCURACY_OP_H_
#define CAFFE2_OPERATORS_ACCURACY_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T, class Context>
class AccuracyOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit AccuracyOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
top_k_(this->template GetSingleArgument<int>("top_k", 1)) {}
bool RunOnDevice() override;
protected:
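  // A sample counts as correct if its true label is among the top_k_
  // highest-scoring predictions; top_k = 1 (the default) gives plain accuracy.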
int top_k_;
INPUT_TAGS(PREDICTION, LABEL);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ACCURACY_OP_H_
| 652 | 22.321429 | 68 | h |
null | pytorch-main/caffe2/operators/activation_ops_cudnn.h |
#ifndef CAFFE2_OPERATORS_ACTIVATION_OPS_CUDNN_H_
#define CAFFE2_OPERATORS_ACTIVATION_OPS_CUDNN_H_
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/cudnn_wrappers.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/types.h"
namespace caffe2 {
class CuDNNActivationOpBase : public Operator<CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
template <class... Args>
explicit CuDNNActivationOpBase(Args&&... args)
: Operator<CUDAContext>(std::forward<Args>(args)...),
cudnn_wrapper_(&context_) {
CUDNN_ENFORCE(cudnnCreateTensorDescriptor(&data_desc_));
CUDNN_ENFORCE(cudnnCreateActivationDescriptor(&act_desc_));
}
virtual ~CuDNNActivationOpBase() {
CUDNN_ENFORCE(cudnnDestroyTensorDescriptor(data_desc_));
CUDNN_ENFORCE(cudnnDestroyActivationDescriptor(act_desc_));
}
protected:
void SetTensorDescriptor(
const cudnnDataType_t data_type,
const int data_size) {
if (data_size != input_size_) {
      // The best performance is obtained when the tensor is HW-packed, so we
      // describe the whole tensor as 1 x 1 x 1 x X.size(), i.e. put all
      // elements in W.
input_size_ = data_size;
CUDNN_ENFORCE(cudnnSetTensor4dDescriptor(
data_desc_,
GetCudnnTensorFormat(StorageOrder::NCHW),
data_type,
1,
1,
1,
input_size_));
}
}
CuDNNWrapper cudnn_wrapper_;
cudnnTensorDescriptor_t data_desc_;
cudnnActivationDescriptor_t act_desc_;
int input_size_ = 0;
};
template <cudnnActivationMode_t kCuDNNActivationMode>
class CuDNNActivationOp final : public CuDNNActivationOpBase {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
template <class... Args>
explicit CuDNNActivationOp(Args&&... args)
: CuDNNActivationOpBase(std::forward<Args>(args)...) {
CUDNN_ENFORCE(cudnnSetActivationDescriptor(
act_desc_, kCuDNNActivationMode, CUDNN_PROPAGATE_NAN, 0.0));
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<T>());
if (X.numel() == 0) {
Y->template mutable_data<T>();
return true;
}
this->SetTensorDescriptor(cudnnTypeWrapper<T>::type, X.numel());
CUDNN_ENFORCE(cudnnActivationForward(
this->cudnn_wrapper_.inline_cudnn_handle(),
this->act_desc_,
cudnnTypeWrapper<T>::kOne(),
this->data_desc_,
X.template data<T>(),
cudnnTypeWrapper<T>::kZero(),
this->data_desc_,
Y->template mutable_data<T>()));
return true;
}
};
template <cudnnActivationMode_t kCuDNNActivationMode>
class CuDNNActivationGradientOp final : public CuDNNActivationOpBase {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
template <class... Args>
explicit CuDNNActivationGradientOp(Args&&... args)
: CuDNNActivationOpBase(std::forward<Args>(args)...) {
CUDNN_ENFORCE(cudnnSetActivationDescriptor(
act_desc_, kCuDNNActivationMode, CUDNN_PROPAGATE_NAN, 0.0));
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, at::Half>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& Y = Input(0);
const auto& dY = Input(1);
auto* dX = Output(0, Y.sizes(), at::dtype<T>());
if (Y.numel() == 0) {
dX->template mutable_data<T>();
return true;
}
this->SetTensorDescriptor(cudnnTypeWrapper<T>::type, Y.numel());
CUDNN_ENFORCE(cudnnActivationBackward(
this->cudnn_wrapper_.inline_cudnn_handle(),
this->act_desc_,
cudnnTypeWrapper<T>::kOne(),
this->data_desc_,
Y.template data<T>(),
this->data_desc_,
dY.template data<T>(),
this->data_desc_,
Y.template data<T>(), // Use Y_data as placeholder here.
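        // (passed in place of the X argument; the backward passes of these
        // activations can be computed from Y and dY alone, so X is not needed)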
cudnnTypeWrapper<T>::kZero(),
this->data_desc_,
dX->template mutable_data<T>()));
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ACTIVATION_OPS_CUDNN_H_
| 4,122 | 28.241135 | 80 | h |
null | pytorch-main/caffe2/operators/affine_channel_op.h |
#ifndef CAFFE2_OPERATORS_AFFINE_CHANNEL_OP_H_
#define CAFFE2_OPERATORS_AFFINE_CHANNEL_OP_H_
#include <string>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
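// AffineChannel applies a per-channel affine transform,
//   Y(n, c, ...) = scale(c) * X(n, c, ...) + bias(c),
// e.g. as a replacement for BatchNorm layers whose statistics are frozen.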
template <typename T, class Context>
class AffineChannelOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit AffineChannelOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))),
OP_SINGLE_ARG(bool, "is_learnable", is_learnable_, false) {
CAFFE_ENFORCE_NE(order_, StorageOrder::UNKNOWN);
}
bool RunOnDevice() override {
return order_ == StorageOrder::NCHW ? RunOnDeviceWithOrderNCHW()
: RunOnDeviceWithOrderNHWC();
}
bool RunOnDeviceWithOrderNCHW() {
const auto& X = Input(0);
const auto& scale = Input(1);
const auto& bias = Input(2);
if (is_learnable_) {
CAFFE_ENFORCE(
!IsInputOutputAlias(0, 0),
"In-place affine_channel_op is not supported when "
"is_learnable = true.");
}
const int N = X.dim32(0);
const int C = X.dim32(1);
const int HxW = X.numel() / (N * C);
auto* Y = Output(0, X.sizes(), at::dtype<T>());
math::AffineChannel<T, Context, StorageOrder::NCHW>(
N,
C,
HxW,
X.template data<T>(),
scale.template data<T>(),
bias.template data<T>(),
Y->template mutable_data<T>(),
&context_);
return true;
}
bool RunOnDeviceWithOrderNHWC() {
const auto& X = Input(0);
const auto& scale = Input(1);
const auto& bias = Input(2);
if (is_learnable_) {
CAFFE_ENFORCE(
!IsInputOutputAlias(0, 0),
"In-place affine_channel_op is not supported when "
"is_learnable = true.");
}
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
const int HxW = X.numel() / (N * C);
auto* Y = Output(0, X.sizes(), at::dtype<T>());
math::AffineChannel<T, Context, StorageOrder::NHWC>(
N,
C,
HxW,
X.template data<T>(),
scale.template data<T>(),
bias.template data<T>(),
Y->template mutable_data<T>(),
&context_);
return true;
}
private:
const StorageOrder order_;
const bool is_learnable_;
};
template <typename T, class Context>
class AffineChannelGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit AffineChannelGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))),
OP_SINGLE_ARG(bool, "is_learnable", is_learnable_, false) {
CAFFE_ENFORCE_NE(order_, StorageOrder::UNKNOWN);
}
bool RunOnDevice() override {
return order_ == StorageOrder::NCHW ? RunOnDeviceWithOrderNCHW()
: RunOnDeviceWithOrderNHWC();
}
bool RunOnDeviceWithOrderNCHW();
bool RunOnDeviceWithOrderNHWC();
private:
const StorageOrder order_;
const bool is_learnable_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_AFFINE_CHANNEL_OP_H_
| 3,450 | 27.056911 | 77 | h |
null | pytorch-main/caffe2/operators/alias_with_name.h |
#ifndef ALIAS_WITH_NAME_OP_H_
#define ALIAS_WITH_NAME_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(AliasWithName)
namespace caffe2 {
template <class Context>
class AliasWithNameOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit AliasWithNameOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
name_(this->template GetSingleArgument<std::string>(
"name",
"invalid_name")),
is_backward_(
this->template GetSingleArgument<bool>("is_backward", false)) {
CAFFE_ENFORCE(
OperatorBase::HasArgument("name"), "You have to specify argument name");
}
bool RunOnDevice() override {
auto& input = Input(0);
CAFFE_ENFORCE_GE(input.numel(), 0, "Tensor is not initialized");
    // This doesn't work anymore since this is a "newstyle" operator:
// OutputTensorAlias(0, input);
OperatorBase::SetOutputTensor(0, input.Alias());
return true;
}
protected:
std::string name_;
bool is_backward_;
};
} // namespace caffe2
#endif // ALIAS_WITH_NAME_OP_H_
| 1,234 | 25.276596 | 80 | h |
null | pytorch-main/caffe2/operators/apmeter_op.h |
#ifndef CAFFE2_MAP_OP_H_
#define CAFFE2_MAP_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
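// APMeterOp accumulates (prediction score, label) pairs per class in a
// bounded buffer and computes each class's average precision (AP) from it.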
template <typename T, class Context>
class APMeterOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit APMeterOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
buffer_size_(
this->template GetSingleArgument<int32_t>("buffer_size", 1000)),
buffer_used_(0) {}
bool RunOnDevice() override;
protected:
using BufferDataType = std::pair<float, int>;
// Buffer the predictions for each class
std::vector<std::vector<BufferDataType>> buffers_;
// Capacity of the buffer
int buffer_size_;
// Used buffer
int buffer_used_;
INPUT_TAGS(PREDICTION, LABEL);
protected:
// Buffer predictions for N sample and D classes
void
BufferPredictions(const float* Xdata, const int* labelData, int N, int D);
};
} // namespace caffe2
#endif // CAFFE2_MAP_OP_H_
| 1,027 | 22.906977 | 76 | h |
null | pytorch-main/caffe2/operators/arg_ops.h |
#ifndef CAFFE2_OPERATORS_ARG_OPS_H_
#define CAFFE2_OPERATORS_ARG_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include <c10/util/irange.h>
#include <algorithm>
#include <iterator>
#include <vector>
namespace caffe2 {
template <class Context, class Reducer>
class ArgOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ArgOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(bool, "keepdims", keep_dims_, true) {}
bool RunOnDevice() override {
return DispatchHelper<
TensorTypes<std::int32_t, std::int64_t, float, double>>::
call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& X = Input(0);
const int ndim = X.dim();
if (axis_ == -1) {
axis_ = ndim - 1;
}
CAFFE_ENFORCE_GE(axis_, 0);
CAFFE_ENFORCE_LT(axis_, ndim);
const std::vector<int> X_dims(X.sizes().cbegin(), X.sizes().cend());
std::vector<int64_t> Y_dims;
Y_dims.reserve(ndim);
int prev_size = 1;
int next_size = 1;
for (const auto i : c10::irange(axis_)) {
Y_dims.push_back(X_dims[i]);
prev_size *= X_dims[i];
}
if (keep_dims_) {
Y_dims.push_back(1);
}
for (int i = axis_ + 1; i < ndim; ++i) {
Y_dims.push_back(X_dims[i]);
next_size *= X_dims[i];
}
auto* Y = Output(0, Y_dims, at::dtype<int64_t>());
const int n = X_dims[axis_];
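    // The reduction views X as a (prev_size, n, next_size) tensor and reduces
    // over the middle axis; e.g. for X of shape (2, 3, 4) with axis = 1:
    // prev_size = 2, n = 3, next_size = 4, and Y is (2, 1, 4) with keepdims.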
return reducer_(
prev_size,
next_size,
n,
X.template data<T>(),
Y->template mutable_data<int64_t>(),
&context_);
}
private:
int axis_;
const bool keep_dims_;
Reducer reducer_{};
};
template <class Context>
struct ArgMaxReducer {
template <typename T>
bool operator()(
const int prev_size,
const int next_size,
const int n,
const T* X,
int64_t* Y,
Context* context) const;
};
template <class Context>
struct ArgMinReducer {
template <typename T>
bool operator()(
const int prev_size,
const int next_size,
const int n,
const T* X,
int64_t* Y,
Context* context) const;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ARG_OPS_H_
| 2,356 | 22.107843 | 72 | h |
null | pytorch-main/caffe2/operators/assert_op.h |
#ifndef CAFFE2_OPERATORS_ASSERT_OP_H_
#define CAFFE2_OPERATORS_ASSERT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include <c10/util/irange.h>
namespace caffe2 {
template <class Context>
class AssertOp final : public Operator<Context> {
public:
template <class... Args>
explicit AssertOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
error_msg_(
this->template GetSingleArgument<std::string>("error_msg", "")) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <typename T>
bool DoRunWithType() {
// Copy into CPU context for comparison
cmp_tensor_.CopyFrom(Input(0));
auto *const cmp_data = cmp_tensor_.template data<T>();
for (const auto i : c10::irange(cmp_tensor_.numel())) {
CAFFE_ENFORCE((bool)cmp_data[i], [&]() {
std::stringstream ss;
ss << "Assert failed for element " << i
<< " in tensor, value: " << cmp_data[i] << "\n";
if (!error_msg_.empty()) {
ss << "Error message: " << error_msg_;
}
return ss.str();
}());
}
return true;
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<long, int, bool>>::call(this, Input(0));
}
private:
Tensor cmp_tensor_{CPU};
std::string error_msg_;
};
} // namespace caffe2
#endif /* CAFFE2_OPERATORS_ASSERT_OP_H_ */
| 1,374 | 24.943396 | 78 | h |
null | pytorch-main/caffe2/operators/async_net_barrier_op.h |
#ifndef CAFFE2_OPERATORS_ASYNC_BARRIER_OP_H_
#define CAFFE2_OPERATORS_ASYNC_BARRIER_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class AsyncNetBarrierOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(AsyncNetBarrierOp)
bool RunOnDevice() override {
    // This is pretty much a no-op operator, since its only purpose is to make
    // sure that async_scheduling will schedule certain operations earlier than
    // others.
    //
    // Example where this operator works well: a mixture of data-parallel and
    // model-parallel training, where one wants to force all copies to be
    // started before the data-parallel part starts.
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ASYNC_BARRIER_OP_H_
| 904 | 28.193548 | 79 | h |
null | pytorch-main/caffe2/operators/batch_box_cox_op.h |
#ifndef CAFFE_OPERATORS_BATCH_BOX_COX_OPS_H_
#define CAFFE_OPERATORS_BATCH_BOX_COX_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(BatchBoxCox);
namespace caffe2 {
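// BatchBoxCoxOp applies the two-parameter Box-Cox transform elementwise;
// conventionally y = ((x + lambda2)^lambda1 - 1) / lambda1 when lambda1 != 0,
// and y = log(x + lambda2) when lambda1 == 0 (the kernel lives in the .cc).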
template <class Context>
class BatchBoxCoxOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchBoxCoxOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
min_block_size_(
this->template GetSingleArgument<int>("min_block_size", 256)) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(DATA));
}
template <typename T>
bool DoRunWithType();
protected:
std::size_t min_block_size_;
INPUT_TAGS(DATA, LAMBDA1, LAMBDA2);
};
} // namespace caffe2
#endif // CAFFE_OPERATORS_BATCH_BOX_COX_OPS_H_
| 1,015 | 24.4 | 79 | h |
null | pytorch-main/caffe2/operators/batch_bucketize_op.h |
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef CAFFE2_OPERATORS_BATCH_BUCKETIZE_OP_H_
#define CAFFE2_OPERATORS_BATCH_BUCKETIZE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
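// BatchBucketizeOp discretizes the FEATURE columns selected by INDICES into
// bucket ids, using per-feature BOUNDARIES segmented by LENGTHS; the output O
// holds the bucketized values of the selected columns (see the .cc file).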
template <class Context>
class BatchBucketizeOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchBucketizeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override;
protected:
INPUT_TAGS(FEATURE, INDICES, BOUNDARIES, LENGTHS);
OUTPUT_TAGS(O);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BATCH_BUCKETIZE_OP_H_
| 720 | 22.258065 | 57 | h |
null | pytorch-main/caffe2/operators/batch_gather_ops.h |
#ifndef CAFFE2_OPERATORS_BATCH_GATHER_OPS_H_
#define CAFFE2_OPERATORS_BATCH_GATHER_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
// Reuse helper logic from GatherOp since BatchGather is the same with axis=1.
#include "caffe2/operators/gather_op.h"
namespace caffe2 {
template <class Context>
class BatchGatherOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchGatherOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "match_outer", match_outer_, false) {}
// virtual ~BatchGatherOp() noexcept {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, this->template Input<Tensor>(INDICES, CPU));
}
template <typename TInd>
bool DoRunWithType() {
// BatchGather is a special-case of Gather with Axis = 1.
return gather_helper::gather_impl<TInd, Context>(
this, DATA, INDICES, 0, 1, false, match_outer_);
}
INPUT_TAGS(DATA, INDICES);
protected:
bool match_outer_;
};
template <class Context>
class BatchGatherGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
// Constructor to receive axis in case it was passed for GatherOp gradient,
// use default of 1 for batch gather otherwise.
template <class... Args>
explicit BatchGatherGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, 1),
OP_SINGLE_ARG(bool, "match_outer", match_outer_, false) {}
~BatchGatherGradientOp() noexcept override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, this->template Input<Tensor>(INDICES, CPU));
}
template <typename TInd>
bool DoRunWithType() {
return DispatchHelper<
TensorTypes2<float, GenericTensorImplementation>,
TInd>::call(this, Input(DATA));
}
template <typename TInd, typename TData>
bool DoRunWithType2() {
auto& data = Input(DATA);
auto& indices = Input(INDICES);
auto& grad = Input(GRAD);
// ONNX allows negative axis to index from the back, valid range: [-r, r].
int axis = axis_;
bool match_outer = match_outer_;
if (axis < 0) {
axis = data.dim() + axis;
}
CAFFE_ENFORCE_GE(data.dim(), 2, "DATA should be at least 2-D");
// Outer dimensions of input data and gradient should be the same
// because they are preserved for gathers with axis > 0.
for (const auto acheck : c10::irange(axis)) {
CAFFE_ENFORCE_EQ(
data.size(acheck),
grad.size(acheck),
"batch gather outer dimensions should match");
}
auto* output = Output(0, data.sizes(), at::dtype<TData>());
TData* out_data = output->template mutable_data<TData>();
if (data.numel() <= 0) {
return true;
}
memset(out_data, 0, output->nbytes());
const TData* grad_data = grad.template data<TData>();
const TInd* idxs = indices.template data<TInd>();
auto outer_dims_product = data.size_to_dim(axis);
auto batch_size = data.size_from_dim(axis);
auto block_size = data.size_from_dim(axis + 1);
auto N = indices.numel();
auto idx_inner_dims_product = indices.size_from_dim(axis);
if (match_outer) {
CAFFE_ENFORCE_GE(axis, 1, "Axis should be at least 1");
for (const auto i : c10::irange(axis)) {
CAFFE_ENFORCE_EQ(
data.size(i),
indices.size(i),
"INDICES must have the same outer dims as DATA (before dim AXIS)");
}
N = idx_inner_dims_product;
}
auto gathered_grad_batch_size = N * block_size;
// Check indexing bounds.
auto src_indexing_axis_dim = data.dim(axis);
gather_helper::check_indexarray_range<TInd>(
idxs, N, src_indexing_axis_dim, false);
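    // Scatter-add pass: each slot of the output accumulates the gradient
    // blocks of every index that gathered from it (duplicate indices sum).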
for (const auto batch : c10::irange(outer_dims_product)) {
auto grad_batch_base = grad_data + batch * gathered_grad_batch_size;
auto out_batch_base = out_data + batch * batch_size;
for (const auto i : c10::irange(N)) {
auto idx = idxs[i];
if (match_outer) {
idx = idxs[batch * idx_inner_dims_product + i];
}
if (idx < 0) {
idx = idx + src_indexing_axis_dim;
}
if (block_size == 1) {
out_batch_base[idx] += grad_batch_base[i];
} else {
math::Add(
block_size,
out_batch_base + idx * block_size,
grad_batch_base + i * block_size,
out_batch_base + idx * block_size,
&context_);
}
}
}
return true;
}
template <typename TInd>
bool DoRunWithOtherType2() {
CAFFE_THROW(
"BatchGatherGradient is not implemented on tensor of type ",
Input(DATA).meta().name(),
"consider adding it as a type in the DispatchHelper list or "
"implementing a generic version (which won't work for "
"duplicated indices though)");
}
INPUT_TAGS(DATA, INDICES, GRAD);
protected:
int axis_;
bool match_outer_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BATCH_GATHER_OPS_H_
| 5,276 | 30.041176 | 79 | h |
null | pytorch-main/caffe2/operators/batch_matmul_op.h |
#ifndef CAFFE2_OPERATORS_BATCH_MATMUL_OP_H_
#define CAFFE2_OPERATORS_BATCH_MATMUL_OP_H_
#include <algorithm>
#include <functional>
#include <numeric>
#include <string>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context, class Engine = DefaultEngine>
class BatchMatMulOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchMatMulOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "trans_a", trans_a_, false),
OP_SINGLE_ARG(bool, "trans_b", trans_b_, false),
OP_SINGLE_ARG(bool, "broadcast", broadcast_, false) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& A = Input(0);
const auto& B = Input(1);
const int A_ndim = A.dim();
const int B_ndim = B.dim();
const std::vector<std::int64_t> A_dims = A.sizes().vec();
const std::vector<std::int64_t> B_dims = B.sizes().vec();
const T* A_data = A.template data<T>();
const T* B_data = B.template data<T>();
if (A_ndim == 1 && B_ndim == 1) {
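      // Both A and B are vectors: the result is a single dot product.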
CAFFE_ENFORCE_EQ(A.numel(), B.numel());
auto* Y = Output(0, {1}, at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
math::Dot<T, Context>(A.numel(), A_data, B_data, Y_data, &context_);
return true;
}
if (A_ndim == 1) {
const int N = A.numel();
if (trans_b_) {
CAFFE_ENFORCE_EQ(B_dims[B_ndim - 1], N);
} else {
CAFFE_ENFORCE_EQ(B_dims[B_ndim - 2], N);
}
std::vector<std::int64_t> Y_dims(B_ndim - 1);
if (trans_b_) {
std::copy_n(B_dims.cbegin(), B_ndim - 1, Y_dims.begin());
} else {
std::copy_n(B_dims.cbegin(), B_ndim - 2, Y_dims.begin());
Y_dims.back() = B_dims.back();
}
auto* Y = Output(0, Y_dims, at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
if (trans_b_) {
const int M = B.numel() / N;
math::Gemv<T, Context, Engine>(
CblasNoTrans, M, N, 1.0f, B_data, A_data, 0.0f, Y_data, &context_);
} else {
const int M = B_dims[B_ndim - 1];
const int batch_size = B.numel() / (M * N);
if (batch_size == 1) {
math::Gemv<T, Context, Engine>(
CblasTrans, N, M, 1.0f, B_data, A_data, 0.0f, Y_data, &context_);
} else {
math::GemmStridedBatched<T, Context, Engine>(
CblasTrans,
CblasNoTrans,
batch_size,
M,
1,
N,
1.0f,
B_data,
M * N,
A_data,
0,
0.0f,
Y_data,
M,
&context_);
}
}
return true;
}
if (B_ndim == 1) {
const int N = B.numel();
if (trans_a_) {
CAFFE_ENFORCE_EQ(A_dims[A_ndim - 2], N);
} else {
CAFFE_ENFORCE_EQ(A_dims[A_ndim - 1], N);
}
const std::vector<std::int64_t> Y_dims(
A_dims.cbegin(), A_dims.cbegin() + A_ndim - 1);
auto* Y = Output(0, Y_dims, at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
if (trans_a_) {
const int M = A_dims[A_ndim - 1];
const int batch_size = A.numel() / (M * N);
if (batch_size == 1) {
math::Gemv<T, Context, Engine>(
CblasTrans, N, M, 1.0f, A_data, B_data, 0.0f, Y_data, &context_);
} else {
math::GemmStridedBatched<T, Context, Engine>(
CblasTrans,
CblasNoTrans,
batch_size,
M,
1,
N,
1.0f,
A_data,
M * N,
B_data,
0,
0.0f,
Y_data,
M,
&context_);
}
} else {
const int M = A.numel() / N;
math::Gemv<T, Context, Engine>(
CblasNoTrans, M, N, 1.0f, A_data, B_data, 0.0f, Y_data, &context_);
}
return true;
}
const int M = trans_a_ ? A_dims[A_ndim - 1] : A_dims[A_ndim - 2];
const int K = trans_a_ ? A_dims[A_ndim - 2] : A_dims[A_ndim - 1];
if (trans_b_) {
CAFFE_ENFORCE_EQ(B_dims[B_ndim - 1], K);
} else {
CAFFE_ENFORCE_EQ(B_dims[B_ndim - 2], K);
}
const int N = trans_b_ ? B_dims[B_ndim - 2] : B_dims[B_ndim - 1];
const int ndim = std::max(A_ndim, B_ndim);
std::vector<std::int64_t> A_broadcast_dims(ndim);
std::vector<std::int64_t> B_broadcast_dims(ndim);
std::vector<std::int64_t> Y_broadcast_dims(ndim);
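    // Broadcast the leading (batch) dimensions of A and B, NumPy-style, to
    // obtain the batch shape of Y; the trailing two dims hold the M x N result.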
math::utils::ComputeBroadcastBinaryOpDims(
A_ndim - 2,
A_dims.data(),
B_ndim - 2,
B_dims.data(),
A_broadcast_dims.data(),
B_broadcast_dims.data(),
Y_broadcast_dims.data());
Y_broadcast_dims[ndim - 2] = M;
Y_broadcast_dims[ndim - 1] = N;
auto* Y = Output(0, Y_broadcast_dims, at::dtype<T>());
T* Y_data = Y->template mutable_data<T>();
const int batch_dim = ndim - 2;
const bool is_broadcast_dims = !std::equal(
A_broadcast_dims.cbegin(),
A_broadcast_dims.cbegin() + batch_dim,
B_broadcast_dims.cbegin());
if (is_broadcast_dims) {
CAFFE_ENFORCE(broadcast_);
}
const std::int64_t A_batch_size = std::accumulate(
A_broadcast_dims.cbegin(),
A_broadcast_dims.cbegin() + batch_dim,
1LL,
std::multiplies<std::int64_t>());
const std::int64_t B_batch_size = std::accumulate(
B_broadcast_dims.cbegin(),
B_broadcast_dims.cbegin() + batch_dim,
1LL,
std::multiplies<std::int64_t>());
const std::int64_t Y_batch_size = std::accumulate(
Y_broadcast_dims.cbegin(),
Y_broadcast_dims.cbegin() + batch_dim,
1LL,
std::multiplies<std::int64_t>());
if (Y_batch_size == 0) {
return true;
}
if (A_batch_size == 1 && B_batch_size == 1) {
math::Gemm<T, Context, Engine>(
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
M,
N,
K,
1.0f,
A_data,
B_data,
0.0f,
Y_data,
&context_);
} else if (A_batch_size == 1) {
if (M == 1 && trans_b_) {
math::Gemv<T, Context, Engine>(
CblasNoTrans,
B_batch_size * N,
K,
1.0f,
B_data,
A_data,
0.0f,
Y_data,
&context_);
} else {
math::GemmStridedBatched<T, Context, Engine>(
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
Y_batch_size,
M,
N,
K,
1.0f,
A_data,
0,
B_data,
K * N,
0.0f,
Y_data,
M * N,
&context_);
}
} else if (B_batch_size == 1) {
if (!trans_a_) {
math::Gemm<T, Context, Engine>(
CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
A_batch_size * M,
N,
K,
1.0f,
A_data,
B_data,
0.0f,
Y_data,
&context_);
} else {
math::GemmStridedBatched<T, Context, Engine>(
CblasTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
Y_batch_size,
M,
N,
K,
1.0f,
A_data,
M * K,
B_data,
0,
0.0f,
Y_data,
M * N,
&context_);
}
} else if (!is_broadcast_dims) {
math::GemmStridedBatched<T, Context, Engine>(
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
Y_batch_size,
M,
N,
K,
1.0f,
A_data,
M * K,
B_data,
K * N,
0.0f,
Y_data,
M * N,
&context_);
} else {
std::vector<const T*> A_ptr(Y_batch_size);
std::vector<const T*> B_ptr(Y_batch_size);
std::vector<T*> Y_ptr(Y_batch_size);
std::vector<std::int64_t> index(batch_dim);
for (std::int64_t i = 0; i < Y_batch_size; ++i) {
const std::int64_t A_index = math::utils::GetIndexFromDims(
batch_dim, A_broadcast_dims.data(), index.data());
const std::int64_t B_index = math::utils::GetIndexFromDims(
batch_dim, B_broadcast_dims.data(), index.data());
A_ptr[i] = A_data + A_index * M * K;
B_ptr[i] = B_data + B_index * K * N;
Y_ptr[i] = Y_data + i * M * N;
math::utils::IncreaseIndexInDims(
batch_dim, Y_broadcast_dims.data(), index.data());
}
math::GemmBatched<T, Context, Engine>(
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
Y_batch_size,
M,
N,
K,
1.0f,
A_ptr.data(),
B_ptr.data(),
0.0f,
Y_ptr.data(),
&context_);
}
return true;
}
private:
const bool trans_a_;
const bool trans_b_;
const bool broadcast_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BATCH_MATMUL_OP_H_
| 9,602 | 28.457055 | 79 | h |
null | pytorch-main/caffe2/operators/batch_moments_op.h |
#ifndef CAFFE2_OPERATORS_BATCH_MOMENTS_OP_H_
#define CAFFE2_OPERATORS_BATCH_MOMENTS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
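// BatchMomentsOp reduces X over the batch and spatial dimensions, producing
// per-channel moment outputs mu and var, each of shape {C}.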
template <typename T, class Context>
class BatchMomentsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchMomentsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))) {
CAFFE_ENFORCE_NE(order_, StorageOrder::UNKNOWN);
}
bool RunOnDevice() override {
const auto& X = Input(0);
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1);
const int HxW = X.numel() / (N * C);
auto* mu = Output(0, {C}, at::dtype<T>());
auto* var = Output(1, {C}, at::dtype<T>());
const T* X_data = X.template data<T>();
T* mu_data = mu->template mutable_data<T>();
T* var_data = var->template mutable_data<T>();
return order_ == StorageOrder::NCHW
? ComputeBatchMomentsNCHW(N, C, HxW, X_data, mu_data, var_data)
: ComputeBatchMomentsNHWC(N, C, HxW, X_data, mu_data, var_data);
}
private:
bool ComputeBatchMomentsNCHW(
const int N,
const int C,
const int HxW,
const T* X,
T* mu,
T* var);
bool ComputeBatchMomentsNHWC(
const int N,
const int C,
const int HxW,
const T* X,
T* mu,
T* var);
const StorageOrder order_;
};
template <typename T, class Context>
class BatchMomentsGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchMomentsGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))) {
CAFFE_ENFORCE_NE(order_, StorageOrder::UNKNOWN);
}
bool RunOnDevice() override {
const auto& dmu = Input(0);
const auto& dvar = Input(1);
const auto& X = Input(2);
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1);
const int HxW = X.numel() / (N * C);
auto* dX = Output(0, X.sizes(), at::dtype<T>());
const T* dmu_data = dmu.template data<T>();
const T* dvar_data = dvar.template data<T>();
const T* X_data = X.template data<T>();
T* dX_data = dX->template mutable_data<T>();
return order_ == StorageOrder::NCHW
? ComputeBatchMomentsGradientNCHW(
N, C, HxW, dmu_data, dvar_data, X_data, dX_data)
: ComputeBatchMomentsGradientNHWC(
N, C, HxW, dmu_data, dvar_data, X_data, dX_data);
}
private:
bool ComputeBatchMomentsGradientNCHW(
const int N,
const int C,
const int HxW,
const T* dmu,
const T* dvar,
const T* X,
T* dX);
bool ComputeBatchMomentsGradientNHWC(
const int N,
const int C,
const int HxW,
const T* dmu,
const T* dvar,
const T* X,
T* dX);
const StorageOrder order_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BATCH_MOMENTS_OP_H_
| 3,364 | 27.277311 | 80 | h |
null | pytorch-main/caffe2/operators/batch_permutation_op.h |
#ifndef BATCHPERMUTATION_OP_H_
#define BATCHPERMUTATION_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(BatchPermutation)
namespace caffe2 {
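// BatchPermutationOp reorders the first (batch) dimension of its input
// according to an index tensor; the kernels live in the .cc/.cu files.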
template <typename T, class Context>
class BatchPermutationOp final : public Operator<Context> {
public:
template <class... Args>
explicit BatchPermutationOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
template <typename T, class Context>
class BatchPermutationGradientOp final : public Operator<Context> {
public:
BatchPermutationGradientOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
} // namespace caffe2
#endif // BATCHPERMUTATION_OP_H_
| 972 | 24.605263 | 67 | h |
null | pytorch-main/caffe2/operators/batch_sparse_to_dense_op.h |
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef CAFFE2_OPERATORS_BATCH_SPARSE_TO_DENSE_OP_H_
#define CAFFE2_OPERATORS_BATCH_SPARSE_TO_DENSE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
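// Example (semantics assumed from the input tags): lengths = [2, 1],
// indices = [0, 2, 1], values = [a, b, c], dense_last_dim = 3 yields
// [[a, 0, b], [0, c, 0]], where unset entries take default_value (here 0).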
template <typename T, class Context>
class BatchSparseToDenseOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_DISPATCH_HELPER;
template <class... Args>
explicit BatchSparseToDenseOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int64_t, "dense_last_dim", dense_last_dim_, -1),
OP_SINGLE_ARG(T, "default_value", default_value_, static_cast<T>(0)) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(LENGTHS));
}
private:
template <typename TLen, typename TInd>
void FillInDenseValues(
const int64_t batch_size,
const int64_t indice_lengths,
const TLen* lengths_data,
const TInd* indices_data,
const T* values_data,
T* output_data,
Context* context);
template <typename TLen>
bool DoRunWithType() {
return DispatchHelper<
TensorTypes2<
int32_t,
int64_t,
GenericTensorImplementation>,
TLen>::call(this, Input(INDICES));
}
template <typename TLen, typename TInd>
bool DoRunWithType2() {
auto& lengths = Input(LENGTHS);
auto& indices = Input(INDICES);
auto& values = Input(VALUES);
CAFFE_ENFORCE_EQ(indices.numel(), values.numel());
CAFFE_ENFORCE_EQ(lengths.dim(), 1);
CAFFE_ENFORCE_EQ(indices.dim(), 1);
const TLen* lengths_data = lengths.template data<TLen>();
const TInd* indices_data = indices.template data<TInd>();
const T* values_data = values.template data<T>();
int64_t batch_size = lengths.numel();
vector<int64_t> output_shape = {batch_size};
if (InputSize() == 4) {
auto& shaper = Input(3);
CAFFE_ENFORCE_EQ(shaper.dim(), 2);
if (dense_last_dim_ == -1) {
dense_last_dim_ = shaper.size(1);
} else {
CAFFE_ENFORCE(
dense_last_dim_ == shaper.size(1),
"The last dim argument is not aligned with the shape input last dim");
}
} else {
CAFFE_ENFORCE(dense_last_dim_ >= 1, "The last dim of dense must be >= 1");
}
output_shape.push_back(dense_last_dim_);
auto* output = Output(0, output_shape, at::dtype<T>());
T* output_data = output->template mutable_data<T>();
math::Set(
output->numel(),
static_cast<T>(default_value_),
output_data,
&context_);
FillInDenseValues(
batch_size,
indices.numel(),
lengths_data,
indices_data,
values_data,
output_data,
&context_);
return true;
}
template <typename TLen>
bool DoRunWithOtherType2() {
CAFFE_THROW(
"BatchSparseToDense is not implemented on values of type ",
Input(VALUES).dtype().name(),
" with lengths of type ",
Input(LENGTHS).dtype().name(),
" and indices of type ",
Input(INDICES).dtype().name());
}
int64_t dense_last_dim_;
T default_value_;
INPUT_TAGS(LENGTHS, INDICES, VALUES);
  // len_prefix_sum_ and len_prefix_tmp_ are buffers on the GPU. They are not
  // used in the CPUContext implementation.
Tensor len_prefix_sum_{Context::GetDeviceType()};
Tensor len_prefix_tmp_{Context::GetDeviceType()};
};
template <typename T, class Context>
class BatchDenseToSparseOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_DISPATCH_HELPER;
template <class... Args>
explicit BatchDenseToSparseOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
  bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(LENGTHS));
}
private:
template <typename TLen, typename TInd>
void FillInSparseValues(
const int64_t batch_size,
const int64_t indice_lengths,
const TLen* lengths_data,
const TInd* indices_data,
const T* dense_data,
T* output_data,
Context* context);
template <typename TLen>
bool DoRunWithType() {
return DispatchHelper<
TensorTypes2<
int32_t,
int64_t,
GenericTensorImplementation>,
TLen>::call(this, Input(INDICES));
}
template <typename TLen, typename TInd>
bool DoRunWithType2() {
auto& lengths = Input(LENGTHS);
auto& indices = Input(INDICES);
auto& dense = Input(DENSE);
CAFFE_ENFORCE_EQ(lengths.dim(), 1);
CAFFE_ENFORCE_EQ(indices.dim(), 1);
CAFFE_ENFORCE_EQ(dense.dim(), 2);
const TLen* lengths_data = lengths.template data<TLen>();
const TInd* indices_data = indices.template data<TInd>();
const T* dense_data = dense.template data<T>();
int64_t batch_size = lengths.numel();
CAFFE_ENFORCE_EQ(batch_size, dense.size(0));
dense_last_dim_ = dense.size(1);
vector<int64_t> output_shape = indices.sizes().vec();
auto* output = Output(0, output_shape, at::dtype<T>());
T* output_data = output->template mutable_data<T>();
FillInSparseValues(
batch_size,
indices.numel(),
lengths_data,
indices_data,
dense_data,
output_data,
&context_);
return true;
}
template <typename TLen>
bool DoRunWithOtherType2() {
CAFFE_THROW(
"BatchDenseToSparse is not implemented on values of type ",
Input(DENSE).dtype().name(),
" with lengths of type ",
Input(LENGTHS).dtype().name(),
" and indices of type ",
Input(INDICES).dtype().name());
}
int64_t dense_last_dim_{};
INPUT_TAGS(LENGTHS, INDICES, DENSE);
  // len_prefix_sum_ and len_prefix_tmp_ are buffers on the GPU. They are not
  // used in the CPUContext implementation.
Tensor len_prefix_sum_{Context::GetDeviceType()};
Tensor len_prefix_tmp_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BATCH_SPARSE_TO_DENSE_OP_H_
| 6,155 | 28.454545 | 82 | h |
null | pytorch-main/caffe2/operators/bbox_transform_op.h |
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef BBOX_TRANSFORM_OP_H_
#define BBOX_TRANSFORM_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(BBoxTransform)
namespace caffe2 {
template <typename T, class Context>
class BBoxTransformOp final : public Operator<Context> {
public:
template <class... Args>
explicit BBoxTransformOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
weights_(this->template GetRepeatedArgument<T>(
"weights",
vector<T>{1.0f, 1.0f, 1.0f, 1.0f})),
apply_scale_(
this->template GetSingleArgument<bool>("apply_scale", true)),
rotated_(this->template GetSingleArgument<bool>("rotated", false)),
angle_bound_on_(
this->template GetSingleArgument<bool>("angle_bound_on", true)),
angle_bound_lo_(
this->template GetSingleArgument<int>("angle_bound_lo", -90)),
angle_bound_hi_(
this->template GetSingleArgument<int>("angle_bound_hi", 90)),
clip_angle_thresh_(
this->template GetSingleArgument<float>("clip_angle_thresh", 1.0)),
legacy_plus_one_(
this->template GetSingleArgument<bool>("legacy_plus_one", true)) {
CAFFE_ENFORCE_EQ(
weights_.size(),
4,
"weights size " + c10::to_string(weights_.size()) + "must be 4.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// weights [wx, wy, ww, wh] to apply to the regression target
vector<T> weights_;
// Transform the boxes to the scaled image space after applying the bbox
// deltas.
// Set to false to match the detectron code, set to true for the keypoint
// model and for backward compatibility
bool apply_scale_{true};
// Set for RRPN case to handle rotated boxes. Inputs should be in format
// [ctr_x, ctr_y, width, height, angle (in degrees)].
bool rotated_{false};
// If set, for rotated boxes in RRPN, output angles are normalized to be
// within [angle_bound_lo, angle_bound_hi].
bool angle_bound_on_{true};
int angle_bound_lo_{-90};
int angle_bound_hi_{90};
// For RRPN, clip almost horizontal boxes within this threshold of
// tolerance for backward compatibility. Set to negative value for
// no clipping.
float clip_angle_thresh_{1.0};
// The infamous "+ 1" for box width and height dating back to the DPM days
bool legacy_plus_one_{true};
};
} // namespace caffe2
#endif // BBOX_TRANSFORM_OP_H_
| 2,668 | 35.067568 | 79 | h |
null | pytorch-main/caffe2/operators/bisect_percentile_op.h |
#ifndef CAFFE2_OPERATORS_BISECT_PERCENTILE_OP_H_
#define CAFFE2_OPERATORS_BISECT_PERCENTILE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class BisectPercentileOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BisectPercentileOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
pct_raw_(OperatorBase::GetRepeatedArgument<float>(
"percentile_raw",
vector<float>{})),
pct_mapping_(OperatorBase::GetRepeatedArgument<float>(
"percentile_mapping",
vector<float>{})),
pct_lower_(OperatorBase::GetRepeatedArgument<float>(
"percentile_lower",
vector<float>{})),
pct_upper_(OperatorBase::GetRepeatedArgument<float>(
"percentile_upper",
vector<float>{})),
pct_lens_(
OperatorBase::GetRepeatedArgument<int>("lengths", vector<int>{})) {
CAFFE_ENFORCE_EQ(
pct_raw_.size(),
pct_mapping_.size(),
"Feature (raw) data and percentile value dimension should match.");
CAFFE_ENFORCE_EQ(
pct_raw_.size(),
pct_lower_.size(),
"Feature (raw) data and lower bound dimension should match.");
CAFFE_ENFORCE_EQ(
pct_raw_.size(),
pct_upper_.size(),
"Feature (raw) data and upper bound dimension should match.");
n_features = pct_lens_.size();
index.resize(n_features + 1);
index[0] = 0;
for (int i = 1; i <= n_features; ++i) {
index[i] = index[i - 1] + pct_lens_[i - 1];
}
CAFFE_ENFORCE_EQ(
index[n_features], // The sum of lengths_data
pct_raw_.size(),
"Sum of lengths should be equal to the total number of percentile "
"mapping data samples");
}
bool RunOnDevice() override {
// Input
const auto& raw = Input(RAW);
CAFFE_ENFORCE_EQ(raw.dim(), 2);
const auto batch_size = raw.size(0);
const auto num_features = raw.size(1);
CAFFE_ENFORCE_EQ(num_features, pct_lens_.size());
const float *const raw_data = raw.template data<float>();
// Output
auto *const pct = Output(PCT, raw.sizes(), at::dtype<float>());
float *const pct_output = pct->template mutable_data<float>();
// Compute percentile for each raw feature value
int feature_start_index = 0;
int feature_length = 0;
int cur_index = 0;
for (const auto i : c10::irange(num_features)) {
cur_index = i;
feature_start_index = index[i];
feature_length = pct_lens_[i];
for (const auto j : c10::irange(batch_size)) {
(void)j; // Suppress unused variable warning
pct_output[cur_index] = compute_percentile(
pct_raw_.begin() + feature_start_index,
pct_mapping_.begin() + feature_start_index,
pct_lower_.begin() + feature_start_index,
pct_upper_.begin() + feature_start_index,
feature_length,
raw_data[cur_index]);
cur_index += num_features;
}
}
return true;
}
protected:
INPUT_TAGS(RAW);
OUTPUT_TAGS(PCT);
private:
int n_features;
vector<float> pct_raw_;
vector<float> pct_mapping_;
vector<float> pct_lower_;
vector<float> pct_upper_;
vector<int> pct_lens_;
vector<int> index;
vector<std::map<float, float>> fast_pct;
static constexpr float kEPSILON = 1e-10;
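  // Bisection over sorted data: returns k with data[k] <= val < data[k + 1].
  // compute_percentile guarantees val lies within [data[0], data[size - 1]].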
int64_t binary_search(
const std::vector<float>::iterator& data,
int64_t lo,
int64_t hi,
const float val) {
while (lo < hi) {
const auto mid = lo + (hi - lo) / 2;
const bool low_cond = (data[mid] <= val);
const bool high_cond = (val < data[mid + 1]);
if (low_cond && high_cond) {
return mid;
} else if (!low_cond) {
hi = mid - 1;
} else {
lo = mid + 1;
}
}
return lo;
}
float compute_percentile(
const std::vector<float>::iterator& pct_raw_it,
const std::vector<float>::iterator& pct_mapping_it,
const std::vector<float>::iterator& pct_lower_it,
const std::vector<float>::iterator& pct_upper_it,
const int size,
const float val) {
// Corner cases where no interpolation is needed.
if (val < pct_raw_it[0]) {
return 0.;
}
if (val > pct_raw_it[size - 1]) {
return 1.;
}
// Interpolation by binary search
const auto k = binary_search(pct_raw_it, 0, size - 1, val);
if (pct_raw_it[k] == val) {
// Exact match
return pct_mapping_it[k];
} else {
// interpolation
const float w = (val - pct_raw_it[k]) /
(pct_raw_it[k + 1] - pct_raw_it[k] + kEPSILON);
return (1 - w) * pct_upper_it[k] + w * pct_lower_it[k + 1];
}
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BISECT_PERCENTILE_OP_H_
| 5,023 | 29.26506 | 79 | h |
null | pytorch-main/caffe2/operators/boolean_mask_ops.h |
#ifndef CAFFE2_OPERATORS_BOOLEAN_MASK_OPS_H_
#define CAFFE2_OPERATORS_BOOLEAN_MASK_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
template <class Context>
class BooleanMaskOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BooleanMaskOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override;
};
template <class Context>
class BooleanMaskOpGradient final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
BooleanMaskOpGradient(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
/* Calculating the gradient of the Boolean Mask operator
* requires access to the original mask that's passed in,
* and the gradient to backpropagate.
*/
bool RunOnDevice() override {
return DispatchHelper<
TensorTypes<bool, std::int32_t, std::int64_t, float, double>>::
call(this, Input(1));
}
template <typename T>
bool DoRunWithType();
};
template <class Context>
class SequenceMaskOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit SequenceMaskOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
axis_(this->template GetSingleArgument<int>("axis", 1)),
radius_(this->template GetSingleArgument<int>("radius", 10)),
grad_(this->template GetSingleArgument<bool>("grad", false)),
fill_val_(this->template GetSingleArgument<float>(
"fill_val",
-1.0f * std::numeric_limits<float>::infinity())) {
// Mode argument is required
mode_ = GetArgument(operator_def, "mode").s();
    // batch argument is optional, but if not given, we don't want a default value
if (HasArgument("batch")) {
batch_ = GetArgument(operator_def, "batch").i();
}
if (HasArgument("repeat_from_axis")) {
CAFFE_ENFORCE(
mode_ == "sequence",
"repeat_from_axis currently only supported in sequence mode.");
CAFFE_ENFORCE(
!HasArgument("batch"),
"repeat_from_axis and batch not currently supported together.");
repeat_from_ =
this->template GetSingleArgument<int>("repeat_from_axis", -1);
}
}
bool RunOnDevice() override;
template <typename T>
bool DoRunWithType();
private:
int axis_;
int radius_;
std::string mode_;
bool grad_;
float fill_val_;
int batch_;
int repeat_from_;
};
} // namespace caffe2
#endif
| 2,665 | 27.978261 | 80 | h |
null | pytorch-main/caffe2/operators/box_with_nms_limit_op.h |
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef BOX_WITH_NMS_AND_LIMIT_OP_H_
#define BOX_WITH_NMS_AND_LIMIT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(BoxWithNMSLimit)
namespace caffe2 {
// C++ implementation of function insert_box_results_with_nms_and_limit()
template <class Context>
class BoxWithNMSLimitOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BoxWithNMSLimitOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
score_thres_(
this->template GetSingleArgument<float>("score_thresh", 0.05)),
nms_thres_(this->template GetSingleArgument<float>("nms", 0.3)),
detections_per_im_(
this->template GetSingleArgument<int>("detections_per_im", 100)),
soft_nms_enabled_(
this->template GetSingleArgument<bool>("soft_nms_enabled", false)),
soft_nms_method_str_(this->template GetSingleArgument<std::string>(
"soft_nms_method",
"linear")),
soft_nms_sigma_(
this->template GetSingleArgument<float>("soft_nms_sigma", 0.5)),
soft_nms_min_score_thres_(this->template GetSingleArgument<float>(
"soft_nms_min_score_thres",
0.001)),
rotated_(this->template GetSingleArgument<bool>("rotated", false)),
cls_agnostic_bbox_reg_(this->template GetSingleArgument<bool>(
"cls_agnostic_bbox_reg",
false)),
input_boxes_include_bg_cls_(this->template GetSingleArgument<bool>(
"input_boxes_include_bg_cls",
true)),
output_classes_include_bg_cls_(this->template GetSingleArgument<bool>(
"output_classes_include_bg_cls",
true)),
legacy_plus_one_(
this->template GetSingleArgument<bool>("legacy_plus_one", true)) {
CAFFE_ENFORCE(
soft_nms_method_str_ == "linear" || soft_nms_method_str_ == "gaussian",
"Unexpected soft_nms_method");
soft_nms_method_ = (soft_nms_method_str_ == "linear") ? 1 : 2;
    // When the input `boxes` doesn't include the background class, the scores
    // skip the background class and start with the foreground classes directly,
    // putting the background class at the end, i.e. score[:, 0:NUM_CLASSES-1]
    // holds the foreground classes and score[:, NUM_CLASSES-1] the background.
input_scores_fg_cls_starting_id_ = (int)input_boxes_include_bg_cls_;
}
~BoxWithNMSLimitOp() override {}
bool RunOnDevice() override {
if (InputSize() > 2) {
return DispatchHelper<TensorTypes<int, float>>::call(this, Input(2));
} else {
return DoRunWithType<float>();
}
}
template <typename T>
bool DoRunWithType();
protected:
// TEST.SCORE_THRESH
float score_thres_ = 0.05;
// TEST.NMS
float nms_thres_ = 0.3;
// TEST.DETECTIONS_PER_IM
int detections_per_im_ = 100;
// TEST.SOFT_NMS.ENABLED
bool soft_nms_enabled_ = false;
// TEST.SOFT_NMS.METHOD
std::string soft_nms_method_str_ = "linear";
unsigned int soft_nms_method_ = 1; // linear
// TEST.SOFT_NMS.SIGMA
float soft_nms_sigma_ = 0.5;
// Lower-bound on updated scores to discard boxes
float soft_nms_min_score_thres_ = 0.001;
// Set for RRPN case to handle rotated boxes. Inputs should be in format
// [ctr_x, ctr_y, width, height, angle (in degrees)].
bool rotated_{false};
// MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
bool cls_agnostic_bbox_reg_{false};
// Whether input `boxes` includes background class. If true, boxes will have
// shape of (N, (num_fg_class+1) * 4or5), otherwise (N, num_fg_class * 4or5)
bool input_boxes_include_bg_cls_{true};
// Whether output `classes` includes background class. If true, index 0 will
// represent background, and valid outputs start from 1.
bool output_classes_include_bg_cls_{true};
  // The index where foreground classes start in the scores. E.g., if 0
  // represents the background class, foreground classes start at 1.
int input_scores_fg_cls_starting_id_{1};
// The infamous "+ 1" for box width and height dating back to the DPM days
bool legacy_plus_one_{true};
  // Map a class id (background first, then foreground) from (0, 1,
  // ..., NUM_FG_CLASSES) to its matching index in `boxes`.
inline int get_box_cls_index(int bg_fg_cls_id) {
if (cls_agnostic_bbox_reg_) {
return 0;
} else if (!input_boxes_include_bg_cls_) {
return bg_fg_cls_id - 1;
} else {
return bg_fg_cls_id;
}
}
  // Map a class id (background first, then foreground) from (0, 1,
  // ..., NUM_FG_CLASSES) to its matching index in `scores`.
inline int get_score_cls_index(int bg_fg_cls_id) {
return bg_fg_cls_id - 1 + input_scores_fg_cls_starting_id_;
}
};
} // namespace caffe2
#endif // BOX_WITH_NMS_AND_LIMIT_OP_H_
| 4,969 | 37.828125 | 79 | h |
null | pytorch-main/caffe2/operators/bucketize_op.h |
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef CAFFE2_OPERATORS_BUCKETIZE_OP_H_
#define CAFFE2_OPERATORS_BUCKETIZE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(Bucketize);
namespace caffe2 {
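// BucketizeOp maps each element of X to the index of the bucket it falls
// into, as delimited by the sorted `boundaries`; outputs are integers in
// [0, boundaries.size()].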
template <class Context>
class BucketizeOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BucketizeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
boundaries_(this->template GetRepeatedArgument<float>("boundaries")) {
CAFFE_ENFORCE(
std::is_sorted(boundaries_.begin(), boundaries_.end()),
"The boundaries need to be monotonically increasing");
boundaries_device_.Resize(boundaries_.size());
context_.template CopyFromCPU<float>(
boundaries_.size(),
boundaries_.data(),
boundaries_device_.mutable_data<float>());
context_.FinishDeviceComputation();
}
bool RunOnDevice() override;
protected:
INPUT_TAGS(X);
OUTPUT_TAGS(INDICES);
private:
std::vector<float> boundaries_;
Tensor boundaries_device_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BUCKETIZE_OP_H_
| 1,361 | 25.705882 | 78 | h |
null | pytorch-main/caffe2/operators/byte_weight_dequant_op.h |
#ifndef CAFFE2_OPERATORS_BYTE_WEIGHT_DEQUANT_OP_H_
#define CAFFE2_OPERATORS_BYTE_WEIGHT_DEQUANT_OP_H_
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename Context>
class ByteWeightDequantOp : public Operator<Context> {
public:
ByteWeightDequantOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
min_(this->template GetSingleArgument<float>("min", -3)),
max_(this->template GetSingleArgument<float>("max", 3)),
shape_(this->template GetRepeatedArgument<int64_t>("shape")) {}
USE_OPERATOR_FUNCTIONS(Context);
using Operator<Context>::Operator;
bool RunOnDevice() override {
const auto& WI = Input(0);
auto* Y = Output(0, shape_, at::dtype<float>());
float bin_interval = (max_ - min_) / 255.0;
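    // Linear dequantization: byte value b is mapped to min_ + b * bin_interval,
    // so 0 -> min_ and 255 -> max_.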
int total = 1;
for (const auto i : c10::irange(0U, shape_.size())) {
total *= Y->size(i);
}
const uint8_t* Xdata;
if (WI.template IsType<uint8_t>()) {
      CAFFE_ENFORCE_EQ(total, WI.nbytes()); // one quantized byte per weight
Xdata = WI.template data<uint8_t>();
} else {
      CAFFE_ENFORCE_EQ(total, WI.template data<std::string>()[0].size());
Xdata = reinterpret_cast<const uint8_t*>(
WI.template data<std::string>()[0].c_str());
}
auto* Ydata = Y->template mutable_data<float>();
ConstEigenVectorMap<uint8_t> index(&Xdata[0], total);
EigenVectorMap<float> weights(&Ydata[0], total);
weights = (index.cast<float>().array() * bin_interval) + min_;
return true;
}
private:
float min_;
float max_;
std::vector<int64_t> shape_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_BYTE_WEIGHT_DEQUANT_OP_H_
| 1,732 | 29.946429 | 71 | h |
null | pytorch-main/caffe2/operators/cast_op.h |
#pragma once
#include <c10/util/irange.h>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/cast.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class CastOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit CastOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {
const ArgumentHelper helper(operator_def);
TensorProto_DataType to = cast::GetCastDataType(helper, "to");
SetBody(to);
}
bool RunOnDevice() override {
return (this->*body_)();
}
// Allow for Context-specific implementations
void SetBody(TensorProto_DataType to);
template <typename DstType>
bool DoRunWithDstType();
template <typename DstType, typename SrcType>
bool DoRunWithType() {
auto& input = Input(0);
auto* output = Output(0);
output->ResizeLike(input);
const auto* data = input.template data<SrcType>();
auto* out = output->template mutable_data<DstType>();
auto N = input.size();
for (const auto i : c10::irange(N)) {
out[i] = static_cast<DstType>(data[i]);
}
return true;
}
private:
bool (CastOp::*body_)();
};
} // namespace caffe2
| 1,350 | 23.125 | 66 | h |
null | pytorch-main/caffe2/operators/cc_bmm_bg_op.h |
#ifndef CAFFE2_FB_OPERATORS_CC_BMM_BG_H_
#define CAFFE2_FB_OPERATORS_CC_BMM_BG_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
using T = float;
using TInd = int;
using Engine = DefaultEngine;
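// For each batch item, this op concatenates the embedding inputs (1..N) into
// an (ndata x embed_size) matrix S, forms the (ndata x ndata) similarity
// matrix S * S^T with a single GEMM, and gathers its entries by INDICES.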
template <class Context>
class ConcatBatchMatMulBatchGatherOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
ConcatBatchMatMulBatchGatherOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override;
protected:
int axis_ = 1;
int add_axis_ = 1;
  bool trans_a_ = false;
  bool trans_b_ = true;
  bool broadcast_ = false;
};
template <class Context>
bool ConcatBatchMatMulBatchGatherOp<Context>::RunOnDevice() {
auto& indices = Input(0);
auto& input_zero = Input(1);
int adj_size = input_zero.dim() + 1;
int canonical_axis = 1;
CAFFE_ENFORCE_LT(canonical_axis, adj_size, "Axis not in input ndim range.");
for (const auto i : c10::irange(2, InputSize())) {
CAFFE_ENFORCE(
Input(i).dtype() == input_zero.dtype(),
"All inputs must have the same type, expected: ",
input_zero.dtype().name(),
" but got: ",
Input(i).dtype().name(),
" for input: ",
i);
}
int before = 1, after = 1;
for (const auto i : c10::irange(input_zero.dim())) {
int dim = input_zero.dim32(i);
if (i < canonical_axis) {
before *= dim;
} else { // i > canonical_axis || i == canonical_axis && add_axis_
after *= dim;
}
// check the input dims are compatible.
for (const auto j : c10::irange(2, InputSize())) {
int dim_j = Input(j).dim32(i);
CAFFE_ENFORCE(
dim == dim_j,
"Expect dimension = ",
dim,
" got ",
dim_j,
" at axis = ",
i,
" for input: ",
j,
". The input tensors can only have different dimensions "
"when arg 'add_axis' = 0 and along the axis = ",
canonical_axis,
" <",
input_zero.sizes(),
"> vs <",
Input(j).sizes(),
">.");
}
}
auto ndata = InputSize() - 1;
auto batch_size = before;
auto embed_size = after;
auto gather_size = indices.sizes()[0];
vector<int64_t> output_dims;
output_dims.push_back(batch_size);
output_dims.insert(
output_dims.begin() + 1, indices.sizes().begin(), indices.sizes().end());
auto* output = Output(0, output_dims, at::dtype<T>());
// std::stringstream ss;
// ss << "[";
// for (const auto i : c10::irange(output_dims.size()))ss << output_dims[i];
// ss << "]";
// LOG(INFO) << "output size: " << ss.str();
auto* output_data = output->template mutable_data<T>();
auto* indices_data = indices.template data<TInd>();
#pragma omp parallel
{
std::vector<T> scratch_input(ndata * embed_size);
std::vector<T> scratch_output(ndata * ndata);
#pragma omp for
for (int b = 0; b < batch_size; ++b) {
// concat input to scratch
for (const auto i : c10::irange(1, InputSize())) {
auto* input_data = Input(i).template data<T>();
memcpy(
&scratch_input[(i - 1) * embed_size],
input_data + b * embed_size,
embed_size * Input(i).itemsize());
}
// call mkl gemm
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasTrans,
ndata,
ndata,
embed_size,
1,
&scratch_input[0],
&scratch_input[0],
0,
&scratch_output[0],
&context_);
// do gather
int64_t output_offset = b * gather_size;
for (const auto i : c10::irange(gather_size)) {
output_data[output_offset + i] = scratch_output[indices_data[i]];
}
}
}
return true;
}
} // namespace caffe2
#endif // CAFFE2_FB_OPERATORS_CC_BMM_BG_H_
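// A minimal standalone sketch (illustrative names, plain loops in place of
// the batched GEMM) of the per-batch computation above: form the
// ndata x ndata Gram matrix of the concatenated input rows, then gather the
// entries selected by flat `indices` into the output.
#include <cstddef>
#include <vector>
inline std::vector<float> GramThenGather(
    const std::vector<std::vector<float>>& rows, // ndata rows of embed_size
    const std::vector<int>& indices) {
  const std::size_t ndata = rows.size();
  std::vector<float> gram(ndata * ndata, 0.0f);
  for (std::size_t i = 0; i < ndata; ++i) {
    for (std::size_t j = 0; j < ndata; ++j) {
      float dot = 0.0f;
      for (std::size_t k = 0; k < rows[i].size(); ++k) {
        dot += rows[i][k] * rows[j][k];
      }
      gram[i * ndata + j] = dot;
    }
  }
  std::vector<float> out;
  out.reserve(indices.size());
  for (int idx : indices) {
    out.push_back(gram[idx]);
  }
  return out;
}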
pytorch-main/caffe2/operators/ceil_op.h
#ifndef CAFFE2_OPERATORS_CEIL_OP_H_
#define CAFFE2_OPERATORS_CEIL_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include <c10/util/irange.h>
namespace caffe2 {
template <typename T, class Context>
class CeilOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(CeilOp);
bool RunOnDevice() override {
auto& X = Input(0);
auto *const Y = Output(0, X.sizes(), at::dtype<float>());
const float *const Xdata = X.template data<float>();
float *const Ydata = Y->template mutable_data<float>();
for (const auto i : c10::irange(X.numel())) {
Ydata[i] = std::ceil(Xdata[i]);
}
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CEIL_OP_H_
pytorch-main/caffe2/operators/channel_backprop_stats_op.h
#ifndef CHANNEL_BACKPROP_STATS_OP_H
#define CHANNEL_BACKPROP_STATS_OP_H
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class ChannelBackpropStatsOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ChannelBackpropStatsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
~ChannelBackpropStatsOp() override {}
bool RunOnDevice() override {
return true;
}
protected:
INPUT_TAGS(INPUT, SAVED_MEAN, SAVED_INV_STDDEV, OUTPUT_GRAD);
OUTPUT_TAGS(SCALE_GRAD, BIAS_GRAD);
Tensor dBiasScratch_;
Tensor dScaleScratch_;
};
} // namespace caffe2
#endif
pytorch-main/caffe2/operators/channel_shuffle_op.h
#ifndef CAFFE2_OPERATORS_CHANNEL_SHUFFLE_OP_H_
#define CAFFE2_OPERATORS_CHANNEL_SHUFFLE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T, class Context>
class ChannelShuffleOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ChannelShuffleOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))),
OP_SINGLE_ARG(int, "group", group_, 1) {
CAFFE_ENFORCE_NE(order_, StorageOrder::UNKNOWN);
}
bool RunOnDevice() override {
return order_ == StorageOrder::NCHW ? RunOnDeviceWithOrderNCHW()
: RunOnDeviceWithOrderNHWC();
}
bool RunOnDeviceWithOrderNCHW();
bool RunOnDeviceWithOrderNHWC();
private:
const StorageOrder order_;
const int group_;
};
template <typename T, class Context>
class ChannelShuffleGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ChannelShuffleGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))),
OP_SINGLE_ARG(int, "group", group_, 1) {
CAFFE_ENFORCE_NE(order_, StorageOrder::UNKNOWN);
}
bool RunOnDevice() override {
return order_ == StorageOrder::NCHW ? RunOnDeviceWithOrderNCHW()
: RunOnDeviceWithOrderNHWC();
}
bool RunOnDeviceWithOrderNCHW();
bool RunOnDeviceWithOrderNHWC();
private:
const StorageOrder order_;
const int group_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CHANNEL_SHUFFLE_OP_H_
pytorch-main/caffe2/operators/channel_stats_op.h
#ifndef CAFFE2_OPERATORS_CHANNEL_STATS_OP_H_
#define CAFFE2_OPERATORS_CHANNEL_STATS_OP_H_
#include <string>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class ChannelStatsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ChannelStatsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))) {
CAFFE_ENFORCE_NE(order_, StorageOrder::UNKNOWN);
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& X = Input(0);
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1);
const int HxW = X.numel() / (N * C);
auto* sum = Output(0, {C}, at::dtype<T>());
auto* sumsq = Output(1, {C}, at::dtype<T>());
const T* X_data = X.template data<T>();
T* sum_data = sum->template mutable_data<T>();
T* sumsq_data = sumsq->template mutable_data<T>();
return order_ == StorageOrder::NCHW
? ComputeChannelStatsNCHW<T>(N, C, HxW, X_data, sum_data, sumsq_data)
: ComputeChannelStatsNHWC<T>(N, C, HxW, X_data, sum_data, sumsq_data);
}
private:
template <typename T>
bool
ComputeChannelStatsNCHW(int N, int C, int HxW, const T* X, T* sum, T* sumsq);
template <typename T>
bool
ComputeChannelStatsNHWC(int N, int C, int HxW, const T* X, T* sum, T* sumsq);
const StorageOrder order_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CHANNEL_STATS_OP_H_
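// The two outputs are the sufficient statistics for per-channel moments; a
// downstream consumer (e.g. a distributed batch-norm) can form them as below
// (sketch only):
//   mean[c] = sum[c] / (N * HxW);
//   var[c]  = sumsq[c] / (N * HxW) - mean[c] * mean[c];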
pytorch-main/caffe2/operators/clip_op.h
#ifndef CAFFE2_OPERATORS_CLIP_OP_H_
#define CAFFE2_OPERATORS_CLIP_OP_H_
#include <limits>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class ClipOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ClipOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
min_(std::numeric_limits<T>::lowest()),
max_(std::numeric_limits<T>::max()) {
if (HasArgument("min")) {
min_ = static_cast<T>(this->template GetSingleArgument<float>("min", 0));
}
if (HasArgument("max")) {
max_ = static_cast<T>(this->template GetSingleArgument<float>("max", 0));
}
}
bool RunOnDevice() override;
protected:
T min_;
T max_;
};
template <typename T, class Context>
class ClipGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ClipGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
min_(std::numeric_limits<T>::lowest()),
max_(std::numeric_limits<T>::max()) {
if (HasArgument("min")) {
min_ = static_cast<T>(this->template GetSingleArgument<float>("min", 0));
}
if (HasArgument("max")) {
max_ = static_cast<T>(this->template GetSingleArgument<float>("max", 0));
}
}
bool RunOnDevice() override;
protected:
T min_;
T max_;
// Input: Y, dY; Output: dX
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CLIP_OP_H_
pytorch-main/caffe2/operators/collect_and_distribute_fpn_rpn_proposals_op.h
#ifndef CAFFE2_OPERATORS_COLLECT_AND_DISTRIBUTE_FPN_RPN_PROPOSALS_OP_H_
#define CAFFE2_OPERATORS_COLLECT_AND_DISTRIBUTE_FPN_RPN_PROPOSALS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(CollectAndDistributeFpnRpnProposals);
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(CollectRpnProposals);
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(DistributeFpnProposals);
namespace caffe2 {
namespace utils {
// Compute the area of an array of boxes.
ERArrXXf BoxesArea(const ERArrXXf& boxes, const bool legacy_plus_one = false);
// Determine which FPN level each RoI in a set of RoIs should map to based
// on the heuristic in the FPN paper.
ERArrXXf MapRoIsToFpnLevels(
Eigen::Ref<const ERArrXXf> rois,
const float k_min,
const float k_max,
const float s0,
const float lvl0,
const bool legacy_plus_one = false);
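// (The FPN heuristic referenced above, Eqn. 1 of the paper: target level
//  k = floor(lvl0 + log2(sqrt(box_area) / s0)), clamped to [k_min, k_max].)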
// Sort RoIs from highest to lowest individual RoI score based on
// values from scores array and limit to n results
void SortAndLimitRoIsByScores(
Eigen::Ref<const EArrXf> scores,
int n,
ERArrXXf& rois);
// Updates arr to be indices that would sort the array. Implementation of
// https://docs.scipy.org/doc/numpy/reference/generated/numpy.argsort.html
void ArgSort(EArrXi& arr);
// Update out_filtered and out_indices with rows from rois where lvl matches
// value in lvls passed in.
void RowsWhereRoILevelEquals(
Eigen::Ref<const ERArrXXf> rois,
const ERArrXXf& lvls,
const int lvl,
ERArrXXf* out_filtered,
EArrXi* out_indices);
} // namespace utils
// C++ implementation of CollectAndDistributeFpnRpnProposalsOp
// Merge RPN proposals generated at multiple FPN levels and then
// distribute those proposals to their appropriate FPN levels for Faster
// RCNN. An anchor at one FPN level may predict an RoI that will map to
// another level, hence the need to redistribute the proposals.
// Reference:
// facebookresearch/Detectron/detectron/ops/collect_and_distribute_fpn_rpn_proposals.py
template <class Context>
class CollectAndDistributeFpnRpnProposalsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CollectAndDistributeFpnRpnProposalsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
roi_canonical_scale_(
this->template GetSingleArgument<int>("roi_canonical_scale", 224)),
roi_canonical_level_(
this->template GetSingleArgument<int>("roi_canonical_level", 4)),
roi_max_level_(
this->template GetSingleArgument<int>("roi_max_level", 5)),
roi_min_level_(
this->template GetSingleArgument<int>("roi_min_level", 2)),
rpn_max_level_(
this->template GetSingleArgument<int>("rpn_max_level", 6)),
rpn_min_level_(
this->template GetSingleArgument<int>("rpn_min_level", 2)),
rpn_post_nms_topN_(
this->template GetSingleArgument<int>("rpn_post_nms_topN", 2000)),
legacy_plus_one_(
this->template GetSingleArgument<bool>("legacy_plus_one", true)) {
CAFFE_ENFORCE_GE(
roi_max_level_,
roi_min_level_,
"roi_max_level " + c10::to_string(roi_max_level_) +
" must be greater than or equal to roi_min_level " +
c10::to_string(roi_min_level_) + ".");
CAFFE_ENFORCE_GE(
rpn_max_level_,
rpn_min_level_,
"rpn_max_level " + c10::to_string(rpn_max_level_) +
" must be greater than or equal to rpn_min_level " +
c10::to_string(rpn_min_level_) + ".");
}
~CollectAndDistributeFpnRpnProposalsOp() override {}
bool RunOnDevice() override;
protected:
// ROI_CANONICAL_SCALE
int roi_canonical_scale_{224};
// ROI_CANONICAL_LEVEL
int roi_canonical_level_{4};
// ROI_MAX_LEVEL
int roi_max_level_{5};
// ROI_MIN_LEVEL
int roi_min_level_{2};
// RPN_MAX_LEVEL
int rpn_max_level_{6};
// RPN_MIN_LEVEL
int rpn_min_level_{2};
// RPN_POST_NMS_TOP_N
int rpn_post_nms_topN_{2000};
// The infamous "+ 1" for box width and height dating back to the DPM days
bool legacy_plus_one_{true};
};
template <class Context>
class CollectRpnProposalsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CollectRpnProposalsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
rpn_max_level_(
this->template GetSingleArgument<int>("rpn_max_level", 6)),
rpn_min_level_(
this->template GetSingleArgument<int>("rpn_min_level", 2)),
rpn_post_nms_topN_(
this->template GetSingleArgument<int>("rpn_post_nms_topN", 2000)) {
CAFFE_ENFORCE_GE(
rpn_max_level_,
rpn_min_level_,
"rpn_max_level " + c10::to_string(rpn_max_level_) +
" must be greater than or equal to rpn_min_level " +
c10::to_string(rpn_min_level_) + ".");
}
~CollectRpnProposalsOp() override {}
bool RunOnDevice() override;
protected:
// RPN_MAX_LEVEL
int rpn_max_level_{6};
// RPN_MIN_LEVEL
int rpn_min_level_{2};
// RPN_POST_NMS_TOP_N
int rpn_post_nms_topN_{2000};
};
template <class Context>
class DistributeFpnProposalsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit DistributeFpnProposalsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
roi_canonical_scale_(
this->template GetSingleArgument<int>("roi_canonical_scale", 224)),
roi_canonical_level_(
this->template GetSingleArgument<int>("roi_canonical_level", 4)),
roi_max_level_(
this->template GetSingleArgument<int>("roi_max_level", 5)),
roi_min_level_(
this->template GetSingleArgument<int>("roi_min_level", 2)),
legacy_plus_one_(
this->template GetSingleArgument<bool>("legacy_plus_one", true)) {
CAFFE_ENFORCE_GE(
roi_max_level_,
roi_min_level_,
"roi_max_level " + c10::to_string(roi_max_level_) +
" must be greater than or equal to roi_min_level " +
c10::to_string(roi_min_level_) + ".");
}
~DistributeFpnProposalsOp() override {}
bool RunOnDevice() override;
protected:
// ROI_CANONICAL_SCALE
int roi_canonical_scale_{224};
// ROI_CANONICAL_LEVEL
int roi_canonical_level_{4};
// ROI_MAX_LEVEL
int roi_max_level_{5};
// ROI_MIN_LEVEL
int roi_min_level_{2};
// The infamous "+ 1" for box width and height dating back to the DPM days
bool legacy_plus_one_{true};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_COLLECT_AND_DISTRIBUTE_FPN_RPN_PROPOSALS_OP_H_
pytorch-main/caffe2/operators/concat_split_op.h
#ifndef CAFFE2_OPERATORS_CONCAT_SPLIT_OP_H_
#define CAFFE2_OPERATORS_CONCAT_SPLIT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/string_utils.h"
#include <c10/util/accumulate.h>
#include <c10/util/irange.h>
namespace caffe2 {
template <class Context>
class SplitOp final : public Operator<Context> {
public:
static const int kSplitOpInputSize = 2;
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SplitOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
split_(this->template GetRepeatedArgument<int>("split")) {
CAFFE_ENFORCE(
!(OperatorBase::HasArgument("axis") &&
OperatorBase::HasArgument("order")),
"You shouldn't specify both the dim to split, and the order "
"in the case of 4-D images.");
if (OperatorBase::HasArgument("axis")) {
axis_ = this->template GetSingleArgument<int>("axis", -1);
// only exists for computing the gradient of a Concat with 'add_axis'
add_axis_ = this->template GetSingleArgument<int>("add_axis", 0);
} else {
axis_ = GetDimFromOrderString(
this->template GetSingleArgument<string>("order", "NCHW"));
add_axis_ = 0;
}
}
bool RunOnDevice() override;
protected:
int axis_;
int add_axis_;
vector<int> split_;
// Input: X, optionally split
// The split tensor is stored in CPU.
};
template <class Context>
class SplitByLengthsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SplitByLengthsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
CAFFE_ENFORCE(
!(OperatorBase::HasArgument("axis") &&
OperatorBase::HasArgument("order")),
"You shouldn't specify both the dim to split, and the order "
"in the case of 4-D images.");
if (OperatorBase::HasArgument("axis")) {
axis_ = this->template GetSingleArgument<int>("axis", 0);
} else {
axis_ = GetDimFromOrderString(
this->template GetSingleArgument<string>("order", "NCHW"));
}
scaling_ =
this->template GetSingleArgument<bool>("use_scaling_lengths", false);
}
bool RunOnDevice() override;
protected:
int axis_;
bool scaling_;
Tensor inclusive_scan_buffer_{Context::GetDeviceType()};
Tensor inclusive_scan_length_buffer_{Context::GetDeviceType()};
// Input: X, optionally split
// The split tensor is stored in CPU.
Tensor lengths_host_{CPU};
};
template <class Context>
class ConcatOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ConcatOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
CAFFE_ENFORCE(
!(OperatorBase::HasArgument("axis") &&
OperatorBase::HasArgument("order")),
"You shouldn't specify both the dim to concat, and the order "
"in the case of 4-D images.");
if (OperatorBase::HasArgument("axis")) {
axis_ = this->template GetSingleArgument<int>("axis", -1);
add_axis_ = this->template GetSingleArgument<int>("add_axis", 0);
} else {
axis_ = GetDimFromOrderString(
this->template GetSingleArgument<string>("order", "NCHW"));
add_axis_ = 0;
}
}
bool RunOnDevice() override;
protected:
int axis_;
int add_axis_;
// Input: a number of tensors. Output: Y, split
// The split are stored in CPU.
};
// Implementations
template <class Context>
bool SplitOp<Context>::RunOnDevice() {
auto& input = Input(0);
int canonical_axis = input.canonical_axis_index(axis_);
CAFFE_ENFORCE_LT(
canonical_axis, input.dim(), "Axis not in input ndim range.");
const int input_channels = input.dim32(canonical_axis);
const int* axis_data;
vector<int> equal_split;
if (InputSize() == kSplitOpInputSize) {
// We obtain split from the input tensor.
CAFFE_ENFORCE_EQ(
split_.size(),
0,
"If you set split with an input blob, do not pass in "
"split in the argument.");
auto& split_tensor = this->template Input<Tensor>(1, CPU);
CAFFE_ENFORCE_EQ(split_tensor.numel(), OutputSize());
axis_data = split_tensor.template data<int>();
} else if (split_.size() == 0) {
CAFFE_ENFORCE_EQ(
input_channels % OutputSize(),
0,
"If you did not specify split explicitly, the number of "
"input channels:",
input_channels,
" should be divisible by the output size:",
OutputSize(),
".");
equal_split.resize(OutputSize(), input_channels / OutputSize());
axis_data = equal_split.data();
} else {
// We obtain split from the parameters.
CAFFE_ENFORCE_EQ(
split_.size(),
OutputSize(),
"The number of splits specified should be equal to the "
"number of outputs.");
axis_data = split_.data();
}
CAFFE_ENFORCE_EQ(
add_axis_ ? OutputSize()
: std::accumulate(axis_data, axis_data + OutputSize(), 0),
input_channels,
"Sum of split dimensions do not match: should be ",
input_channels);
vector<int64_t> output_dims(input.sizes().vec());
int before = 1, after = 1;
for (const auto i : c10::irange(canonical_axis)) {
before *= input.dim32(i);
}
for (int i = canonical_axis + 1; i < input.dim(); ++i) {
after *= input.dim32(i);
}
if (add_axis_) {
output_dims.erase(output_dims.begin() + canonical_axis);
}
const auto *const input_ptr = static_cast<const char*>(input.raw_data());
size_t input_offset = 0;
for (const auto i : c10::irange(OutputSize())) {
auto *const output = Output(i);
const auto axis_dim = add_axis_ ? 1 : axis_data[i];
if (!add_axis_) {
output_dims[canonical_axis] = axis_data[i];
}
output->Resize(output_dims);
// We need `output_ptr` before the early exit since
// `raw_mutable_data` sets the output's data type
auto *const output_ptr = output->raw_mutable_data(input.dtype());
if (input_ptr == nullptr || output_ptr == nullptr) {
continue;
}
math::CopyMatrix<Context>(
input.itemsize(),
before,
axis_dim * after,
input_ptr + input_offset,
input.dim32(canonical_axis) * after,
output_ptr,
axis_dim * after,
&context_,
input.dtype().copy());
input_offset += axis_dim * after * input.itemsize();
}
return true;
}
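// Example shapes (illustrative): splitting a (2, 6, 4) input along axis 1
// with split = [2, 4] yields outputs of shape (2, 2, 4) and (2, 4, 4); with
// no explicit split and two outputs, the axis is divided evenly into two
// (2, 3, 4) tensors.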
// Implementations
template <class Context>
bool SplitByLengthsOp<Context>::RunOnDevice() {
auto& input = Input(0);
auto lengths_length = Input(1).dim(0);
int32_t* length_data;
if (this->InputIsTensorType(1, CPU)) {
length_data = Input(1).template data<int32_t>();
} else {
// Length input in CUDA context
auto& input_length = Input(1);
lengths_host_ = TensorCPU(input_length, CPU);
length_data = lengths_host_.template data<int32_t>();
}
CAFFE_ENFORCE_EQ(
lengths_length % OutputSize(),
0,
"len(Lengths) ",
lengths_length,
"should be divisible by OutputSize() ",
OutputSize(),
".");
int canonical_axis = input.canonical_axis_index(axis_);
CAFFE_ENFORCE_LT(
canonical_axis, input.dim(), "Axis not in input ndim range.");
const int input_channels = input.dim32(canonical_axis);
const auto* axis_data = length_data;
auto sum_lengths = std::accumulate(axis_data, axis_data + lengths_length, 0);
if (scaling_) {
CAFFE_ENFORCE_EQ(
input_channels % (sum_lengths ? sum_lengths : 1),
0,
"Input channels ",
input_channels,
" should be divisible by ",
sum_lengths);
} else {
CAFFE_ENFORCE_EQ(
sum_lengths,
input_channels,
"Input channels should be equal to split dimensions sum, ",
input_channels,
" vs ",
sum_lengths);
}
vector<int64_t> output_dims(input.sizes().vec());
int before = input.size_to_dim(canonical_axis);
int after = input.size_from_dim(canonical_axis + 1);
size_t input_offset = 0;
auto dim_multiplier = sum_lengths ? (input_channels / sum_lengths) : 1;
if (!scaling_) {
dim_multiplier = 1;
}
for (const auto i : c10::irange(OutputSize())) {
auto* output = Output(i);
const auto* axis_offset = axis_data + lengths_length / OutputSize() * i;
auto axis_dim =
dim_multiplier *
std::accumulate(
axis_offset, axis_offset + lengths_length / OutputSize(), 0);
output_dims[canonical_axis] = axis_dim;
output->Resize(output_dims);
math::CopyMatrix<Context>(
input.itemsize(),
before,
axis_dim * after,
static_cast<const char*>(input.raw_data()) + input_offset,
input.dim32(canonical_axis) * after,
output->raw_mutable_data(input.dtype()),
axis_dim * after,
&context_,
input.dtype().copy());
input_offset += axis_dim * after * input.itemsize();
}
return true;
}
template <class Context>
bool ConcatOp<Context>::RunOnDevice() {
auto *const output = Output(0);
// We can override default options(Context::GetDeviceType())
// by explicitly passing in device type we want
Tensor *const split = Output(
1, at::IntArrayRef({InputSize()}), at::dtype<int>().device(CPU));
int *const axis_data = split->template mutable_data<int>();
auto& input_zero = Input(0);
int adj_size = input_zero.dim() + (add_axis_ ? 1 : 0);
int canonical_axis = canonical_axis_index_(axis_, adj_size);
CAFFE_ENFORCE_LT(canonical_axis, adj_size, "Axis not in input ndim range.");
for (const auto i : c10::irange(1, InputSize())) {
CAFFE_ENFORCE_EQ(
Input(i).dtype(),
input_zero.dtype(),
"All inputs must have the same type, expected: ",
input_zero.dtype().name(),
" but got: ",
Input(i).dtype().name(),
" for input: ",
i);
}
int before = 1, after = 1;
vector<int64_t> output_dims(input_zero.sizes().vec());
for (const auto i : c10::irange(input_zero.dim())) {
if (i == canonical_axis && !add_axis_) {
continue;
}
int dim = input_zero.dim32(i);
if (i < canonical_axis) {
before *= dim;
} else { // i > canonical_axis || i == canonical_axis && add_axis_
after *= dim;
}
// check the input dims are compatible.
for (const auto j : c10::irange(1, InputSize())) {
int dim_j = Input(j).dim32(i);
CAFFE_ENFORCE_EQ(
dim,
dim_j,
"Expect dimension = ",
dim,
" got ",
dim_j,
" at axis = ",
i,
" for input: ",
j,
". The input tensors can only have different dimensions "
"when arg 'add_axis' = 0 and along the axis = ",
canonical_axis,
" <",
Input(0).sizes(),
"> vs <",
Input(j).sizes(),
">.");
}
}
int output_channels = 0;
for (const auto i : c10::irange(InputSize())) {
axis_data[i] = add_axis_ ? 1 : Input(i).dim32(canonical_axis);
output_channels += axis_data[i];
}
if (add_axis_) {
output_dims.insert(output_dims.begin() + canonical_axis, output_channels);
} else {
output_dims[canonical_axis] = output_channels;
}
output->Resize(output_dims);
auto *const output_ptr = static_cast<char*>(output->raw_mutable_data(input_zero.dtype()));
  if (output_ptr == nullptr) {
return true;
}
size_t output_offset = 0;
for (const auto i : c10::irange(InputSize())) {
auto& input = Input(i);
auto axis_dim = add_axis_ ? 1 : input.dim32(canonical_axis);
math::CopyMatrix<Context>(
input.itemsize(),
before,
axis_dim * after,
input.raw_data(),
axis_dim * after,
output_ptr + output_offset,
output_channels * after,
&context_,
input_zero.dtype().copy());
output_offset += axis_dim * after * input.itemsize();
}
return true;
}
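// Example shapes (illustrative): concatenating (2, 3, 4) and (2, 5, 4) along
// axis 1 yields Y of shape (2, 8, 4) with split = [3, 5]; with add_axis = 1,
// two (2, 4) inputs stack into a (2, 2, 4) output.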
OpSchema::Cost CostInferenceForConcat(
const OperatorDef& def,
const std::vector<TensorShape>& in);
std::vector<TensorShape> TensorInferenceForConcat(
const OperatorDef& def,
const std::vector<TensorShape>& in);
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CONCAT_SPLIT_OP_H_
pytorch-main/caffe2/operators/conv_op.h
#ifndef CAFFE2_OPERATORS_CONV_OP_H_
#define CAFFE2_OPERATORS_CONV_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_op_shared.h"
#include "caffe2/operators/conv_pool_op_base.h"
C10_DECLARE_bool(caffe2_force_shared_col_buffer);
namespace caffe2 {
template <typename T, class Context>
class ConvOp final : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
explicit ConvOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<Context>(operator_def, ws) {
// Since this is the default convolution implementation, we will
// use CAFFE_ENFORCE instead of OPERATOR_NEEDS_FEATURE.
CAFFE_ENFORCE(
(group_ == 1 || order_ == StorageOrder::NCHW ||
std::is_same<Context, CPUContext>::value),
"Group convolution only supports NCHW order or CPUContext right now.");
// Create shared buffer mutex in the constructor
// to avoid race-condition in DAGNet.
if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {
createSharedBuffer<Context>(ws_);
}
}
~ConvOp() override {}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
bool Run1x1ConvOnDeviceWithOrderNCHW(
const int N,
const int C,
const int HxW,
const int M,
const T* X,
const T* filter,
const T* bias,
T* Y);
bool Run1x1ConvOnDeviceWithOrderNHWC(
const int N,
const int C,
const int HxW,
const int M,
const T* X,
const T* filter,
const T* bias,
T* Y);
Tensor col_buffer_{Context::GetDeviceType()};
Tensor bias_multiplier_{Context::GetDeviceType()};
Tensor img_shape_device_{Context::GetDeviceType()};
Tensor col_buffer_shape_device_{Context::GetDeviceType()};
// Input: X, W, b
// Output: Y
INPUT_TAGS(INPUT, FILTER, BIAS);
};
template <typename T, class Context>
class ConvGradientOp final : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
explicit ConvGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<Context>(operator_def, ws),
no_bias_(this->template GetSingleArgument<int>("no_bias", 0)) {
CAFFE_ENFORCE(
!(no_bias_ && OutputSize() == 3),
"If bias is not present, you should not have 3 grad output.");
CAFFE_ENFORCE(
(group_ == 1 || order_ == StorageOrder::NCHW ||
std::is_same<Context, CPUContext>::value),
"Group convolution only supports NCHW order or CPUContext right now.");
}
~ConvGradientOp() override {}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
Tensor col_buffer_;
Tensor bias_multiplier_;
Tensor img_shape_device_{Context::GetDeviceType()};
Tensor col_buffer_shape_device_{Context::GetDeviceType()};
bool no_bias_;
// input: X, W, dY
// output: dW, db, and optionally dX
INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);
OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CONV_OP_H_
pytorch-main/caffe2/operators/conv_op_cache_cudnn.h
#ifndef CAFFE2_OPERATORS_CONV_OP_CACHE_H_
#define CAFFE2_OPERATORS_CONV_OP_CACHE_H_
#include <functional>
#include <unordered_map>
#include <vector>
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
template <typename TAlgorithm>
class AlgorithmsCache {
public:
// Caches the best algorithm for a given
// combination of tensor dimensions & compute data type.
//
TAlgorithm getAlgorithm(
at::IntArrayRef tensorDimensions1,
at::IntArrayRef tensorDimensions2,
int algorithmFlags, // Differentiate between algorithms with different
// parameters in a generic way
std::function<TAlgorithm()> generatingFunc);
private:
std::unordered_map<int64_t, TAlgorithm> hash_;
};
template <typename TAlgorithm>
TAlgorithm AlgorithmsCache<TAlgorithm>::getAlgorithm(
at::IntArrayRef tensorDimensions1,
at::IntArrayRef tensorDimensions2,
int algorithmFlags,
std::function<TAlgorithm()> generatingFunc) {
int64_t seed = 0;
  // Hash all of the inputs, which we will then use to try and look up
// a previously discovered algorithm, or fall back to generating a new one.
std::hash<int64_t> hashFn;
for (const auto num : tensorDimensions1) {
// Copied from boost::hash_combine.
// Adding 1 to differentiate between first and second vector.
seed ^= hashFn(num) + 0x9e3779b9 + (seed << 6) + (seed >> 2) + 1;
}
for (const auto num : tensorDimensions2) {
// Copied from boost::hash_combine.
seed ^= hashFn(num) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}
// Adding 2 to differentiate from previous vectors
seed ^= hashFn(algorithmFlags) + 0x9e3779b9 + (seed << 6) + (seed >> 2) + 2;
if (seed == 0) {
return generatingFunc();
}
if (hash_.find(seed) == hash_.end()) {
TAlgorithm value = generatingFunc();
hash_[seed] = value;
}
return hash_[seed];
}
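// Hypothetical usage sketch (ExpensiveAlgorithmSearch is illustrative): the
// generating function runs only on a cache miss for this (dims1, dims2,
// flags) key, so repeated shapes reuse the previously discovered algorithm.
//
//   AlgorithmsCache<int> cache;
//   int algo = cache.getAlgorithm(
//       {N, C, H, W}, {M, C, kH, kW}, /*algorithmFlags=*/0,
//       [&]() { return ExpensiveAlgorithmSearch(); });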
} // namespace caffe2
#endif
pytorch-main/caffe2/operators/conv_op_shared.h
#ifndef CAFFE2_OPERATORS_CONV_OP_SHARED_H_
#define CAFFE2_OPERATORS_CONV_OP_SHARED_H_
#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/workspace.h"
namespace caffe2 {
/**
* Creates a mutex and shared buffer in the workspace.
* Not thread-safe, must be called from the constructor.
*/
template <typename Context>
void createSharedBuffer(Workspace* ws);
/**
* Thread-safe, can be invoked from RunOnDevice() to serialize
* access to shared buffer.
*/
template <typename Context>
void runWithSharedBuffer(Workspace* ws, std::function<void(Tensor* buffer)> f);
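// Typical usage, mirroring the conv ops in this repo: create the buffer once
// in the operator constructor, then serialize access to it at run time
// (sketch only; ws_ is the operator's workspace pointer):
//
//   // constructor:
//   createSharedBuffer<Context>(ws_);
//   // RunOnDevice():
//   runWithSharedBuffer<Context>(ws_, [&](Tensor* col_buffer) {
//     /* fill and consume col_buffer */
//   });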
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CONV_OP_SHARED_H_
pytorch-main/caffe2/operators/conv_transpose_op.h
#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_H_
#define CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_transpose_unpool_op_base.h"
namespace caffe2 {
template <typename T, class Context>
class ConvTransposeOp final : public ConvTransposeUnpoolBase<Context> {
public:
USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);
template <class... Args>
explicit ConvTransposeOp(Args&&... args)
: ConvTransposeUnpoolBase<Context>(std::forward<Args>(args)...) {}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
Tensor col_buffer_;
Tensor bias_multiplier_;
// Input: X, W, b
// Output: Y
INPUT_TAGS(INPUT, FILTER, BIAS);
};
template <typename T, class Context>
class ConvTransposeGradientOp final : public ConvTransposeUnpoolBase<Context> {
public:
USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);
template <class... Args>
explicit ConvTransposeGradientOp(Args&&... args)
: ConvTransposeUnpoolBase<Context>(std::forward<Args>(args)...),
no_bias_(this->template GetSingleArgument<bool>("no_bias", false)) {
CAFFE_ENFORCE(
!(no_bias_ && OutputSize() == 3),
"If bias is not present, you should not have 3 grad output.");
}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
Tensor col_buffer_;
Tensor bias_multiplier_;
const bool no_bias_;
// input: X, W, dY
// output: dW, optionally db and dX
INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);
OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_H_
pytorch-main/caffe2/operators/conv_transpose_op_impl.h
// conv_transpose_op_impl.h is the templated implementation of the
// conv_transpose_op.h file.
#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_IMPL_H_
#define CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_IMPL_H_
#include "caffe2/operators/conv_transpose_op.h"
#include <array>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_op_shared.h"
#include "caffe2/operators/conv_transpose_unpool_op_base.h"
#include "caffe2/utils/math.h"
C10_DECLARE_bool(caffe2_force_shared_col_buffer);
namespace caffe2 {
template <typename T, class Context>
bool ConvTransposeOp<T, Context>::RunOnDeviceWithOrderNCHW() {
const auto& X = Input(INPUT);
const auto& filter = Input(FILTER);
CAFFE_ENFORCE_EQ(X.dim(), 4, "Input must be 4D tensor");
CAFFE_ENFORCE_EQ(filter.dim(), 4, "filter must be 4D tensor");
const int N = X.dim32(0);
const int M = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const int G = group_;
CAFFE_ENFORCE_EQ(M, filter.dim32(0));
CAFFE_ENFORCE_EQ(
M % G, 0, "The number of input channels is not divisible by group.");
const int C = filter.dim32(1) * G;
CAFFE_ENFORCE_EQ(
filter.dim32(2),
kernel_h(),
"filter height must be equal to kernel height");
CAFFE_ENFORCE_EQ(
filter.dim32(3),
this->kernel_w(),
"filter width must be equal to kernel width");
const std::vector<std::int64_t> Y_dims =
ConvTransposeUnpoolBase<Context>::GetOutputSize(X, C);
auto* Y = Output(0, Y_dims, at::dtype<T>());
if (X.numel() == 0) {
VLOG(2) << "Number of elements is 0 in ConvTransposeOp";
return true;
}
const int K_HxW = kernel_h() * kernel_w();
const int kernel_dim = C / G * K_HxW;
const int X_HxW = H * W;
const int Y_HxW = Y->dim32(2) * Y->dim32(3);
const T* X_data = X.template data<T>();
const T* filter_data = filter.template data<T>();
const T* bias_data = nullptr;
if (InputSize() == 3) {
auto& bias = Input(BIAS);
CAFFE_ENFORCE_EQ(bias.dim(), 1, "bias must be 1D tensor");
CAFFE_ENFORCE_EQ(
bias.dim32(0),
C,
"bias dimension must be equal to output channel number");
bias_data = bias.template data<T>();
}
T* Y_data = Y->template mutable_data<T>();
const std::vector<std::int64_t> buffer_shape = {
C, kernel_h(), kernel_w(), H, W};
const auto func = [&](Tensor* col_buffer) {
ReinitializeTensor(
col_buffer,
buffer_shape,
at::dtype<T>().device(Context::GetDeviceType()));
T* col_buffer_data = col_buffer->template mutable_data<T>();
for (const auto image_id : c10::irange(N)) {
// Weight term
if (G == 1) {
math::Gemm<T, Context>(
CblasTrans,
CblasNoTrans,
kernel_dim,
X_HxW,
M,
1.0f,
filter_data,
X_data + image_id * M * X_HxW,
0.0f,
col_buffer_data,
&context_);
} else {
math::GemmStridedBatched<T, Context>(
CblasTrans,
CblasNoTrans,
G,
kernel_dim,
X_HxW,
M / G,
1.0f,
filter_data,
M / G * kernel_dim,
X_data + image_id * M * X_HxW,
M / G * X_HxW,
0.0f,
col_buffer_data,
col_buffer->numel() / G,
&context_);
}
// Col2Im
math::Col2Im<T, Context, StorageOrder::NCHW>(
C,
Y->dim32(2),
Y->dim32(3),
kernel_h(),
kernel_w(),
1,
1,
pad_t(),
pad_l(),
pad_b(),
pad_r(),
stride_h(),
stride_w(),
col_buffer_data,
Y_data + image_id * C * Y_HxW,
&context_);
if (bias_data != nullptr) {
// Bias term
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
math::BiasCHW<T, Context>(
bias_data,
nullptr,
C,
Y_HxW,
Y_data + image_id * C * Y_HxW,
&context_);
#endif // defined(__ARM_NEON__) || defined(__ARM_NEON)
}
}
if (bias_data != nullptr) {
#if !defined(__ARM_NEON__) && !defined(__ARM_NEON)
// Bias term
const std::array<int, 3> Y_dims = {N, C, Y_HxW};
const std::array<int, 3> b_dims = {1, C, 1};
math::Add<T, Context>(
3,
Y_dims.data(),
3,
b_dims.data(),
Y_data,
bias_data,
Y_data,
&context_);
#endif
}
};
if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {
runWithSharedBuffer<Context>(ws_, func);
} else {
func(&col_buffer_);
}
return true;
}
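// Summary of the NCHW forward pass above, per image: the GEMM forms
// col = filter^T * X (one (C/G * kH * kW) x (H * W) block per group), and
// Col2Im scatter-adds the overlapping kernel windows back into Y, i.e.
// Y = col2im(W^T X) + b, the adjoint of the im2col-based convolution.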
template <typename T, class Context>
bool ConvTransposeOp<T, Context>::RunOnDeviceWithOrderNHWC() {
const auto& X = Input(INPUT);
auto& filter = Input(FILTER);
CAFFE_ENFORCE_EQ(filter.dim(), 4, "filter must be 4D tensor");
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int M = X.dim32(3);
const int G = group_;
CAFFE_ENFORCE_EQ(
filter.dim32(0),
M,
"filter number must be equal to input channel number");
CAFFE_ENFORCE_EQ(
M % G, 0, "The number of input channels is not divisible by group.");
const int C = filter.dim32(3) * G;
CAFFE_ENFORCE_EQ(
filter.dim32(1),
kernel_h(),
"filter height must be equal to kernel height");
CAFFE_ENFORCE_EQ(
filter.dim32(2),
kernel_w(),
"filter width must be equal to kernel width");
const std::vector<std::int64_t> Y_dims =
ConvTransposeUnpoolBase<Context>::GetOutputSize(X, C);
auto* Y = Output(0, Y_dims, at::dtype<T>());
if (X.numel() == 0) {
VLOG(2) << "Number of elements is 0 in ConvTransposeOp";
return true;
}
const int K_HxW = kernel_h() * kernel_w();
const int kernel_dim = C / G * K_HxW;
const int X_HxW = H * W;
const int Y_HxW = Y->dim32(1) * Y->dim32(2);
const T* X_data = X.template data<T>();
const T* filter_data = filter.template data<T>();
const T* bias_data = nullptr;
if (InputSize() == 3) {
auto& bias = Input(BIAS);
CAFFE_ENFORCE_EQ(bias.dim(), 1, "bias must be 1D tensor");
CAFFE_ENFORCE_EQ(
bias.dim32(0),
C,
"bias dimension must be equal to output channel number");
bias_data = bias.template data<T>();
}
T* Y_data = Y->template mutable_data<T>();
const std::vector<std::int64_t> buffer_shape = {
G, H, W, kernel_h(), kernel_w(), C / G};
  const auto func = [&](Tensor* col_buffer) {
    ReinitializeTensor(
        col_buffer,
        buffer_shape,
        at::dtype<T>().device(Context::GetDeviceType()));
    T* col_buffer_data = col_buffer->template mutable_data<T>();
for (const auto image_id : c10::irange(N)) {
// Weight term
if (G == 1) {
math::Gemm<T, Context>(
CblasNoTrans,
CblasNoTrans,
X_HxW,
kernel_dim,
M,
1.0f,
X_data + image_id * M * X_HxW,
filter_data,
0.0f,
col_buffer_data,
&context_);
} else {
for (const auto group_id : c10::irange(G)) {
math::GemmEx<T, Context>(
CblasNoTrans,
CblasNoTrans,
X_HxW,
kernel_dim,
M / G,
1.0f,
X_data + image_id * M * X_HxW + group_id * M / G,
M,
filter_data + group_id * M / G * kernel_dim,
kernel_dim,
0.0f,
col_buffer_data + group_id * kernel_dim,
G * kernel_dim,
&context_);
}
}
// Col2Im
math::Col2Im<T, Context, StorageOrder::NHWC>(
C,
Y->dim32(1),
Y->dim32(2),
kernel_h(),
kernel_w(),
1,
1,
pad_t(),
pad_l(),
pad_b(),
pad_r(),
stride_h(),
stride_w(),
col_buffer_data,
Y_data + image_id * C * Y_HxW,
&context_,
G);
}
if (bias_data != nullptr) {
// Bias term
const std::array<int, 2> Y_dims = {N * Y_HxW, C};
const std::array<int, 2> b_dims = {1, C};
math::Add<T, Context>(
2,
Y_dims.data(),
2,
b_dims.data(),
Y_data,
bias_data,
Y_data,
&context_);
}
};
if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {
runWithSharedBuffer<Context>(ws_, func);
} else {
func(&col_buffer_);
}
return true;
}
template <typename T, class Context>
bool ConvTransposeGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {
const auto& X = Input(INPUT);
const auto& filter = Input(FILTER);
const auto& dY = Input(OUTPUT_GRAD);
CAFFE_ENFORCE_EQ(filter.dim(), 4);
const int N = X.dim32(0);
const int M = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const int G = group_;
CAFFE_ENFORCE_EQ(M, filter.dim32(0));
CAFFE_ENFORCE_EQ(
M % G, 0, "The number of input channels is not divisible by group.");
const int C = filter.dim32(1) * G;
CAFFE_ENFORCE_EQ(C, dY.dim32(1));
CAFFE_ENFORCE_EQ(
filter.dim32(2),
kernel_h(),
"filter height must be equal to kernel height");
CAFFE_ENFORCE_EQ(
filter.dim32(3),
this->kernel_w(),
"filter width must be equal to kernel width");
const int K_HxW = kernel_h() * kernel_w();
const int kernel_dim = C / G * K_HxW;
const int X_HxW = H * W;
const int Y_HxW = dY.dim32(2) * dY.dim32(3);
auto* dfilter = Output(FILTER_GRAD, filter.sizes(), at::dtype<T>());
const T* X_data = X.template data<T>();
const T* filter_data = filter.template data<T>();
const T* dY_data = dY.template data<T>();
T* dfilter_data = dfilter->template mutable_data<T>();
T* dbias_data = nullptr;
T* dX_data = nullptr;
if (!no_bias_) {
auto* dbias = Output(BIAS_OR_INPUT_GRAD, {C}, at::dtype<T>());
dbias_data = dbias->template mutable_data<T>();
}
const bool compute_dX =
(OutputSize() == 3) || (no_bias_ && (OutputSize() == 2));
if (compute_dX) {
auto* dX = Output(
no_bias_ ? BIAS_OR_INPUT_GRAD : INPUT_GRAD, X.sizes(), at::dtype<T>());
dX_data = dX->template mutable_data<T>();
}
math::Set<T, Context>(filter.numel(), T(0), dfilter_data, &context_);
if (X.numel() == 0) {
VLOG(2) << "Number of elements is 0 in ConvTransposeOp";
if (dbias_data != nullptr) {
math::Set<T, Context>(C, T(0), dbias_data, &context_);
}
return true;
}
ReinitializeTensor(
&col_buffer_,
std::vector<std::int64_t>{C, kernel_h(), kernel_w(), H, W},
at::dtype<T>().device(Context::GetDeviceType()));
T* col_buffer_data = col_buffer_.template mutable_data<T>();
for (const auto image_id : c10::irange(N)) {
// gradient w.r.t. filters. Im2Col followed by Gemm
// Im2Col.
math::Im2Col<T, Context, StorageOrder::NCHW>(
C,
dY.dim32(2),
dY.dim32(3),
kernel_h(),
kernel_w(),
1,
1,
pad_t(),
pad_l(),
pad_b(),
pad_r(),
stride_h(),
stride_w(),
dY_data + image_id * C * Y_HxW,
col_buffer_data,
&context_);
// Gemm
if (G == 1) {
math::Gemm<T, Context>(
CblasNoTrans,
CblasTrans,
M,
kernel_dim,
X_HxW,
1.0f,
X_data + image_id * M * X_HxW,
col_buffer_data,
1.0f,
dfilter_data,
&context_);
} else {
math::GemmStridedBatched<T, Context>(
CblasNoTrans,
CblasTrans,
G,
M / G,
kernel_dim,
X_HxW,
1.0f,
X_data + image_id * M * X_HxW,
M / G * X_HxW,
col_buffer_data,
col_buffer_.numel() / G,
1.0f,
dfilter_data,
M / G * kernel_dim,
&context_);
}
if (dX_data != nullptr) {
// Compute gradients w.r.t. the input
if (G == 1) {
math::Gemm<T, Context>(
CblasNoTrans,
CblasNoTrans,
M,
X_HxW,
kernel_dim,
1.0f,
filter_data,
col_buffer_data,
0.0f,
dX_data + image_id * M * X_HxW,
&context_);
} else {
math::GemmStridedBatched<T, Context>(
CblasNoTrans,
CblasNoTrans,
G,
M / G,
X_HxW,
kernel_dim,
1.0f,
filter_data,
M / G * kernel_dim,
col_buffer_data,
col_buffer_.numel() / G,
0.0f,
dX_data + image_id * M * X_HxW,
M / G * X_HxW,
&context_);
}
}
}
if (dbias_data != nullptr) {
// gradient w.r.t. bias
const std::array<int, 3> Y_dims = {N, C, Y_HxW};
const std::array<int, 3> b_dims = {1, C, 1};
math::ReduceSum<T, Context>(
3, Y_dims.data(), b_dims.data(), T(1), dY_data, dbias_data, &context_);
}
return true;
}
template <typename T, class Context>
bool ConvTransposeGradientOp<T, Context>::RunOnDeviceWithOrderNHWC() {
const auto& X = Input(INPUT);
const auto& filter = Input(FILTER);
const auto& dY = Input(OUTPUT_GRAD);
CAFFE_ENFORCE_EQ(filter.dim(), 4);
const int N = X.dim32(0);
const int H = X.dim32(1);
const int W = X.dim32(2);
const int M = X.dim32(3);
const int G = group_;
CAFFE_ENFORCE_EQ(M, filter.dim32(0));
CAFFE_ENFORCE_EQ(
M % G, 0, "The number of input channels is not divisible by group.");
const int C = filter.dim32(3) * G;
CAFFE_ENFORCE_EQ(C, dY.dim32(3));
CAFFE_ENFORCE_EQ(
filter.dim32(1),
kernel_h(),
"filter height must be equal to kernel height");
CAFFE_ENFORCE_EQ(
filter.dim32(2),
this->kernel_w(),
"filter width must be equal to kernel width");
CAFFE_ENFORCE_EQ(dY.dim32(3), C);
const int K_HxW = kernel_h() * kernel_w();
const int kernel_dim = C / G * K_HxW;
const int X_HxW = H * W;
const int Y_HxW = dY.dim32(1) * dY.dim32(2);
auto* dfilter = Output(FILTER_GRAD, filter.sizes(), at::dtype<T>());
const T* X_data = X.template data<T>();
const T* filter_data = filter.template data<T>();
const T* dY_data = dY.template data<T>();
T* dfilter_data = dfilter->template mutable_data<T>();
T* dbias_data = nullptr;
T* dX_data = nullptr;
if (!no_bias_) {
auto* dbias = Output(BIAS_OR_INPUT_GRAD, {C}, at::dtype<T>());
dbias_data = dbias->template mutable_data<T>();
}
const bool compute_dX =
(OutputSize() == 3) || (no_bias_ && (OutputSize() == 2));
if (compute_dX) {
auto* dX = Output(
no_bias_ ? BIAS_OR_INPUT_GRAD : INPUT_GRAD, X.sizes(), at::dtype<T>());
dX_data = dX->template mutable_data<T>();
}
math::Set<T, Context>(filter.numel(), T(0), dfilter_data, &context_);
if (X.numel() == 0) {
VLOG(2) << "Number of elements is 0 in ConvTransposeOp";
if (dbias_data != nullptr) {
math::Set<T, Context>(C, T(0), dbias_data, &context_);
}
return true;
}
ReinitializeTensor(
&col_buffer_,
std::vector<std::int64_t>{C, kernel_h(), kernel_w(), H, W},
at::dtype<T>().device(Context::GetDeviceType()));
T* col_buffer_data = col_buffer_.template mutable_data<T>();
for (const auto image_id : c10::irange(N)) {
// gradient w.r.t. filters. Im2Col followed by Gemm
// Im2Col.
math::Im2Col<T, Context, StorageOrder::NHWC>(
C,
dY.dim32(1),
dY.dim32(2),
kernel_h(),
kernel_w(),
1,
1,
pad_t(),
pad_l(),
pad_b(),
pad_r(),
stride_h(),
stride_w(),
dY_data + image_id * C * Y_HxW,
col_buffer_data,
&context_,
G);
// Gemm
if (G == 1) {
math::Gemm<T, Context>(
CblasTrans,
CblasNoTrans,
M,
kernel_dim,
X_HxW,
1.0f,
X_data + image_id * M * X_HxW,
col_buffer_data,
1.0f,
dfilter_data,
&context_);
} else {
for (const auto group_id : c10::irange(G)) {
math::GemmEx<T, Context>(
CblasTrans,
CblasNoTrans,
M / G,
kernel_dim,
X_HxW,
1.0f,
X_data + image_id * M * X_HxW + group_id * M / G,
M,
col_buffer_data + group_id * kernel_dim,
G * kernel_dim,
1.0f,
dfilter_data + group_id * M / G * kernel_dim,
kernel_dim,
&context_);
}
}
if (dX_data != nullptr) {
// Compute gradients w.r.t. the input
if (G == 1) {
math::Gemm<T, Context>(
CblasNoTrans,
CblasTrans,
X_HxW,
M,
kernel_dim,
1.0f,
col_buffer_data,
filter_data,
0.0f,
dX_data + image_id * M * X_HxW,
&context_);
} else {
for (const auto group_id : c10::irange(G)) {
math::GemmEx<T, Context>(
CblasNoTrans,
CblasTrans,
X_HxW,
M / G,
kernel_dim,
1.0f,
col_buffer_data + group_id * kernel_dim,
G * kernel_dim,
filter_data + group_id * M / G * kernel_dim,
kernel_dim,
0.0f,
dX_data + image_id * M * X_HxW + group_id * M / G,
M,
&context_);
}
}
}
}
if (dbias_data != nullptr) {
const std::array<int, 2> Y_dims = {N * Y_HxW, C};
const std::array<int, 2> b_dims = {1, C};
math::ReduceSum<T, Context>(
2, Y_dims.data(), b_dims.data(), T(1), dY_data, dbias_data, &context_);
}
return true;
}
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_OP_IMPL_H_
pytorch-main/caffe2/operators/conv_transpose_op_mobile.h
#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_
#define CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_
#include "caffe2/core/common.h"
#ifdef C10_MOBILE
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_transpose_unpool_op_base.h"
namespace caffe2 {
template <typename T, class Context>
class ConvTransposeMobileOp final : public ConvTransposeUnpoolBase<Context> {
public:
USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context);
ConvTransposeMobileOp(const OperatorDef& operator_def, Workspace* ws)
: ConvTransposeUnpoolBase<Context>(operator_def, ws) {
OPERATOR_NEEDS_FEATURE(
order_ == StorageOrder::NCHW,
"Only NCHW order is supported right now.");
OPERATOR_NEEDS_FEATURE(
this->pad_l() == 0, "operator does not handle row width padding");
OPERATOR_NEEDS_FEATURE(
this->pad_r() == 0, "operator does not handle row width padding");
OPERATOR_NEEDS_FEATURE(this->stride_w() <= 4, "stride width must be <= 4");
}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
  // We store numThreads per-worker tiles of Y, followed by numThreads
  // per-worker gemm output buffers, laid out in that order.
Tensor threadBuffer_{CPU};
// Input: X, W, b
// Output: Y
INPUT_TAGS(INPUT, FILTER, BIAS);
};
} // namespace caffe2
#endif // C10_MOBILE
#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_H_
pytorch-main/caffe2/operators/conv_transpose_op_mobile_impl.h
// conv_transpose_op_mobile_impl.h is the templated implementation of the
// conv_transpose_op_mobile.h file.
#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_IMPL_H_
#define CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_IMPL_H_
#include "caffe2/core/common.h"
#ifdef C10_MOBILE
#include "caffe2/core/logging.h"
#include "caffe2/operators/conv_op_shared.h"
#include "caffe2/operators/conv_transpose_op_mobile.h"
#include "caffe2/utils/cpu_neon.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/fixed_divisor.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/utils.h"
C10_DECLARE_bool(caffe2_force_shared_col_buffer);
namespace caffe2 {
template <typename T, typename Context>
void runTileContiguous(
int tileId,
int N,
int M,
int H,
int W,
int outputH,
int outputW,
int C,
int kernelH,
int kernelW,
int strideH,
int strideW,
int padT,
const T* filterData,
const T* Xdata,
T* colBufferData,
T* Ydata,
Context* context) {
// The tile size is exactly the length of a single row
int tileSize = W;
auto kernelDataSize = C * kernelH * kernelW;
auto currentTileStart = tileSize * tileId;
// gemm tile
math::GemmEx<T, Context>(
CblasTrans,
CblasNoTrans,
kernelDataSize,
tileSize,
M,
1,
filterData,
kernelDataSize,
Xdata + currentTileStart,
H * W,
0,
colBufferData,
tileSize,
context);
// col2im tile
// We assume that there is no padding in the columns (padL and padR
// == 0).
// FIXME: it is actually possible for us to handle padding, figure
// out how to adjust the bounds
// We write into Y in a de-interleaved fashion; in other words,
// every column (mod strideW) == 0 together in one block,
// every column (mod strideW) == 1 in another,
// ... and so on.
int colBlockSize = (W + kernelW / strideW);
int numColBlocks = strideW;
for (const auto c : c10::irange(kernelDataSize)) {
int w_offset = c % kernelW;
int h_offset = (c / kernelW) % kernelH;
int c_im = c / kernelH / kernelW;
// Each row is a separate tile that we handle. First determine the
// row into which we are writing the output.
// We can properly handle padding for the rows.
int rowY = tileId * strideH - padT + h_offset;
// If this row is out of bounds, then skip it
if (!math::utils::IsAGeZeroAndALtB(rowY, outputH)) {
continue;
}
// FIXME: we don't actually handle a dynamic padL > 0
constexpr int kPadL = 0;
int colOffsetStart = -kPadL + w_offset;
int colBlockY = colOffsetStart % strideW;
// However, within a block we may not start writing at offset
// 0. The offset at which we begin writing is determined by
// colOffsetStart
int colWithinBlockOffsetY = colOffsetStart / strideW;
// So, this is where we begin reading/writing in Y
int colY = colBlockY * colBlockSize + colWithinBlockOffsetY;
// This is the complete offset into Y from the start
// Each row has strideW blocks of size colBlockSize
int offsetY = rowY * colBlockSize * numColBlocks + colY;
T* colBufferPointer = colBufferData + c * tileSize;
T* yPointer =
Ydata + c_im * outputH * (colBlockSize * numColBlocks) + offsetY;
int b = 0;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
// We vectorize the loop within the row
{
constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float)) * 4;
int limit = (tileSize / kUnroll) * kUnroll;
for (; b < limit; b += kUnroll) {
float32x4_t cb0 = vld1q_f32(colBufferPointer + 0);
float32x4_t cb1 = vld1q_f32(colBufferPointer + 4);
float32x4_t cb2 = vld1q_f32(colBufferPointer + 8);
float32x4_t cb3 = vld1q_f32(colBufferPointer + 12);
float32x4_t y0 = vld1q_f32(yPointer + 0);
float32x4_t y1 = vld1q_f32(yPointer + 4);
float32x4_t y2 = vld1q_f32(yPointer + 8);
float32x4_t y3 = vld1q_f32(yPointer + 12);
y0 = vaddq_f32(y0, cb0);
y1 = vaddq_f32(y1, cb1);
y2 = vaddq_f32(y2, cb2);
y3 = vaddq_f32(y3, cb3);
vst1q_f32(yPointer + 0, y0);
vst1q_f32(yPointer + 4, y1);
vst1q_f32(yPointer + 8, y2);
vst1q_f32(yPointer + 12, y3);
colBufferPointer += kUnroll;
yPointer += kUnroll;
}
}
{
constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float));
int limit = (tileSize / kUnroll) * kUnroll;
for (; b < limit; b += kUnroll) {
float32x4_t cb0 = vld1q_f32(colBufferPointer);
float32x4_t y0 = vld1q_f32(yPointer);
y0 = vaddq_f32(y0, cb0);
vst1q_f32(yPointer, y0);
colBufferPointer += kUnroll;
yPointer += kUnroll;
}
}
#endif
// Handle un-vectorizable epilogue
for (; b < tileSize; ++b) {
*yPointer += *colBufferPointer;
++yPointer;
++colBufferPointer;
}
}
}
template <typename T, int N>
struct StoreInterleaved {};
template <>
struct StoreInterleaved<float, 1> {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
inline static void store(float* p, float32x4_t v[1]) {
vst1q_f32(p, v[0]);
}
#endif
inline static void store(float* p, float v[1]) {
p[0] = v[0];
}
};
template <>
struct StoreInterleaved<float, 2> {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
inline static void store(float* p, float32x4_t v[2]) {
float32x4x2_t x = {{v[0], v[1]}};
vst2q_f32(p, x);
}
#endif
inline static void store(float* p, float v[2]) {
p[0] = v[0];
p[1] = v[1];
}
};
template <>
struct StoreInterleaved<float, 3> {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
inline static void store(float* p, float32x4_t v[3]) {
float32x4x3_t x = {{v[0], v[1], v[2]}};
vst3q_f32(p, x);
}
#endif
inline static void store(float* p, float v[3]) {
p[0] = v[0];
p[1] = v[1];
p[2] = v[2];
}
};
template <>
struct StoreInterleaved<float, 4> {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
inline static void store(float* p, float32x4_t v[4]) {
float32x4x4_t x = {{v[0], v[1], v[2], v[3]}};
vst4q_f32(p, x);
}
#endif
inline static void store(float* p, float v[4]) {
p[0] = v[0];
p[1] = v[1];
p[2] = v[2];
p[3] = v[3];
}
};
template <int kStrideW>
void reinterleaveRows(
const float* src,
const float* bias,
int c,
int h,
float* dst,
int outputC,
int outputH,
int outputW,
int inputW,
int kernelW,
int strideW,
int adjH) {
// Each row in src is of the form:
// [w mod strideW == 0 elements]...[w mod strideW == strideW - 1
// elements]
// We need to re-interleave the values and write them in the output
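  // Worked example (illustrative): with kStrideW == 2, an output row holding
  // a b c d e arrives here as the two blocks [a c e ...][b d ...], and the
  // loops below zip them back into a b c d e while adding the bias.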
int colBlockSize = inputW + kernelW / kStrideW;
int noAdjOutputW = (inputW - 1) * kStrideW + kernelW;
int point = c * outputH + h;
src += point * colBlockSize * kStrideW;
dst += point * outputW;
float b = bias ? bias[c] : 0;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
float32x4_t biasV = vdupq_n_f32(b);
#endif
int w = 0;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float)) * 2;
int limit = ((inputW - 1) / kUnroll) * kUnroll;
for (; w < limit; w += kUnroll) {
// We need to interleave in terms of kStrideW units
float32x4_t v0[kStrideW];
float32x4_t v1[kStrideW];
for (const auto i : c10::irange(kStrideW)) {
v0[i] = vld1q_f32(src + i * colBlockSize);
v1[i] = vld1q_f32(src + i * colBlockSize + 4);
}
// add per-channel bias
for (const auto i : c10::irange(kStrideW)) {
v0[i] = vaddq_f32(v0[i], biasV);
v1[i] = vaddq_f32(v1[i], biasV);
}
// Write interleaved into the output
StoreInterleaved<float, kStrideW>::store(dst + 0 * kStrideW, v0);
StoreInterleaved<float, kStrideW>::store(dst + 4 * kStrideW, v1);
src += kUnroll;
dst += kUnroll * kStrideW;
}
#endif
// Handle non-vectorizable remainder
for (; w < inputW - 1; ++w) {
float v[kStrideW];
for (const auto i : c10::irange(kStrideW)) {
v[i] = src[i * colBlockSize];
}
// add per-channel bias
for (const auto i : c10::irange(kStrideW)) {
v[i] += b;
}
// Write interleaved into the output
StoreInterleaved<float, kStrideW>::store(dst, v);
src += 1;
dst += kStrideW;
}
// We have handled 0 .. (inputW - 1) * stride inclusive so far.
// Handle the remainder
int outputPoint = (inputW - 1) * kStrideW;
int block = 0;
// Output width may include adjustment into which we don't
// write; ignore it
while (outputPoint < noAdjOutputW) {
float v = src[block * colBlockSize];
dst[0] = v + b;
++outputPoint;
dst += 1;
++block;
if (block >= kStrideW) {
block = 0;
src += 1;
}
}
// Remainder of the buffer comprised of just the `adj` must have
// bias added
for (; outputPoint < outputW; ++outputPoint) {
dst[0] = b;
dst += 1;
}
}
template <int N, typename T, typename Context>
void reinterleaveMultithreaded(
const T* y0,
const T* bias_data,
T* y,
int outputC,
int outputH,
int outputW,
int inputW,
int kernelW,
int strideW,
int adjH,
ThreadPool* pool) {
// # channels times height
size_t totalTiles = (size_t)outputC * outputH;
FixedDivisor<int> divOutputH(outputH);
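// REINTERLEAVE expands to a reinterleaveRows call with a compile-time stride
// template argument, so the inner loops can use the StoreInterleaved
// specializations defined above.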
#define REINTERLEAVE(N) \
do { \
reinterleaveRows<N>( \
y0, \
bias_data, \
c, \
h, \
y, \
outputC, \
outputH, \
outputW, \
inputW, \
kernelW, \
strideW, \
adjH); \
} while (false)
std::function<void(int, size_t)> fnReinterleave = [&](int threadId,
size_t tileId) {
int h;
int c;
divOutputH.DivMod((int)tileId, &c, &h);
REINTERLEAVE(N);
};
#undef REINTERLEAVE
pool->run(fnReinterleave, totalTiles);
}
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <int N>
struct SumMultiple {
static void sumInto(float* acc, float** toSum, size_t size);
};
template <>
struct SumMultiple<1> {
static void sumInto(float* acc, float** toSum, size_t size) {
constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float));
int limit = (size / kUnroll) * kUnroll;
auto toSum0 = toSum[0];
size_t i = 0;
for (; i < limit; i += kUnroll) {
float32x4_t v0 = vld1q_f32_aligned(acc + i);
float32x4_t v1 = vld1q_f32_aligned(toSum0 + i);
v0 = vaddq_f32(v0, v1);
vst1q_f32_aligned(acc + i, v0);
}
for (; i < size; ++i) {
float v0 = acc[i];
float v1 = toSum0[i];
v0 += v1;
acc[i] = v0;
}
}
};
template <>
struct SumMultiple<2> {
static void sumInto(float* acc, float** toSum, size_t size) {
constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float));
int limit = (size / kUnroll) * kUnroll;
auto toSum0 = toSum[0];
auto toSum1 = toSum[1];
size_t i = 0;
for (; i < limit; i += kUnroll) {
float32x4_t v0 = vld1q_f32_aligned(acc + i);
float32x4_t v1 = vld1q_f32_aligned(toSum0 + i);
float32x4_t v2 = vld1q_f32_aligned(toSum1 + i);
v0 = vaddq_f32(v0, v1);
v0 = vaddq_f32(v0, v2);
vst1q_f32_aligned(acc + i, v0);
}
for (; i < size; ++i) {
float v0 = acc[i];
float v1 = toSum0[i];
float v2 = toSum1[i];
v0 += v1;
v0 += v2;
acc[i] = v0;
}
}
};
template <>
struct SumMultiple<3> {
static void sumInto(float* acc, float** toSum, size_t size) {
constexpr int kUnroll = (sizeof(float32x4_t) / sizeof(float));
int limit = (size / kUnroll) * kUnroll;
auto toSum0 = toSum[0];
auto toSum1 = toSum[1];
auto toSum2 = toSum[2];
size_t i = 0;
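    // Accumulate pairwise (v0 + v1 and v2 + v3, then combine) to shorten the
    // dependency chain compared to a strictly serial accumulation.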
for (; i < limit; i += kUnroll) {
float32x4_t v0 = vld1q_f32_aligned(acc + i);
float32x4_t v1 = vld1q_f32_aligned(toSum0 + i);
float32x4_t v2 = vld1q_f32_aligned(toSum1 + i);
float32x4_t v3 = vld1q_f32_aligned(toSum2 + i);
v0 = vaddq_f32(v0, v1);
v2 = vaddq_f32(v2, v3);
v0 = vaddq_f32(v0, v2);
vst1q_f32_aligned(acc + i, v0);
}
for (; i < size; ++i) {
float v0 = acc[i];
float v1 = toSum0[i];
float v2 = toSum1[i];
float v3 = toSum2[i];
v0 += v1;
v2 += v3;
v0 += v2;
acc[i] = v0;
}
}
};
#endif
// Performs acc[i] += sum_j toSum_j[i] pointwise
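// The NEON specializations above handle one, two, or three addend buffers per
// call; any other count falls through to the Eigen accumulation loop below.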
void sumInto(float* acc, std::vector<float*>& toSum, size_t size) {
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
if (toSum.size() == 1) {
SumMultiple<1>::sumInto(acc, toSum.data(), size);
return;
} else if (toSum.size() == 2) {
SumMultiple<2>::sumInto(acc, toSum.data(), size);
return;
} else if (toSum.size() == 3) {
SumMultiple<3>::sumInto(acc, toSum.data(), size);
return;
}
#endif
// Otherwise, use fallback implementation
EigenVectorArrayMap<float> accT(acc, size);
for (auto p : toSum) {
accT += ConstEigenVectorArrayMap<float>(p, size);
}
}
template <typename T, class Context>
bool ConvTransposeMobileOp<T, Context>::RunOnDeviceWithOrderNCHW() {
const Tensor& X = Input(INPUT);
auto& filter = Input(FILTER);
const int N = X.dim32(0), M = X.dim32(1), H = X.dim32(2), W = X.dim32(3);
CAFFE_ENFORCE(filter.ndim() == 4, "filter must be 4D tensor");
CAFFE_ENFORCE(
filter.dim32(0) == M,
"filter number must be equal to input channel number");
const int C = filter.dim32(1);
CAFFE_ENFORCE(
filter.dim32(2) == this->kernel_h(),
"filter height must be equal to kernel height");
CAFFE_ENFORCE(
filter.dim32(3) == this->kernel_w(),
"filter width must be equal to kernel width");
if (InputSize() == 3) {
auto& bias = Input(BIAS);
CAFFE_ENFORCE(bias.ndim() == 1, "bias must be 1D tensor");
CAFFE_ENFORCE(
bias.dim32(0) == C,
"bias dimension must be equal to output channel number");
}
auto sizes = ConvTransposeUnpoolBase<Context>::GetOutputSize(X, C);
Tensor* Y = Output(0, sizes, at::dtype<T>());
if (X.numel() == 0) {
VLOG(2) << "Number of elements is 0 in ConvTransposeOp";
return true;
}
const int outputH = Y->dim32(2);
const int outputW = Y->dim32(3);
const int outputPlaneSize = outputH * outputW;
const int outputBatchElementSize = Y->dim32(1) * outputPlaneSize;
auto Xdata = X.template data<T>();
auto Ydata = Y->template mutable_data<T>();
auto pool = ws_->GetThreadPool();
auto numThreads = pool->getNumThreads();
// Initialize per-thread buffers for output
// The main thread will write directly into the output Y, we just
// need buffers for the worker threads
size_t colBlockSize = W + this->kernel_w() / this->stride_w();
size_t threadYBufferSize = C * outputH * colBlockSize * this->stride_w();
  // Require 16-byte alignment, i.e. 4-float alignment, since these are floats.
size_t threadYBufferSizeAligned =
((C * outputH * colBlockSize * this->stride_w() + 3) / 4) * 4;
size_t threadColBufferSize = C * this->kernel_h() * this->kernel_w() * W;
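  // The shared scratch tensor is laid out as
  //   [numThreads x threadYBufferSizeAligned][numThreads x threadColBufferSize]
  // and each worker indexes into its own Y slice and column-buffer slice.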
// Work around GCC 4.9 bug when this is declared inside the inner lambda.
auto runLocalTile = [&](TensorCPU* threadBuffer,
int threadId,
size_t tileId) {
auto localYData = threadBuffer->template mutable_data<T>() +
threadId * threadYBufferSizeAligned;
auto localColBufferData = threadBuffer->template mutable_data<T>() +
numThreads * threadYBufferSizeAligned + threadId * threadColBufferSize;
runTileContiguous<T, Context>(
tileId,
N,
M,
H,
W,
outputH,
outputW,
C,
this->kernel_h(),
this->kernel_w(),
this->stride_h(),
this->stride_w(),
this->pad_t(),
filter.template data<T>(),
Xdata,
localColBufferData,
localYData,
&context_);
};
auto f = [&](Tensor* threadBuffer) {
threadBuffer->Resize(
numThreads * threadYBufferSizeAligned +
numThreads * threadColBufferSize);
// Group together thread buffers for accumulation
std::vector<T*> toSum(numThreads - 1);
for (const auto i : c10::irange(1, numThreads)) {
toSum[i - 1] = threadBuffer->template mutable_data<T>() +
i * threadYBufferSizeAligned;
}
for (const auto image_id : c10::irange(N)) {
// Each time through, we have to reset all per-thread output
// buffers, since the output buffer is only per-batch element
// The column buffers are overwritten by the matrix multiplication
// each time, so we need not clear them out each round
math::Set<T, Context>(
numThreads * threadYBufferSizeAligned,
0,
threadBuffer->template mutable_data<T>(),
&context_);
// Run tiled gemm and col2im in our threadpool; all of these tiles
// are guaranteed to be full tiles
// Each tile handles a single row of the input
pool->run(
[&](int threadId, int tileId) {
runLocalTile(threadBuffer, threadId, tileId);
},
H);
// We need to accumulate the per-thread results into the output
// Y; the first worker thread (main thread) already produced its
// results in Y
sumInto(
threadBuffer->template mutable_data<T>(), toSum, threadYBufferSize);
// y0 now contains the final output, but it is in deinterleaved
// form. We have to re-interleave it to produce the final form in Y
// This operation also handles adding the per-channel bias.
#define REINTERLEAVE(N) \
do { \
reinterleaveMultithreaded<N, T, Context>( \
threadBuffer->template mutable_data<T>(), \
InputSize() == 3 ? Input(BIAS).template data<T>() : nullptr, \
Ydata, \
Y->dim32(1), \
Y->dim32(2), \
Y->dim32(3), \
W, \
this->kernel_w(), \
this->stride_w(), \
this->adj_h(), \
pool); \
} while (false)
if (this->stride_w() == 1) {
REINTERLEAVE(1);
} else if (this->stride_w() == 2) {
REINTERLEAVE(2);
} else if (this->stride_w() == 3) {
REINTERLEAVE(3);
} else if (this->stride_w() == 4) {
REINTERLEAVE(4);
}
#undef REINTERLEAVE
Xdata += M * H * W;
Ydata += Y->size() / Y->dim32(0);
}
};
if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {
runWithSharedBuffer<Context>(ws_, f);
} else {
f(&threadBuffer_);
}
return true;
}
template <typename T, class Context>
bool ConvTransposeMobileOp<T, Context>::RunOnDeviceWithOrderNHWC() {
CAFFE_THROW("Not implemented.");
}
} // namespace caffe2
#endif // C10_MOBILE
#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_MOBILE_OP_IMPL_H_
| 19,632
| 26.967236
| 79
|
h
|
null |
pytorch-main/caffe2/operators/conv_transpose_unpool_op_base.h
|
#ifndef CAFFE2_OPERATORS_CONV_TRANSPOSE_UNPOOL_OP_BASE_H_
#define CAFFE2_OPERATORS_CONV_TRANSPOSE_UNPOOL_OP_BASE_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_op_shared.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/proto/caffe2_legacy.pb.h"
#include "caffe2/utils/math.h"
C10_DECLARE_bool(caffe2_force_shared_col_buffer);
namespace caffe2 {
template <class Context>
class ConvTransposeUnpoolBase : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit ConvTransposeUnpoolBase(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
legacy_pad_(
static_cast<LegacyPadding>(this->template GetSingleArgument<int>(
"legacy_pad",
LegacyPadding::NOTSET))),
kernel_(this->template GetRepeatedArgument<int>("kernels")),
stride_(this->template GetRepeatedArgument<int>("strides")),
pads_(this->template GetRepeatedArgument<int>("pads")),
adj_(this->template GetRepeatedArgument<int>("adjs")),
group_(this->template GetSingleArgument<int>("group", 1)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))),
shared_buffer_(
this->template GetSingleArgument<int>("shared_buffer", 0)),
ws_(ws) {
// For the padding, they should either be the legacy padding strategy
// (VALID or SAME), or an explicit, non-negative value.
if (legacy_pad_ == LegacyPadding::VALID ||
legacy_pad_ == LegacyPadding::SAME) {
CAFFE_ENFORCE(
!OperatorBase::HasArgument("pads"),
"If you use legacy padding VALID or SAME, you should not specify "
"any specific padding values.");
}
// Get old arguments values.
if (OperatorBase::HasArgument("kernel")) {
kernel_.resize(2, this->template GetSingleArgument<int>("kernel", 0));
} else if (
OperatorBase::HasArgument("kernel_h") &&
OperatorBase::HasArgument("kernel_w")) {
kernel_.push_back(this->template GetSingleArgument<int>("kernel_h", 0));
kernel_.push_back(this->template GetSingleArgument<int>("kernel_w", 0));
}
if (OperatorBase::HasArgument("stride")) {
stride_.resize(2, this->template GetSingleArgument<int>("stride", 0));
} else if (
OperatorBase::HasArgument("stride_h") &&
OperatorBase::HasArgument("stride_w")) {
stride_.push_back(this->template GetSingleArgument<int>("stride_h", 0));
stride_.push_back(this->template GetSingleArgument<int>("stride_w", 0));
}
if (OperatorBase::HasArgument("adj")) {
adj_.resize(2, this->template GetSingleArgument<int>("adj", 0));
} else if (
OperatorBase::HasArgument("adj_h") &&
OperatorBase::HasArgument("adj_w")) {
adj_.push_back(this->template GetSingleArgument<int>("adj_h", 0));
adj_.push_back(this->template GetSingleArgument<int>("adj_w", 0));
}
if (OperatorBase::HasArgument("pad")) {
CAFFE_ENFORCE(
legacy_pad_ != LegacyPadding::VALID &&
legacy_pad_ != LegacyPadding::SAME,
"If you use legacy padding VALID or SAME, you should not specify "
"any specific padding values.");
pads_.resize(4, this->template GetSingleArgument<int>("pad", 0));
} else if (
OperatorBase::HasArgument("pad_t") &&
OperatorBase::HasArgument("pad_l") &&
OperatorBase::HasArgument("pad_b") &&
OperatorBase::HasArgument("pad_r")) {
CAFFE_ENFORCE(
legacy_pad_ != LegacyPadding::VALID &&
legacy_pad_ != LegacyPadding::SAME,
"If you use legacy padding VALID or SAME, you should not specify "
"any specific padding values.");
pads_.push_back(this->template GetSingleArgument<int>("pad_t", 0));
pads_.push_back(this->template GetSingleArgument<int>("pad_l", 0));
pads_.push_back(this->template GetSingleArgument<int>("pad_b", 0));
pads_.push_back(this->template GetSingleArgument<int>("pad_r", 0));
}
// Fill default values.
if (kernel_.size() == 0) {
kernel_.assign({0, 0});
}
if (stride_.size() == 0) {
stride_.resize(kernel_.size(), 1);
}
if (pads_.size() == 0) {
pads_.resize(kernel_.size() * 2, 0);
}
if (adj_.size() == 0) {
adj_.resize(kernel_.size(), 0);
}
CAFFE_ENFORCE_EQ(stride_.size(), kernel_.size());
CAFFE_ENFORCE_EQ(adj_.size(), kernel_.size());
if (legacy_pad_ != LegacyPadding::VALID &&
legacy_pad_ != LegacyPadding::SAME) {
CAFFE_ENFORCE_EQ(pads_.size(), 2 * kernel_.size());
}
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto dim : c10::irange(kernel_.size())) {
CAFFE_ENFORCE_GT(kernel_[dim], 0);
CAFFE_ENFORCE_GT(stride_[dim], 0);
CAFFE_ENFORCE_GE(adj_[dim], 0);
CAFFE_ENFORCE_LE(adj_[dim], stride_[dim]);
}
// Create shared buffer mutex in the constructor
// to avoid race-condition in DAGNet.
if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {
createSharedBuffer<Context>(ws_);
}
}
// Gets the output size. The output channel is manually specified.
std::vector<int64_t> GetOutputSize(const Tensor& input, int output_channel) {
CAFFE_ENFORCE(4 == input.dim());
CAFFE_ENFORCE_GT(input.size_from_dim(1), 0);
int N = input.dim32(0);
bool channel_first = false; // initialized to suppress compiler warning.
int H = 0, W = 0; // initialized to suppress compiler warning.
int M = 0;
switch (order_) {
case StorageOrder::NHWC:
channel_first = false;
H = input.dim32(1);
W = input.dim32(2);
M = input.dim32(3);
break;
case StorageOrder::NCHW:
channel_first = true;
M = input.dim32(1);
H = input.dim32(2);
W = input.dim32(3);
break;
default:
LOG(FATAL) << "Unknown Storage order: " << order_;
}
int output_height = 0, output_width = 0;
ComputeSizeAndPad(
H,
stride_[0],
kernel_[0],
adj_[0],
&pads_[0],
&pads_[2],
&output_height);
ComputeSizeAndPad(
W,
stride_[1],
kernel_[1],
adj_[1],
&pads_[1],
&pads_[3],
&output_width);
std::vector<int64_t> sizes;
if (channel_first) {
sizes = {N, output_channel, output_height, output_width};
} else {
sizes = {N, output_height, output_width, output_channel};
}
VLOG(2) << "In: N " << N << " M " << M << " H " << H << " W " << W;
VLOG(2) << "Out: output_channel " << output_channel << " H "
<< output_height << " W " << output_width;
return sizes;
}
bool RunOnDevice() override {
switch (order_) {
case StorageOrder::NHWC:
return RunOnDeviceWithOrderNHWC();
case StorageOrder::NCHW:
return RunOnDeviceWithOrderNCHW();
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// To suppress old compiler warnings
return true;
}
virtual bool RunOnDeviceWithOrderNCHW() {
CAFFE_THROW("Not implemented");
}
virtual bool RunOnDeviceWithOrderNHWC() {
CAFFE_THROW("Not implemented");
}
~ConvTransposeUnpoolBase() override {}
protected:
// Accessors for 2D conv params.
inline int pad_t() const {
return pads_[0];
}
inline int pad_l() const {
return pads_[1];
}
inline int pad_b() const {
return pads_[2];
}
inline int pad_r() const {
return pads_[3];
}
inline int kernel_h() const {
return kernel_[0];
}
inline int kernel_w() const {
return kernel_[1];
}
inline int stride_h() const {
return stride_[0];
}
inline int stride_w() const {
return stride_[1];
}
inline int adj_h() const {
return adj_[0];
}
inline int adj_w() const {
return adj_[1];
}
inline void ComputeSizeAndPad(
const int in_size,
const int stride,
const int kernel,
const int adj,
int* pad_head,
int* pad_tail,
int* out_size) {
switch (legacy_pad_) {
case LegacyPadding::NOTSET:
CAFFE_ENFORCE(*pad_head >= 0);
CAFFE_ENFORCE(*pad_tail >= 0);
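        // Transposed convolution inverts the usual output-size formula; e.g.
        // in_size = 4, stride = 2, kernel = 3, adj = 0 and no padding gives
        // out_size = (4 - 1) * 2 + 3 = 9.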
*out_size =
(in_size - 1) * stride + kernel + adj - *pad_head - *pad_tail;
break;
// We handle cases of LegacyPadding::VALID and LegacyPadding::SAME
// the same way
case LegacyPadding::VALID:
case LegacyPadding::SAME:
*pad_head = 0;
*pad_tail = 0;
*out_size = (in_size - 1) * stride + kernel + adj;
break;
case LegacyPadding::CAFFE_LEGACY_POOLING:
LOG(FATAL) << "CAFFE_LEGACY_POOLING is no longer supported.";
break;
}
}
LegacyPadding legacy_pad_;
int pad_;
std::vector<int> kernel_;
std::vector<int> stride_;
std::vector<int> pads_;
std::vector<int> adj_;
int group_;
StorageOrder order_;
bool shared_buffer_;
Workspace* ws_;
};
#define USE_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS(Context) \
USE_OPERATOR_FUNCTIONS(Context); \
using ConvTransposeUnpoolBase<Context>::kernel_; \
using ConvTransposeUnpoolBase<Context>::kernel_h; \
using ConvTransposeUnpoolBase<Context>::kernel_w; \
using ConvTransposeUnpoolBase<Context>::stride_; \
using ConvTransposeUnpoolBase<Context>::stride_h; \
using ConvTransposeUnpoolBase<Context>::stride_w; \
using ConvTransposeUnpoolBase<Context>::pads_; \
using ConvTransposeUnpoolBase<Context>::pad_t; \
using ConvTransposeUnpoolBase<Context>::pad_l; \
using ConvTransposeUnpoolBase<Context>::pad_b; \
using ConvTransposeUnpoolBase<Context>::pad_r; \
using ConvTransposeUnpoolBase<Context>::adj_; \
using ConvTransposeUnpoolBase<Context>::group_; \
using ConvTransposeUnpoolBase<Context>::order_; \
using ConvTransposeUnpoolBase<Context>::shared_buffer_; \
using ConvTransposeUnpoolBase<Context>::ws_
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CONV_TRANSPOSE_UNPOOL_OP_BASE_H_
| 10,308
| 31.215625
| 79
|
h
|
null |
pytorch-main/caffe2/operators/copy_op.h
|
#ifndef CAFFE2_OPERATORS_COPY_OP_H_
#define CAFFE2_OPERATORS_COPY_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(CopyGPUToCPU)
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(CopyCPUToGPU)
namespace caffe2 {
template <class Context, class DstContext, class SrcContext>
class CopyOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(CopyOp)
bool RunOnDevice() override {
auto& input = this->template Input<Tensor>(0, SrcContext::GetDeviceType());
auto* output =
this->template Output<Tensor>(0, DstContext::GetDeviceType());
output->ResizeLike(input);
this->context_.template CopyItems<SrcContext, DstContext>(
input.dtype(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.dtype()));
return true;
}
};
template <class Context, class DstContext, class SrcContext>
class CopyOnDeviceLikeOp : public CopyOp<Context, DstContext, SrcContext> {
public:
template <class... Args>
explicit CopyOnDeviceLikeOp(Args&&... args)
: CopyOp<Context, DstContext, SrcContext>(std::forward<Args>(args)...) {}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_COPY_OP_H_
| 1,296
| 28.477273
| 79
|
h
|
null |
pytorch-main/caffe2/operators/copy_rows_to_tensor_op.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include <unordered_map>
#include <unordered_set>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
namespace caffe2 {
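// Copies a single row of values into the rows of an existing 2-D tensor
// selected by INDICES. The op works in place: output 0 must alias input 0.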
template <class Context>
class CopyRowsToTensorOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CopyRowsToTensorOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override {
return DispatchHelper<
TensorTypes<at::Half, float, double, int32_t, int64_t>>::
call(this, Input(INPUT_TENSOR));
}
template <typename T>
bool DoRunWithType() {
auto& input_tensor = Input(INPUT_TENSOR);
auto& indices = Input(INDICES);
auto& row = Input(ROW);
auto tensor_width = input_tensor.size(1);
CAFFE_ENFORCE_EQ(input_tensor.dim(), 2, "INPUT_TENSOR should be 2-d");
CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES should be 1-d");
CAFFE_ENFORCE_EQ(row.dim(), 1, "ROW should be 1-d");
CAFFE_ENFORCE_EQ(
tensor_width,
row.size(0),
"width of input tensor should match lengths of row");
const auto* indices_data = indices.template data<int64_t>();
const auto* row_data = row.template data<T>();
auto* output = Output(0);
auto* output_data = output->template mutable_data<T>();
CAFFE_ENFORCE(
IsInputOutputAlias(0, 0), "Input 0 and Output 0 should be alias.");
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (size_t i = 0; i < indices.sizes()[0]; ++i) {
std::memcpy(
output_data + indices_data[i] * tensor_width,
row_data,
tensor_width * sizeof(T));
}
return true;
}
protected:
INPUT_TAGS(INPUT_TENSOR, INDICES, ROW);
};
template <class Context>
class CopyRowsToTensorGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CopyRowsToTensorGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override {
return DispatchHelper<
TensorTypes<at::Half, float, double, int32_t, int64_t>>::
call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
auto* output = Output(0);
output->ResizeLike(Input(0));
auto* output_data = output->template mutable_data<T>();
auto& input = Input(0);
const auto* input_data = input.template data<T>();
std::memcpy(output_data, input_data, input.size(0) * sizeof(T));
return true;
}
};
} // namespace caffe2
| 2,599
| 29.952381
| 76
|
h
|
null |
pytorch-main/caffe2/operators/cosine_embedding_criterion_op.h
|
#ifndef CAFFE2_OPERATORS_COSINE_EMBEDDING_CRITERION_OP_H_
#define CAFFE2_OPERATORS_COSINE_EMBEDDING_CRITERION_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class CosineEmbeddingCriterionOp final : public Operator<Context> {
public:
template <class... Args>
explicit CosineEmbeddingCriterionOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(float, "margin", margin_, 0.0) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
float margin_;
};
template <class Context>
class CosineEmbeddingCriterionGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit CosineEmbeddingCriterionGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(float, "margin", margin_, 0.0) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
float margin_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_COSINE_EMBEDDING_CRITERION_OP_H_
| 1,127
| 25.232558
| 75
|
h
|
null |
pytorch-main/caffe2/operators/counter_ops.h
|
#ifndef CAFFE2_OPERATORS_COUNTER_OPS_H
#define CAFFE2_OPERATORS_COUNTER_OPS_H
#include <atomic>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T>
class TORCH_API Counter {
public:
explicit Counter(T count) : count_(count) {}
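  // Atomically decrements the counter. Returns false while counts remain
  // (i.e. the pre-decrement value was positive) and true once exhausted.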
bool countDown() {
if (count_-- > 0) {
return false;
}
return true;
}
T countUp() {
return count_++;
}
T retrieve() const {
return count_.load();
}
T checkIfDone() const {
return (count_.load() <= 0);
}
T reset(T init_count) {
return count_.exchange(init_count);
}
private:
std::atomic<T> count_;
};
// TODO(jiayq): deprecate these ops & consolidate them with IterOp/AtomicIterOp
template <typename T, class Context>
class CreateCounterOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CreateCounterOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
init_count_(this->template GetSingleArgument<T>("init_count", 0)) {
CAFFE_ENFORCE_LE(0, init_count_, "negative init_count is not permitted.");
}
bool RunOnDevice() override {
*this->template Output<std::unique_ptr<Counter<T>>>(0) =
std::unique_ptr<Counter<T>>(new Counter<T>(init_count_));
return true;
}
private:
T init_count_ = 0;
};
template <typename T, class Context>
class ResetCounterOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ResetCounterOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
init_count_(this->template GetSingleArgument<T>("init_count", 0)) {
CAFFE_ENFORCE_LE(0, init_count_, "negative init_count is not permitted.");
}
bool RunOnDevice() override {
auto& counterPtr = this->template Input<std::unique_ptr<Counter<T>>>(0);
auto previous = counterPtr->reset(init_count_);
if (OutputSize() == 1) {
auto* output = Output(0);
output->Resize();
*output->template mutable_data<T>() = previous;
}
return true;
}
private:
T init_count_;
};
// Will always use TensorCPU regardless of the Context
template <typename T, class Context>
class CountDownOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CountDownOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
auto& counterPtr = this->template Input<std::unique_ptr<Counter<T>>>(0);
auto* output = Output(0);
output->Resize(std::vector<int>{});
*output->template mutable_data<bool>() = counterPtr->countDown();
return true;
}
};
// Will always use TensorCPU regardless of the Context
template <typename T, class Context>
class CheckCounterDoneOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CheckCounterDoneOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
auto& counterPtr = this->template Input<std::unique_ptr<Counter<T>>>(0);
auto* output = Output(0);
output->Resize(std::vector<int>{});
*output->template mutable_data<bool>() = counterPtr->checkIfDone();
return true;
}
};
// Will always use TensorCPU regardless of the Context
template <typename T, class Context>
class CountUpOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CountUpOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
auto& counterPtr = this->template Input<std::unique_ptr<Counter<T>>>(0);
auto* output = Output(0);
output->Resize(std::vector<int>{});
*output->template mutable_data<T>() = counterPtr->countUp();
return true;
}
};
// Will always use TensorCPU regardless of the Context
template <typename T, class Context>
class RetrieveCountOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit RetrieveCountOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
auto& counterPtr = this->template Input<std::unique_ptr<Counter<T>>>(0);
auto* output = Output(0);
output->Resize(std::vector<int>{});
*output->template mutable_data<T>() = counterPtr->retrieve();
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_COUNTER_OPS_H
| 4,596
| 26.860606
| 79
|
h
|
null |
pytorch-main/caffe2/operators/create_scope_op.h
|
#ifndef CAFFE2_OPERATORS_CREATE_SCOPE_OP_H_
#define CAFFE2_OPERATORS_CREATE_SCOPE_OP_H_
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/proto/caffe2_pb.h"
C10_DECLARE_bool(caffe2_workspace_stack_debug);
namespace caffe2 {
namespace detail {
/*
 * Keeps track of forward and backward gradient workspaces in a stack,
 * reusing previously created workspaces; not thread-safe.
*/
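// pushForwardWorkspace advances the stack top, creating a child workspace of
// parent_ws or reusing a previously created one; popGradientWorkspace rebinds
// gradient blobs on the current top and moves the top back down. clear()
// resets the top but keeps the workspaces around for reuse.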
class TORCH_API WorkspaceStack {
public:
explicit WorkspaceStack() : parent_ws_(nullptr), top_(-1) {}
std::shared_ptr<Workspace> pushForwardWorkspace(Workspace* parent_ws) {
return pushForwardWorkspace(
parent_ws, std::unordered_map<std::string, std::string>());
}
std::shared_ptr<Workspace> pushForwardWorkspace(
Workspace* parent_ws,
const std::unordered_map<std::string, std::string>& blob_bindings) {
checkStack();
if (FLAGS_caffe2_workspace_stack_debug) {
if (parent_ws_) {
CAFFE_ENFORCE_EQ(parent_ws_, parent_ws, "Parent workspace mismatch");
} else {
parent_ws_ = parent_ws;
}
if (!blob_bindings_.empty()) {
checkBindingsMatch(blob_bindings_, blob_bindings);
} else {
blob_bindings_ = blob_bindings;
}
}
if (top_ == workspaces_.size() - 1) {
workspaces_.push_back(
std::make_shared<Workspace>(parent_ws, blob_bindings));
} else {
// when reusing workspace, make sure copies of external blobs are
// removed and blob bindings are set
auto& workspace = workspaces_[top_ + 1];
const auto& local_blobs = workspace->LocalBlobs();
std::unordered_set<std::string> local_blobs_set;
local_blobs_set.insert(local_blobs.begin(), local_blobs.end());
bool found_local_copy = false;
for (const auto& blob_pair : blob_bindings) {
if (local_blobs_set.count(blob_pair.first)) {
workspace->RemoveBlob(blob_pair.first);
found_local_copy = true;
}
}
if (found_local_copy) {
workspace->AddBlobMapping(parent_ws, blob_bindings);
}
}
return workspaces_[++top_];
}
std::shared_ptr<Workspace> popGradientWorkspace(
Workspace* parent_ws,
const std::unordered_map<std::string, std::string>& grad_blob_bindings) {
checkStack();
if (FLAGS_caffe2_workspace_stack_debug) {
if (parent_ws_) {
CAFFE_ENFORCE_EQ(parent_ws_, parent_ws, "Parent workspace mismatch");
} else {
parent_ws_ = parent_ws;
}
if (!grad_blob_bindings_.empty()) {
checkBindingsMatch(grad_blob_bindings_, grad_blob_bindings);
} else {
grad_blob_bindings_ = grad_blob_bindings;
}
}
if (top_ < 0) {
return nullptr;
}
auto& grad_workspace = workspaces_[top_];
grad_workspace->AddBlobMapping(parent_ws, grad_blob_bindings, true);
--top_;
return grad_workspace;
}
std::shared_ptr<Workspace> reuseLastForwardWorkspace(Workspace* parent_ws) {
return reuseLastForwardWorkspace(
parent_ws, std::unordered_map<std::string, std::string>());
}
std::shared_ptr<Workspace> reuseLastForwardWorkspace(
Workspace* parent_ws,
const std::unordered_map<std::string, std::string>& blob_bindings) {
checkStack();
if (top_ < 0) {
return nullptr;
}
workspaces_[top_]->AddBlobMapping(parent_ws, blob_bindings);
return workspaces_[top_];
}
void clear() {
checkStack();
top_ = -1;
}
bool empty() const {
return top_ < 0;
}
private:
void checkStack() const {
CAFFE_ENFORCE_GT(
(int)workspaces_.size(), top_, "Corrupted workspaces stack");
}
void checkBindingsMatch(
const std::unordered_map<std::string, std::string>& bindings,
const std::unordered_map<std::string, std::string>& test_bindings) const {
CAFFE_ENFORCE_EQ(
bindings.size(), test_bindings.size(), "Blob bindings mismatch");
for (const auto& blob_binding : bindings) {
CAFFE_ENFORCE(
test_bindings.count(blob_binding.first), "Blob bindings mismatch");
CAFFE_ENFORCE_EQ(
test_bindings.at(blob_binding.first),
blob_binding.second,
"Blob bindings mismatch");
}
}
std::unordered_map<std::string, std::string> blob_bindings_;
std::unordered_map<std::string, std::string> grad_blob_bindings_;
Workspace* parent_ws_;
int top_;
std::vector<std::shared_ptr<Workspace>> workspaces_;
};
} // namespace detail
template <class Context>
class CreateScopeOp final : public Operator<Context> {
public:
template <class... Args>
explicit CreateScopeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
template <class Context>
class HasScopeOp final : public Operator<Context> {
public:
template <class... Args>
explicit HasScopeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CREATE_SCOPE_OP_H_
| 5,232
| 28.234637
| 80
|
h
|
null |
pytorch-main/caffe2/operators/cross_entropy_op.h
|
#ifndef CAFFE2_OPERATORS_CROSS_ENTROPY_OP_H_
#define CAFFE2_OPERATORS_CROSS_ENTROPY_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class LabelCrossEntropyOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(LabelCrossEntropyOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
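  // Lower bound for predicted probabilities when taking logs, keeping the
  // loss finite when a probability underflows to zero.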
static constexpr T kLOG_THRESHOLD() {
return static_cast<T>(1e-20);
}
// Input: X, label
// Output: Y
};
template <typename T, class Context>
class LabelCrossEntropyGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(LabelCrossEntropyGradientOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: X, label, dY
  // Output: dX. There is no gradient with respect to the label.
static constexpr T kLOG_THRESHOLD() {
return static_cast<T>(1e-20);
}
};
// Hacky: turns a vector of probabilities into a 2-column matrix with
// complementary probabilities for binary classification
template <typename T, class Context>
class MakeTwoClassOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(MakeTwoClassOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: X
// Output: Y = vstack(1-X, X)
};
template <typename T, class Context>
class MakeTwoClassGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(MakeTwoClassGradientOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: dY
  // Output: dX
};
template <typename T, class Context>
class SigmoidCrossEntropyWithLogitsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SigmoidCrossEntropyWithLogitsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
log_D_trick_(
this->template GetSingleArgument<bool>("log_D_trick", false)),
unjoined_lr_loss_(
this->template GetSingleArgument<bool>("unjoined_lr_loss", false)) {
CAFFE_ENFORCE(
!(log_D_trick_ && unjoined_lr_loss_),
"log_D_trick_ and unjoined_lr_loss_ cannot be set as True simultaneously");
}
bool RunOnDevice() override;
protected:
bool log_D_trick_;
bool unjoined_lr_loss_;
};
template <typename T, class Context>
class SigmoidCrossEntropyWithLogitsGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SigmoidCrossEntropyWithLogitsGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
log_D_trick_(
this->template GetSingleArgument<bool>("log_D_trick", false)),
unjoined_lr_loss_(
this->template GetSingleArgument<bool>("unjoined_lr_loss", false)) {
}
bool RunOnDevice() override;
protected:
bool log_D_trick_;
bool unjoined_lr_loss_;
};
template <typename T, class Context>
class WeightedSigmoidCrossEntropyWithLogitsOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(WeightedSigmoidCrossEntropyWithLogitsOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
template <typename T, class Context>
class WeightedSigmoidCrossEntropyWithLogitsGradientOp final
: public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(WeightedSigmoidCrossEntropyWithLogitsGradientOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
template <typename T, class Context>
class TORCH_API CrossEntropyOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(CrossEntropyOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: X, label
// Output: Y
static constexpr T kLOG_THRESHOLD() {
return static_cast<T>(1e-20);
}
};
template <typename T, class Context>
class TORCH_API CrossEntropyGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(CrossEntropyGradientOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: X, label, dY
  // Output: dX. There is no gradient with respect to the label.
static constexpr T kLOG_THRESHOLD() {
return static_cast<T>(1e-20);
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CROSS_ENTROPY_OP_H_
| 4,420
| 26.63125
| 83
|
h
|
null |
pytorch-main/caffe2/operators/ctc_beam_search_decoder_op.h
|
#ifndef CAFFE2_OPERATORS_CTC_BEAM_SEARCH_OP_H_
#define CAFFE2_OPERATORS_CTC_BEAM_SEARCH_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class CTCBeamSearchDecoderOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CTCBeamSearchDecoderOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
beam_width_ = this->template GetSingleArgument<int32_t>("beam_width", 10);
num_candidates_ =
this->template GetSingleArgument<int32_t>("num_candidates", 1);
prune_threshold_ =
this->template GetSingleArgument<float>("prune_threshold", 0.001);
DCHECK(beam_width_ >= num_candidates_);
}
bool RunOnDevice() override;
protected:
int32_t beam_width_;
int32_t num_candidates_;
float prune_threshold_;
INPUT_TAGS(INPUTS, SEQ_LEN);
OUTPUT_TAGS(OUTPUT_LEN, VALUES, OUTPUT_PROB);
// Input: X, 3D tensor; L, 1D tensor. Output: Y sparse tensor
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CTC_BEAM_SEARCH_OP_H_
| 1,102
| 28.026316
| 78
|
h
|
null |
pytorch-main/caffe2/operators/ctc_greedy_decoder_op.h
|
#ifndef CAFFE2_OPERATORS_CTC_GREEDY_DECODER_OP_H_
#define CAFFE2_OPERATORS_CTC_GREEDY_DECODER_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class CTCGreedyDecoderOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CTCGreedyDecoderOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
merge_repeated_ =
this->template GetSingleArgument<bool>("merge_repeated", true);
}
bool RunOnDevice() override;
protected:
bool merge_repeated_;
INPUT_TAGS(INPUTS, SEQ_LEN);
OUTPUT_TAGS(OUTPUT_LEN, VALUES);
// Input: X, 3D tensor; L, 1D tensor. Output: Y sparse tensor
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CTC_GREEDY_DECODER_OP_H_
| 817
| 24.5625
| 71
|
h
|
null |
pytorch-main/caffe2/operators/dataset_ops.h
|
#ifndef CAFFE2_OPERATORS_DATASET_OPS_H_
#define CAFFE2_OPERATORS_DATASET_OPS_H_
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
namespace dataset_ops {
// used for lengths tensors in the dataset
using TLength = int32_t;
// used for all internal dataset operations (offsets, sizes to read, etc.)
using TOffset = int64_t;
/**
* Provides functionality to iterate across a list of tensors where some
* of those tensors represent lengths in a hierarchical structure.
*/
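// Illustrative example: a field list such as {"lengths", "values"} describes
// a ragged structure in which "lengths" partitions "values"; advance() moves
// all fields forward in lock-step, updating per-field offsets, sizes and
// limits.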
class TreeIterator {
public:
struct FieldDesc {
int id;
int lengthFieldId = -1;
std::string name;
};
explicit TreeIterator(const std::vector<std::string>& fields);
void advance(
const std::vector<const TLength*>& lengths,
std::vector<TOffset>& offsets,
std::vector<TOffset>& sizes,
std::vector<TOffset>& limits,
TOffset num);
  // Corresponds to the number of fields that have "length" as their last name
int numLengthFields() const {
return lengthFieldIds_.size();
}
// Corresponds to the number of length fields + 1 (for the top-level domain)
int numOffsetFields() const {
return numLengthFields() + 1;
}
// Get lengthField description for the given field
const FieldDesc* lengthFieldFor(const FieldDesc& desc) {
return (desc.lengthFieldId == -1)
? nullptr
: &fields_.at(lengthFieldIds_.at(desc.lengthFieldId));
}
// Get lengthField description for the given lengthFieldId, where
// 0 <= lengthFieldId < numLengthFields()
const FieldDesc& lengthField(int lengthFieldId) {
return fields_.at(lengthFieldIds_.at(lengthFieldId));
}
// Returns the index into the 'offset' vector for the given field.
int offsetFieldIdFor(const FieldDesc& fieldDesc) {
return fieldDesc.lengthFieldId + 1;
}
// Returns the field description for all fields.
const std::vector<FieldDesc>& fields() {
return fields_;
}
const std::vector<int>& lengthFieldIds() const {
return lengthFieldIds_;
}
private:
// Description of each field
std::vector<FieldDesc> fields_;
// Index into fields_ above for the fields that are lengths.
std::vector<int> lengthFieldIds_;
};
class TreeCursor {
public:
explicit TreeCursor(const TreeIterator& iterator) : it(iterator) {}
std::vector<TOffset> offsets;
std::mutex mutex_;
TreeIterator it;
};
/**
 * Simple wrapper class allowing easy traversal of the tensors representing
* the hierarchical structure.
*/
class TreeWalker {
public:
TreeWalker(const vector<const Blob*>& inputs, TreeCursor& cursor);
// Returns the number of records in a dataset
inline TOffset size() const {
return limits_.at(0);
}
void advance();
private:
inline const TensorCPU& input(int32_t idx) const {
return inputs_[idx]->Get<TensorCPU>();
}
// TODO: Change to fieldDesc
inline const TreeIterator::FieldDesc& field(int idx) const {
return cursor_.it.fields().at(idx);
}
inline int lengthIdx(int fieldId) const {
return field(fieldId).lengthFieldId + 1;
}
inline TOffset offset(int fieldId) const {
return prevOffsets_[lengthIdx(fieldId)];
}
std::vector<int64_t> fieldDim(int fieldId) const;
void* fieldPtr(int fieldId) const;
public:
// Simple Proxy class to expose nicer API for field access
class Field {
public:
Field(TreeWalker& walker, int fieldId)
: walker_(walker), fieldId_(fieldId) {}
inline std::vector<int64_t> dim() const {
return walker_.fieldDim(fieldId_);
}
inline int64_t size() const {
int64_t size = 1;
for (const auto d : dim()) {
size *= d;
}
return size;
}
inline const TypeMeta meta() const {
return walker_.input(fieldId_).dtype();
}
inline void* ptr() const {
return walker_.fieldPtr(fieldId_);
}
int fieldId() const {
return fieldId_;
}
inline TOffset offset() const {
return walker_.offset(fieldId_);
}
private:
const TreeWalker& walker_;
const int fieldId_;
};
// Notice that a reference is returned. If advance() is called the fields will
// be updated to represent the new state.
inline const std::vector<Field>& fields() const {
return fields_;
}
private:
void gatherLengthData();
void gatherSizeLimits();
const vector<const Blob*>& inputs_;
TreeCursor& cursor_;
std::vector<Field> fields_;
std::vector<const TLength*> lengths_;
std::vector<TOffset> limits_;
std::vector<TOffset> sizes_;
std::vector<TOffset> offsets_;
std::vector<TOffset> prevOffsets_;
};
using SharedTensorVectorPtr = std::shared_ptr<std::vector<TensorCPU>>;
using Shared2DTensorVectorPtr =
std::shared_ptr<std::vector<std::vector<caffe2::TensorCPU>>>;
using Tensor2DVector = std::vector<std::vector<caffe2::TensorCPU>>;
using TensorVectorPtr = std::unique_ptr<std::vector<Tensor>>;
class SharedTensorVectorPtrSerializer : public BlobSerializerBase {
public:
void Serialize(
const void* pointer,
TypeMeta typeMeta,
const string& name,
BlobSerializerBase::SerializationAcceptor acceptor) override;
};
class SharedTensorVectorPtrDeserializer : public BlobDeserializerBase {
public:
void Deserialize(const BlobProto& proto, Blob* blob) override;
};
} // namespace dataset_ops
} // namespace caffe2
#endif // CAFFE2_OPERATORS_DATASET_OPS_H_
| 5,500
| 24.118721
| 80
|
h
|
null |
pytorch-main/caffe2/operators/deform_conv_op.h
|
#ifndef CAFFE2_OPERATORS_DEFORM_CONV_OP_H_
#define CAFFE2_OPERATORS_DEFORM_CONV_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_op_shared.h"
#include "caffe2/operators/conv_pool_op_base.h"
C10_DECLARE_bool(caffe2_force_shared_col_buffer);
namespace caffe2 {
template <typename T, class Context>
class DeformConvOpBase : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
explicit DeformConvOpBase(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<Context>(operator_def, ws),
deformable_group_(
this->template GetSingleArgument<int>("deformable_group", 1)) {}
~DeformConvOpBase() {}
protected:
void DeformableIm2col(
const T* data_im,
const T* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
T* data_col);
void DeformableCol2im(
const T* data_col,
const T* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
T* grad_im);
void DeformableCol2imCoord(
const T* data_col,
const T* data_im,
const T* data_offset,
at::IntArrayRef im_shape,
at::IntArrayRef col_shape,
T* grad_offset);
protected:
int deformable_group_;
#define USE_DEFORMABLE_CONV_BASE_FUNCTIONS(T, Context) \
USE_CONV_POOL_BASE_FUNCTIONS(Context); \
using DeformConvOpBase<T, Context>::deformable_group_; \
using DeformConvOpBase<T, Context>::DeformableIm2col; \
using DeformConvOpBase<T, Context>::DeformableCol2im; \
using DeformConvOpBase<T, Context>::DeformableCol2imCoord
};
template <typename T, class Context>
class DeformConvOp final : public DeformConvOpBase<T, Context> {
public:
USE_DEFORMABLE_CONV_BASE_FUNCTIONS(T, Context);
explicit DeformConvOp(const OperatorDef& operator_def, Workspace* ws)
: DeformConvOpBase<T, Context>(operator_def, ws) {
// Create shared buffer mutex in the constructor
// to avoid race-condition in DAGNet.
if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {
createSharedBuffer<Context>(ws_);
}
}
~DeformConvOp() {}
bool RunOnDeviceWithOrderNCHW() override;
private:
Tensor col_buffer_{Context::GetDeviceType()};
Tensor bias_multiplier_;
Tensor img_shape_device_{Context::GetDeviceType()};
Tensor col_buffer_shape_device_{Context::GetDeviceType()};
// Input: X, o, W, b
// Output: Y
INPUT_TAGS(INPUT, OFFSET, FILTER, BIAS);
};
template <typename T, class Context>
class DeformConvGradientOp final : public DeformConvOpBase<T, Context> {
public:
USE_DEFORMABLE_CONV_BASE_FUNCTIONS(T, Context);
explicit DeformConvGradientOp(const OperatorDef& operator_def, Workspace* ws)
: DeformConvOpBase<T, Context>(operator_def, ws),
no_bias_(this->template GetSingleArgument<int>("no_bias", 0)) {
CAFFE_ENFORCE(
!(no_bias_ && OutputSize() == 4),
"If bias is not present, you should not have 4 grad output.");
}
~DeformConvGradientOp() {}
bool RunOnDeviceWithOrderNCHW() override;
private:
Tensor col_buffer_;
Tensor bias_multiplier_;
Tensor img_shape_device_{Context::GetDeviceType()};
Tensor col_buffer_shape_device_{Context::GetDeviceType()};
bool no_bias_;
// input: X, W, dY
// output: dO, dW, db, and optionally dX
INPUT_TAGS(INPUT, OFFSET, FILTER, OUTPUT_GRAD);
OUTPUT_TAGS(OFFSET_GRAD, FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_DEFORM_CONV_OP_H_
| 3,543
| 30.362832
| 79
|
h
|
null |
pytorch-main/caffe2/operators/deform_conv_op_impl.h
|
// conv_op_impl.h is the templated implementation of the conv_op.h file.
#ifndef CAFFE2_OPERATORS_DEFORM_CONV_OP_IMPL_H_
#define CAFFE2_OPERATORS_DEFORM_CONV_OP_IMPL_H_
#include "caffe2/core/context.h"
#include "caffe2/core/flags.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/operators/deform_conv_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
bool DeformConvOp<T, Context>::RunOnDeviceWithOrderNCHW() {
const Tensor& X = Input(INPUT);
const Tensor& offset = Input(OFFSET);
auto& filter = Input(FILTER);
Tensor* Y = Output(0);
const int N = X.dim32(0), C = X.dim32(1);
CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
const int M = filter.dim32(0);
CAFFE_ENFORCE(
C == filter.dim32(1) * group_,
"Convolution op: input channels does not match: # of input channels ",
C,
" is not equal to kernel channels * group:",
filter.dim32(1),
"*",
group_);
CAFFE_ENFORCE(
M % group_ == 0,
"The number of output channels is not divisible by group.");
CAFFE_ENFORCE(
kernel_.size() == 2,
"Deformable convolution only supports 2d kernel, has ",
kernel_.size(),
"d kernel.");
CAFFE_ENFORCE(
offset.dim() == 4,
"Deformable convolution only supports 4d offset, has ",
offset.dim(),
"d offset.");
CAFFE_ENFORCE_EQ(offset.dim32(0), N);
CAFFE_ENFORCE(
C % deformable_group_ == 0,
"The number of input channels ",
C,
" is not divisible by deformable group ",
deformable_group_);
CAFFE_ENFORCE(
M % deformable_group_ == 0,
"The number of output channels ",
M,
" is not divisible by deformable group ",
deformable_group_);
CAFFE_ENFORCE(
offset.dim32(1) == 2 * kernel_h() * kernel_w() * deformable_group_,
"Deformable convolution: offset 1st dimension must equal "
"2 * kernel_h * kernel_w * deformable_group: 2 * ",
kernel_h(),
" * ",
kernel_w(),
" * ",
deformable_group_);
CAFFE_ENFORCE_EQ(
offset.dim32(2),
(X.dim32(2) + pad_t() + pad_b() - (dilation_h() * (kernel_h() - 1) + 1)) /
stride_h() +
1);
CAFFE_ENFORCE_EQ(
offset.dim32(3),
(X.dim32(3) + pad_l() + pad_r() - (dilation_w() * (kernel_w() - 1) + 1)) /
stride_w() +
1);
int kernel_dims_size = 1;
for (const auto i : c10::irange(kernel_.size())) {
CAFFE_ENFORCE(filter.dim32(i + 2) == kernel_[i]);
kernel_dims_size *= kernel_[i];
}
ConvPoolOpBase<Context>::SetOutputSize(X, Y, filter.dim32(0));
const vector<int> input_dims = GetDims(X);
const vector<int> output_dims = GetDims(*Y);
const int input_image_size = this->GetDimsSize(X);
const int output_image_size = this->GetDimsSize(*Y);
vector<int> img_shape;
img_shape.assign(X.sizes().begin() + 1, X.sizes().end());
vector<int> buffer_shape;
buffer_shape.push_back(C / group_ * kernel_dims_size);
buffer_shape.insert(
buffer_shape.end(), output_dims.begin(), output_dims.end());
// The dimension of each kernel
const int kernel_dim = C / group_ * kernel_dims_size;
// The offset corresponding to a single input image, and a single output
// image.
const int input_offset = C / group_ * input_image_size;
const int output_offset = M / group_ * output_image_size;
const int offset_offset = offset.numel() / offset.dim32(0);
const int filter_offset = filter.numel() / group_;
// The col buffer is stored in CHW order as well - kernel_dim, and the height
// and width.
const T* Xdata = X.template data<T>();
const T* offset_data = offset.template data<T>();
if (InputSize() == 4) {
auto& bias = Input(BIAS);
CAFFE_ENFORCE(bias.dim() == 1);
CAFFE_ENFORCE(bias.dim32(0) == M);
if (bias_multiplier_.numel() != output_image_size) {
      // If the helper bias multiplier is not output image size, reshape and
      // fill it with ones.
ReinitializeTensor(
&bias_multiplier_,
vector<int64_t>(1, output_image_size),
at::dtype<T>().device(Context::GetDeviceType()));
math::Set<T, Context>(
output_image_size,
static_cast<T>(1),
bias_multiplier_.template mutable_data<T>(),
&context_);
}
}
T* Ydata = Y->template mutable_data<T>();
const T* bias_data = nullptr;
if (InputSize() == 4) {
bias_data = Input(BIAS).template data<T>();
}
auto f = [this,
&filter_offset,
&bias_data,
&X,
&buffer_shape,
&N,
&Xdata,
&offset_data,
&M,
&filter,
&output_image_size,
&kernel_dim,
&Ydata,
&input_offset,
&offset_offset,
&output_offset](Tensor* col_buffer) {
col_buffer->Resize(buffer_shape);
T* col_buffer_data = col_buffer->template mutable_data<T>();
// Im2col, followed by gemm.
for (const auto image_id : c10::irange(N)) {
(void)image_id; // CUDA-10.2 on Windows crashes when C10_UNUSED macro is used
for (const auto group_id : c10::irange(group_)) {
DeformableIm2col(
Xdata + group_id * input_offset,
offset_data,
X.sizes(),
col_buffer->sizes(),
col_buffer_data);
// Weight term
math::Gemm<T, Context>(
CblasNoTrans,
CblasNoTrans,
M / group_,
output_image_size,
kernel_dim,
1,
filter.template data<T>() + group_id * filter_offset,
col_buffer_data,
0,
Ydata + group_id * output_offset,
&context_);
}
if (bias_data) {
math::Gemm<T, Context>(
CblasNoTrans,
CblasNoTrans,
M,
output_image_size,
1,
1,
bias_data,
bias_multiplier_.template data<T>(),
1,
Ydata,
&context_);
}
Xdata += input_offset * group_;
Ydata += output_offset * group_;
offset_data += offset_offset;
}
};
if (FLAGS_caffe2_force_shared_col_buffer || shared_buffer_) {
runWithSharedBuffer<Context>(ws_, f);
} else {
f(&col_buffer_);
}
return true;
}
template <typename T, class Context>
bool DeformConvGradientOp<T, Context>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(INPUT);
auto& offset = Input(OFFSET);
auto& filter = Input(FILTER);
auto& dY = Input(OUTPUT_GRAD);
const int N = X.dim32(0), C = X.dim32(1);
const vector<int> input_dims = this->GetDims(X);
const int input_image_size = this->GetDimsSize(X);
const vector<int> output_dims = this->GetDims(dY);
// The output image size is the spatial size of the output.
const int output_image_size = this->GetDimsSize(dY);
ConvPoolOpBase<Context>::ComputePads(input_dims);
CAFFE_ENFORCE_EQ(X.dim(), filter.dim());
const int M = filter.dim32(0);
CAFFE_ENFORCE(filter.dim32(1) * group_ == C);
CAFFE_ENFORCE(
kernel_.size() == 2,
"Deformable convolution only supports 2d kernel, has ",
kernel_.size(),
"d kernel.");
CAFFE_ENFORCE(
offset.dim() == 4,
"Deformable convolution only supports 4d offset, has ",
offset.dim(),
"d offset.");
CAFFE_ENFORCE_EQ(offset.dim32(0), N);
CAFFE_ENFORCE(
C % deformable_group_ == 0,
"The number of input channels ",
C,
" is not divisible by deformable group ",
deformable_group_);
CAFFE_ENFORCE(
M % deformable_group_ == 0,
"The number of output channels ",
M,
" is not divisible by deformable group ",
deformable_group_);
CAFFE_ENFORCE(
offset.dim32(1) == 2 * kernel_h() * kernel_w() * deformable_group_,
"Deformable convolution: offset 1st dimension must equal "
"2 * kernel_h * kernel_w * deformable_group: 2 * ",
kernel_h(),
" * ",
kernel_w(),
" * ",
deformable_group_);
CAFFE_ENFORCE_EQ(
offset.dim32(2),
(X.dim32(2) + pad_t() + pad_b() - (dilation_h() * (kernel_h() - 1) + 1)) /
stride_h() +
1);
CAFFE_ENFORCE_EQ(
offset.dim32(3),
(X.dim32(3) + pad_l() + pad_r() - (dilation_w() * (kernel_w() - 1) + 1)) /
stride_w() +
1);
int kernel_dims_size = 1;
for (const auto i : c10::irange(kernel_.size())) {
CAFFE_ENFORCE(filter.dim32(i + 2) == kernel_[i]);
kernel_dims_size *= kernel_[i];
}
CAFFE_ENFORCE(M % group_ == 0);
auto* dfilter = Output(FILTER_GRAD, filter.sizes(), at::dtype<T>());
auto* doffset = Output(OFFSET_GRAD, offset.sizes(), at::dtype<T>());
// The dimension of each kernel
const int kernel_dim = C / group_ * kernel_dims_size;
// The offset corresponding to a single input image, and a single output
// image.
const int input_offset = C / group_ * input_image_size;
const int output_offset = M / group_ * output_image_size;
const int offset_offset = offset.numel() / offset.dim32(0);
const int filter_offset = filter.numel() / group_;
// The col buffer is stored in CHW order as well - kernel_dim, and the
// height and width.
vector<int64_t> img_shape;
img_shape.assign(X.sizes().begin() + 1, X.sizes().end());
vector<int64_t> col_buffer_shape;
col_buffer_shape.push_back(C * kernel_dims_size);
col_buffer_shape.insert(
col_buffer_shape.end(), output_dims.begin(), output_dims.end());
ReinitializeTensor(
&col_buffer_,
col_buffer_shape,
at::dtype<T>().device(Context::GetDeviceType()));
const int col_buffer_offset = col_buffer_.numel() / group_;
const T* Xdata = X.template data<T>();
const T* filter_data = filter.template data<T>();
const T* offset_data = offset.template data<T>();
const T* dYdata = dY.template data<T>();
T* col_buffer_data = col_buffer_.template mutable_data<T>();
T* dfilter_data = dfilter->template mutable_data<T>();
T* doffset_data = doffset->template mutable_data<T>();
// Pre-setting the gradients to zero.
math::Set<T, Context>(dfilter->numel(), 0, dfilter_data, &context_);
T* dbias_data = nullptr;
if (!no_bias_) {
auto* dbias = Output(BIAS_OR_INPUT_GRAD, {M}, at::dtype<T>());
if (bias_multiplier_.numel() != output_image_size) {
    // If the helper bias multiplier is not output image size, reshape and
    // fill it with ones.
ReinitializeTensor(
&bias_multiplier_,
vector<int64_t>(1, output_image_size),
at::dtype<T>().device(Context::GetDeviceType()));
math::Set<T, Context>(
output_image_size,
static_cast<T>(1),
bias_multiplier_.template mutable_data<T>(),
&context_);
}
dbias_data = dbias->template mutable_data<T>();
math::Set<T, Context>(dbias->numel(), 0, dbias_data, &context_);
}
T* dXdata = nullptr;
if (OutputSize() == 4 || (no_bias_ && (OutputSize() == 3))) {
auto* dX = Output(
no_bias_ ? BIAS_OR_INPUT_GRAD : INPUT_GRAD, X.sizes(), at::dtype<T>());
dXdata = dX->template mutable_data<T>();
math::Set<T, Context>(dX->numel(), 0, dXdata, &context_);
}
for (const auto image_id : c10::irange(N)) {
(void)image_id; // CUDA-10.2 on Windows crashes when C10_UNUSED macro is used
for (const auto group_id : c10::irange(group_)) {
math::Gemm<T, Context>(
CblasTrans,
CblasNoTrans,
kernel_dim,
output_image_size,
M / group_,
1,
filter_data + group_id * filter_offset,
dYdata + group_id * output_offset,
0,
col_buffer_data + group_id * col_buffer_offset,
&context_);
}
// Gradient with respect to offsets
DeformableCol2imCoord(
col_buffer_data,
Xdata,
offset_data,
X.sizes(),
col_buffer_shape,
doffset_data);
// Gradient with respect to input data
if (dXdata) {
DeformableCol2im(
col_buffer_data, offset_data, X.sizes(), col_buffer_shape, dXdata);
dXdata += input_offset * group_;
}
// Gradient with respect to filter
DeformableIm2col(
Xdata, offset_data, X.sizes(), col_buffer_shape, col_buffer_data);
for (const auto group_id : c10::irange(group_)) {
math::Gemm<T, Context>(
CblasNoTrans,
CblasTrans,
M / group_,
kernel_dim,
output_image_size,
1,
dYdata + group_id * output_offset,
col_buffer_data + group_id * col_buffer_offset,
1,
dfilter_data + group_id * filter_offset,
&context_);
}
// Gradient with respect to bias
if (dbias_data) {
math::Gemv<T, Context>(
CblasNoTrans,
M,
output_image_size,
1,
dYdata,
bias_multiplier_.template data<T>(),
1,
dbias_data,
&context_);
}
Xdata += input_offset * group_;
dYdata += output_offset * group_;
offset_data += offset_offset;
doffset_data += offset_offset;
}
return true;
}
} // namespace caffe2
#endif // CAFFE2_OPERATORS_DEFORM_CONV_OP_IMPL_H_
| 13,323
| 30.498818
| 83
|
h
|
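A minimal standalone sketch (plain C++, no caffe2 types; the shapes are hypothetical) of the per-group offset arithmetic the deformable-convolution backward pass above relies on: each group owns a contiguous slice of the filter, output, and column buffers, so group g's slice begins at g times the corresponding per-group offset.

#include <iostream>

int main() {
  // Hypothetical shapes: C input channels, M output channels, G groups,
  // K = product of kernel dims, P = number of output pixels.
  const int C = 8, M = 16, G = 2, K = 9, P = 100;
  const int kernel_dim = C / G * K;              // rows of each per-group GEMM
  const int filter_offset = M / G * kernel_dim;  // filter elements per group
  const int output_offset = M / G * P;           // output elements per group
  const int col_buffer_offset = kernel_dim * P;  // col-buffer elements per group
  for (int g = 0; g < G; ++g) {
    std::cout << "group " << g << ": filter+" << g * filter_offset
              << " dY+" << g * output_offset
              << " col+" << g * col_buffer_offset << "\n";
  }
  return 0;
}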
null |
pytorch-main/caffe2/operators/dense_vector_to_id_list_op.h
|
#ifndef CAFFE2_OPERATORS_DENSE_VECTOR_TO_ID_LIST_OP_H_
#define CAFFE2_OPERATORS_DENSE_VECTOR_TO_ID_LIST_OP_H_
#include <set>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class DenseVectorToIdListOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(DenseVectorToIdListOp)
template <typename T, typename M>
bool DoRunWithType() {
auto& input = Input(0);
const auto* input_data = input.template data<T>();
CAFFE_ENFORCE_EQ(input.dim(), 2, "Sample should be 2-D");
const auto batch_size = input.size(0);
const auto col_num = input.size(1);
auto* out_lengths = Output(0, {batch_size}, at::dtype<int32_t>());
auto* out_lengths_data = out_lengths->template mutable_data<int32_t>();
auto* out_values = Output(1, {batch_size * col_num}, at::dtype<M>());
auto* out_values_data = out_values->template mutable_data<M>();
auto v_pos = 0;
auto l_pos = 0;
for (const auto i : c10::irange(batch_size)) {
auto length = 0;
for (const auto j : c10::irange(col_num)) {
if ((int)(input_data[i * col_num + j] + 0.5) != 0) {
out_values_data[v_pos++] = j;
length++;
}
}
out_lengths_data[l_pos++] = length;
}
out_values->Resize(v_pos);
out_lengths->Resize(l_pos);
return true;
}
bool RunOnDevice() override {
if (Input(0).template IsType<float>()) {
return DoRunWithType<float, int>();
} else {
CAFFE_THROW(
"DenseVectorToIdList operator only supports 32-bit float, but",
" input was of type ",
Input(0).dtype().name());
}
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_DENSE_VECTOR_TO_ID_LIST_OP_H_
| 1,841
| 26.492537
| 75
|
h
|
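For reference, a standalone sketch (plain C++, hypothetical helper name) of the conversion DoRunWithType performs above: round each float entry, emit the column index of every nonzero entry as an id, and record one length per row.

#include <cstdint>
#include <vector>

// Converts a batch x cols dense 0/1 matrix into an id list: per-row lengths
// plus the column indices of the (rounded) nonzero entries.
void DenseToIdList(const std::vector<float>& in, int batch, int cols,
                   std::vector<int32_t>* lengths, std::vector<int>* values) {
  for (int i = 0; i < batch; ++i) {
    int32_t len = 0;
    for (int j = 0; j < cols; ++j) {
      if (static_cast<int>(in[i * cols + j] + 0.5f) != 0) {
        values->push_back(j);  // the id is the column index
        ++len;
      }
    }
    lengths->push_back(len);
  }
}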
null |
pytorch-main/caffe2/operators/distance_op.h
|
#ifndef CAFFE2_OPERATORS_DISTANCE_OP_H_
#define CAFFE2_OPERATORS_DISTANCE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <typename T, class Context>
class SquaredL2DistanceOp : public Operator<Context> {
public:
template <class... Args>
explicit SquaredL2DistanceOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: X, Y; Output: Distance
};
template <typename T, class Context>
class SquaredL2DistanceGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit SquaredL2DistanceGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto& X = Input(0);
auto& Y = Input(1);
auto& dDistance = Input(2);
int N = X.dim() > 0 ? X.dim32(0) : 1;
int D = N > 0 ? X.numel() / N : 0;
CAFFE_ENFORCE(X.dim() == Y.dim());
for (const auto i : c10::irange(X.dim())) {
CAFFE_ENFORCE(X.dim32(i) == Y.dim32(i));
}
CAFFE_ENFORCE(dDistance.dim() == 1);
CAFFE_ENFORCE(dDistance.dim32(0) == N);
auto* dX = Output(0, X.sizes(), at::dtype<T>());
auto* dY = Output(1, Y.sizes(), at::dtype<T>());
math::Sub<T, Context>(
X.numel(),
X.template data<T>(),
Y.template data<T>(),
dX->template mutable_data<T>(),
&context_);
for (const auto i : c10::irange(N)) {
math::Scale<T, T, Context>(
D,
dDistance.template data<T>() + i,
dX->template data<T>() + i * D,
dX->template mutable_data<T>() + i * D,
&context_);
}
    // The gradient of the other side is simply the negation.
math::Scale<T, T, Context>(
X.numel(),
-1,
dX->template data<T>(),
dY->template mutable_data<T>(),
&context_);
return true;
}
protected:
// Input: X, Y, dDistance; Output: dX, dY
};
template <typename T, class Context>
class L1DistanceOp : public Operator<Context> {
public:
template <class... Args>
explicit L1DistanceOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: X, Y; Output: Distance
};
template <typename T, class Context>
class L1DistanceGradientOp : public Operator<Context> {
public:
template <class... Args>
explicit L1DistanceGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: X, Y, dDistance; Output: dX, dY
};
template <typename T, class Context>
class DotProductOp : public Operator<Context> {
public:
template <class... Args>
explicit DotProductOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
INPUT_TAGS(X_IN, Y_IN);
OUTPUT_TAGS(DOT_OUT);
};
template <typename T, class Context>
class DotProductGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit DotProductGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
INPUT_TAGS(X_IN, Y_IN, DER_DOT_IN);
OUTPUT_TAGS(DER_X_OUT, DER_Y_OUT);
};
template <typename T, class Context>
class DotProductWithPaddingOp : public Operator<Context> {
public:
template <class... Args>
explicit DotProductWithPaddingOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
pad_value_(this->template GetSingleArgument<float>("pad_value", 0.0)),
replicate_(this->template GetSingleArgument<bool>("replicate", false)) {
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
float pad_value_;
bool replicate_;
INPUT_TAGS(X_IN, Y_IN);
OUTPUT_TAGS(DOT_OUT);
};
template <typename T, class Context>
class CosineSimilarityOp : public Operator<Context> {
public:
template <class... Args>
explicit CosineSimilarityOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
INPUT_TAGS(X_IN, Y_IN);
OUTPUT_TAGS(COS_OUT);
private:
Tensor aux_;
};
template <typename T, class Context>
class CosineSimilarityGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit CosineSimilarityGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
INPUT_TAGS(X_IN, Y_IN, DER_COS_IN);
OUTPUT_TAGS(DER_X_OUT, DER_Y_OUT);
private:
Tensor aux_;
};
template <typename T, class Context>
class DotProductWithPaddingGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit DotProductWithPaddingGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
pad_value_(this->template GetSingleArgument<float>("pad_value", 0.0)),
replicate_(this->template GetSingleArgument<bool>("replicate", false)) {
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto& X = Input(X_IN);
auto& Y = Input(Y_IN);
auto& dDot = Input(DER_DOT_IN);
int N, D, DX, DY, restD;
if (X.numel() > 0) {
N = X.dim() > 0 ? X.dim32(0) : 1;
DX = X.numel() / N;
DY = Y.numel() / N;
} else {
N = 0;
DX = 0;
DY = 0;
}
CAFFE_ENFORCE(!replicate_ || DX % DY == 0 || DY % DX == 0);
D = std::min(DX, DY);
restD = std::max(DX, DY) - D;
CAFFE_ENFORCE_EQ(X.dim(), Y.dim());
CAFFE_ENFORCE_EQ(X.dim32(0), Y.dim32(0));
CAFFE_ENFORCE_EQ(dDot.dim(), 1);
CAFFE_ENFORCE_EQ(dDot.dim32(0), N);
auto* dX = Output(DER_X_OUT, X.sizes(), at::dtype<T>());
auto* dY = Output(DER_Y_OUT, Y.sizes(), at::dtype<T>());
const auto* X_data = X.template data<T>();
const auto* Y_data = Y.template data<T>();
const auto* dDot_data = dDot.template data<T>();
auto* dX_data = dX->template mutable_data<T>();
auto* dY_data = dY->template mutable_data<T>();
for (const auto i : c10::irange(N)) { // TODO: multithreading
auto offsetX = i * DX;
auto offsetY = i * DY;
if (replicate_) {
// L_ for longer vector and S_ for shorter vector
const T *L_data, *S_data;
T *dL_data, *dS_data;
int DL, DS;
if (DX > DY) {
L_data = X_data + offsetX;
S_data = Y_data + offsetY;
dL_data = dX_data + offsetX;
dS_data = dY_data + offsetY;
DL = DX;
DS = DY;
} else {
L_data = Y_data + offsetY;
S_data = X_data + offsetX;
dL_data = dY_data + offsetY;
dS_data = dX_data + offsetX;
DL = DY;
DS = DX;
}
// TODO: get rid of temp memory use
std::vector<T> tmp_data(DS);
math::Set<T, Context>(DS, 0.0, dS_data, &context_);
for (int j = 0; j < DL / DS; j++) {
math::Scale<T, T, Context>(
DS, dDot_data[i], S_data, dL_data + j * DS, &context_);
math::Scale<T, T, Context>(
DS, dDot_data[i], L_data + j * DS, tmp_data.data(), &context_);
math::Axpy<float, T, Context>(
DS, 1.0, tmp_data.data(), dS_data, &context_);
}
} else {
math::Scale<T, T, Context>(
D, dDot_data[i], X_data + offsetX, dY_data + offsetY, &context_);
math::Scale<T, T, Context>(
D, dDot_data[i], Y_data + offsetY, dX_data + offsetX, &context_);
}
if (!replicate_ && DX != DY) {
T* rest_data;
if (DX > DY) {
rest_data = dX_data + offsetX + D;
} else {
rest_data = dY_data + offsetY + D;
}
auto pad_gradient = dDot_data[i] * pad_value_;
math::Set<T, Context>(restD, pad_gradient, rest_data, &context_);
}
}
return true;
}
protected:
float pad_value_;
bool replicate_;
INPUT_TAGS(X_IN, Y_IN, DER_DOT_IN);
OUTPUT_TAGS(DER_X_OUT, DER_Y_OUT);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_DISTANCE_OP_H_
| 8,472
| 27.432886
| 80
|
h
|
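A standalone sketch of the math in SquaredL2DistanceGradientOp above: per row i, dX = dDistance[i] * (X - Y) and dY = -dX. This gradient is exact when the forward pass computes 0.5 * ||X - Y||^2 per row, which is the convention that matches the code above; the names here are illustrative.

#include <vector>

// Gradient of 0.5 * ||X - Y||^2 per row: dX = dDist[i] * (X - Y), dY = -dX.
void SquaredL2DistanceGrad(const std::vector<float>& X,
                           const std::vector<float>& Y,
                           const std::vector<float>& dDist, int N, int D,
                           std::vector<float>* dX, std::vector<float>* dY) {
  dX->resize(N * D);
  dY->resize(N * D);
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < D; ++j) {
      const float g = dDist[i] * (X[i * D + j] - Y[i * D + j]);
      (*dX)[i * D + j] = g;
      (*dY)[i * D + j] = -g;
    }
  }
}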
null |
pytorch-main/caffe2/operators/do_op.h
|
#ifndef CAFFE2_OPERATORS_DO_OP_H_
#define CAFFE2_OPERATORS_DO_OP_H_
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/create_scope_op.h"
#include "caffe2/proto/caffe2_pb.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class DoOp final : public Operator<Context> {
public:
explicit DoOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws), parent_ws_(ws) {
CAFFE_ENFORCE(
this->template HasSingleArgumentOfType<NetDef>("net"),
"net must be specified in Do operator");
net_def_ = this->template GetSingleArgument<NetDef>("net", NetDef());
is_gradient_op_ = operator_def.is_gradient_op();
copy_external_blobs_ =
this->template GetSingleArgument<bool>("copy_external_blobs", false);
reuse_workspace_ =
this->template GetSingleArgument<bool>("reuse_workspace", false);
CAFFE_ENFORCE(
!(is_gradient_op_ && reuse_workspace_),
"Gradient Do op requires use of stacked workspaces");
CAFFE_ENFORCE(
!(copy_external_blobs_ && reuse_workspace_),
"Reuse workspace and copy external blobs simultaneously in Do op");
const auto& inner_blobs =
this->template GetRepeatedArgument<std::string>("inner_blobs");
const auto& outer_blobs_idx =
this->template GetRepeatedArgument<int>("outer_blobs_idx");
CAFFE_ENFORCE_EQ(
inner_blobs.size(),
outer_blobs_idx.size(),
"Invalid blob bindings: different inner/outer blobs lengths");
const auto& outer_blob_names = checkAndGetOuterNames(operator_def);
std::unordered_set<std::string> used_outer_names;
for (const auto blob_idx : c10::irange(inner_blobs.size())) {
CAFFE_ENFORCE(
!blob_bindings_.count(inner_blobs[blob_idx]),
"Invalid blob bindings: redefinition of inner blob " +
inner_blobs[blob_idx]);
CAFFE_ENFORCE(
outer_blobs_idx[blob_idx] >= 0 &&
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
outer_blobs_idx[blob_idx] < outer_blob_names.size(),
"Invalid blob bindings: outer blob index (" +
c10::to_string(outer_blobs_idx[blob_idx]) + ", inner name: " +
inner_blobs[blob_idx] + ") is out of bounds [0, " +
c10::to_string(outer_blob_names.size() - 1) + "]");
const auto& outer_name = outer_blob_names[outer_blobs_idx[blob_idx]];
CAFFE_ENFORCE(
!used_outer_names.count(outer_name),
"Reusage of outer name: " + outer_name);
used_outer_names.insert(outer_name);
blob_bindings_[inner_blobs[blob_idx]] = outer_name;
forwarded_inner_blobs_.insert(inner_blobs[blob_idx]);
}
std::unordered_set<std::string> all_outer_names(
outer_blob_names.begin(), outer_blob_names.end());
CAFFE_ENFORCE_EQ(
used_outer_names.size(),
all_outer_names.size(),
"Not all outer names are used in blob bindings");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto* ws_stack =
this->template Output<detail::WorkspaceStack>(OutputSize() - 1);
std::shared_ptr<Workspace> net_workspace;
if (is_gradient_op_) {
net_workspace =
ws_stack->popGradientWorkspace(parent_ws_, blob_bindings_);
} else {
if (reuse_workspace_ && !ws_stack->empty()) {
net_workspace =
ws_stack->reuseLastForwardWorkspace(parent_ws_, blob_bindings_);
} else {
net_workspace =
ws_stack->pushForwardWorkspace(parent_ws_, blob_bindings_);
}
}
CAFFE_ENFORCE(net_workspace, "Failed to initialize Do op workspace");
    // TODO(iliacher): figure out how to reuse an existing net with a new workspace
auto* net = net_workspace->GetNet(net_def_.name());
if (!net) {
net = net_workspace->CreateNet(net_def_, true);
}
CAFFE_ENFORCE(net, "Failed to initialize subnet");
auto success = net->Run();
if (!is_gradient_op_ && copy_external_blobs_) {
net_workspace->template CopyForwardedTensors<Context>(
forwarded_inner_blobs_);
}
return success;
}
private:
// returns vector of input blob names followed by output blob names in
// operator definition order; ensures that input (output) names are unique,
// checks number of input (output) blobs
std::vector<std::string> checkAndGetOuterNames(
const OperatorDef& operator_def) const {
auto input_names = getInputBlobNames(operator_def);
CAFFE_ENFORCE(!input_names.empty(), "Expected at least one input blob");
std::string input_ws_blob = input_names.back(); // copy
    // remove blob that holds pointer to op workspace
input_names.pop_back();
std::unordered_set<std::string> all_input_names(
input_names.begin(), input_names.end());
CAFFE_ENFORCE_EQ(
input_names.size(), all_input_names.size(), "Duplicate input blobs");
auto output_names = getOutputBlobNames(operator_def);
CAFFE_ENFORCE(!output_names.empty(), "Expected at least one output blob");
const auto& output_ws_blob = output_names.back();
CAFFE_ENFORCE_EQ(
input_ws_blob,
output_ws_blob,
"Expected same input/output workspace blob");
// remove blob that holds pointer to op workspace
output_names.pop_back();
std::unordered_set<std::string> all_output_names(
output_names.begin(), output_names.end());
CAFFE_ENFORCE_EQ(
output_names.size(), all_output_names.size(), "Duplicate output blobs");
std::vector<std::string> outer_blob_names;
outer_blob_names.reserve(input_names.size() + output_names.size());
outer_blob_names.insert(
outer_blob_names.end(), input_names.begin(), input_names.end());
outer_blob_names.insert(
outer_blob_names.end(), output_names.begin(), output_names.end());
return outer_blob_names;
}
std::vector<std::string> getInputBlobNames(
const OperatorDef& operator_def) const {
std::vector<std::string> names;
names.reserve(operator_def.input_size());
for (const auto idx : c10::irange(operator_def.input_size())) {
names.push_back(operator_def.input(idx));
}
return names;
}
std::vector<std::string> getOutputBlobNames(
const OperatorDef& operator_def) const {
std::vector<std::string> names;
names.reserve(operator_def.output_size());
for (const auto idx : c10::irange(operator_def.output_size())) {
names.push_back(operator_def.output(idx));
}
return names;
}
std::unordered_map<std::string, std::string> blob_bindings_;
std::unordered_set<std::string> forwarded_inner_blobs_;
bool is_gradient_op_;
bool copy_external_blobs_;
bool reuse_workspace_;
NetDef net_def_;
Workspace* parent_ws_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_DO_OP_H_
| 7,007
| 36.677419
| 80
|
h
|
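A standalone sketch (plain C++, caffe2's enforce macros replaced by exceptions) of the inner/outer blob-binding validation the DoOp constructor performs: every inner name maps to exactly one in-range outer name, and no outer name is bound twice.

#include <stdexcept>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

std::unordered_map<std::string, std::string> BindBlobs(
    const std::vector<std::string>& inner,
    const std::vector<int>& outer_idx,
    const std::vector<std::string>& outer_names) {
  if (inner.size() != outer_idx.size())
    throw std::runtime_error("different inner/outer blobs lengths");
  std::unordered_map<std::string, std::string> bindings;
  std::unordered_set<std::string> used_outer;
  for (size_t i = 0; i < inner.size(); ++i) {
    if (bindings.count(inner[i]))
      throw std::runtime_error("redefinition of inner blob " + inner[i]);
    if (outer_idx[i] < 0 ||
        outer_idx[i] >= static_cast<int>(outer_names.size()))
      throw std::runtime_error("outer blob index out of bounds");
    const std::string& outer = outer_names[outer_idx[i]];
    if (!used_outer.insert(outer).second)
      throw std::runtime_error("reuse of outer name " + outer);
    bindings[inner[i]] = outer;
  }
  return bindings;
}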
null |
pytorch-main/caffe2/operators/dropout_op.h
|
#ifndef CAFFE2_OPERATORS_DROPOUT_OP_H_
#define CAFFE2_OPERATORS_DROPOUT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class DropoutOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit DropoutOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
ratio_(this->template GetSingleArgument<float>("ratio", 0.5)),
is_test_(
this->template GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)) {
CAFFE_ENFORCE_GE(ratio_, 0);
}
bool RunOnDevice() override;
protected:
float ratio_;
bool is_test_;
// Input: X; Output: Y, mask.
};
template <typename T, class Context>
class DropoutGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit DropoutGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
ratio_(this->template GetSingleArgument<float>("ratio", 0.5)),
is_test_(
this->template GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)) {
CAFFE_ENFORCE_GE(ratio_, 0);
}
bool RunOnDevice() override;
protected:
float ratio_;
bool is_test_;
// Input: dY, mask; Output: dX
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_DROPOUT_OP_H_
| 1,450
| 24.910714
| 77
|
h
|
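The header above only declares the dropout kernels; the exact train-time convention lives in the .cc/.cu implementations. As a reference, a standalone sketch of the common inverted-dropout scheme (an assumption here, not a statement about this file): kept units are scaled by 1/(1-ratio) during training so that test time is the identity.

#include <random>
#include <vector>

void DropoutForward(const std::vector<float>& X, float ratio, bool is_test,
                    std::vector<float>* Y, std::vector<bool>* mask,
                    std::mt19937* rng) {
  Y->resize(X.size());
  mask->resize(X.size());
  if (is_test) {
    *Y = X;  // test time: identity
    return;
  }
  std::bernoulli_distribution keep(1.0 - ratio);
  const float scale = 1.0f / (1.0f - ratio);
  for (size_t i = 0; i < X.size(); ++i) {
    (*mask)[i] = keep(*rng);
    (*Y)[i] = (*mask)[i] ? X[i] * scale : 0.0f;
  }
}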
null |
pytorch-main/caffe2/operators/elementwise_add_op.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_ADD_OP_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_ADD_OP_H_
#include <algorithm>
#include <functional>
#include <vector>
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/operators/elementwise_ops_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
struct AddFunctor {
template <typename TIn, typename TOut>
bool Forward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TIn* A,
const TIn* B,
TOut* C,
Context* context) const {
math::Add(
A_dims.size(),
A_dims.data(),
B_dims.size(),
B_dims.data(),
A,
B,
C,
context);
return true;
}
template <typename TGrad, typename TIn, typename TOut>
bool Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC,
const TIn* /* A */,
const TIn* /* B */,
const TOut* /* C */,
TGrad* dA,
TGrad* dB,
Context* context) const {
const std::vector<int> C_dims =
elementwise_ops_utils::ComputeBinaryBroadcastForwardDims(
A_dims, B_dims);
std::vector<int> A_back_dims;
std::vector<int> B_back_dims;
elementwise_ops_utils::ComputeBinaryBroadcastBackwardDims(
A_dims, B_dims, &A_back_dims, &B_back_dims);
math::ReduceSum(
C_dims.size(),
C_dims.data(),
A_back_dims.data(),
TGrad(1),
dC,
dA,
context,
true);
math::ReduceSum(
C_dims.size(),
C_dims.data(),
B_back_dims.data(),
TGrad(1),
dC,
dB,
context,
true);
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELEMENTWISE_ADD_OP_H_
| 1,820
| 22.050633
| 65
|
h
|
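AddFunctor::Backward above pushes dC through unchanged and sums it over whichever axes were broadcast in the forward pass. A standalone sketch for the common (N, D) + (D,) case:

#include <vector>

// Backward of C = A + B with A of shape (N, D) and B broadcast from (D,):
// dA = dC, and dB[j] is dC summed over the broadcast (row) axis.
void AddBroadcastBackward(const std::vector<float>& dC, int N, int D,
                          std::vector<float>* dA, std::vector<float>* dB) {
  *dA = dC;  // same shape as C: the gradient passes through
  dB->assign(D, 0.0f);
  for (int i = 0; i < N; ++i)
    for (int j = 0; j < D; ++j)
      (*dB)[j] += dC[i * D + j];
}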
null |
pytorch-main/caffe2/operators/elementwise_div_op.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_DIV_OP_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_DIV_OP_H_
#include <vector>
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
struct DivFunctor {
template <typename TIn, typename TOut>
bool Forward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TIn* A,
const TIn* B,
TOut* C,
Context* context) const {
math::Div(
A_dims.size(),
A_dims.data(),
B_dims.size(),
B_dims.data(),
A,
B,
C,
context);
return true;
}
template <typename TGrad, typename TIn, typename TOut>
bool Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC_data,
const TIn* A_data,
const TIn* B_data,
const TOut* C_data,
TGrad* dA_data,
TGrad* dB_data,
Context* context) const;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELEMENTWISE_DIV_OP_H_
| 1,062
| 20.693878
| 56
|
h
|
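DivFunctor::Backward is only declared here and implemented per context elsewhere. For the elementwise (no-broadcast) case the quotient rule gives dA = dC / B and, since C = A / B, dB = -dC * C / B; a standalone sketch of that math:

#include <vector>

// Elementwise gradient of C = A / B:
// dA = dC / B, dB = -dC * A / B^2 = -dC * C / B.
void DivBackward(const std::vector<float>& dC, const std::vector<float>& B,
                 const std::vector<float>& C, std::vector<float>* dA,
                 std::vector<float>* dB) {
  dA->resize(dC.size());
  dB->resize(dC.size());
  for (size_t i = 0; i < dC.size(); ++i) {
    (*dA)[i] = dC[i] / B[i];
    (*dB)[i] = -dC[i] * C[i] / B[i];
  }
}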
null |
pytorch-main/caffe2/operators/elementwise_linear_op.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_LINEAR_OP_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_LINEAR_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context, class Engine = DefaultEngine>
class ElementwiseLinearOp final : public Operator<Context> {
public:
template <class... Args>
explicit ElementwiseLinearOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axis_(this->template GetSingleArgument<int>("axis", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
int axis_;
};
template <typename T, class Context, class Engine = DefaultEngine>
class ElementwiseLinearGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit ElementwiseLinearGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axis_(this->template GetSingleArgument<int>("axis", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
int axis_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELEMENTWISE_LINEAR_OP_H_
| 1,170
| 26.880952
| 68
|
h
|
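The header above declares the kernels only. Assuming the usual ElementwiseLinear semantics (Y[i][j] = a[j] * X[i][j] + b[j], with X flattened to (N, D) around axis_; an assumption to be checked against the .cc), a standalone sketch:

#include <vector>

// Y[i][j] = a[j] * X[i][j] + b[j] for X viewed as (N, D).
void ElementwiseLinear(const std::vector<float>& X,
                       const std::vector<float>& a,
                       const std::vector<float>& b, int N, int D,
                       std::vector<float>* Y) {
  Y->resize(N * D);
  for (int i = 0; i < N; ++i)
    for (int j = 0; j < D; ++j)
      (*Y)[i * D + j] = a[j] * X[i * D + j] + b[j];
}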
null |
pytorch-main/caffe2/operators/elementwise_logical_ops.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_LOGICAL_OPS_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_LOGICAL_OPS_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/elementwise_ops.h"
#include <unordered_set>
namespace caffe2 {
template <class Context>
class WhereOp final : public Operator<Context> {
public:
USE_OPERATOR_FUNCTIONS(Context);
USE_DISPATCH_HELPER;
template <class... Args>
explicit WhereOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "broadcast_on_rows", enable_broadcast_, 0) {}
bool RunOnDevice() override {
return DispatchHelper<
TensorTypes<float, double, int, long, std::string, bool>>::
call(this, Input(1));
}
template <typename T>
bool DoRunWithType() {
auto& select = Input(0);
auto& left = Input(1);
auto& right = Input(2);
if (enable_broadcast_) {
CAFFE_ENFORCE_EQ(select.dim(), 1);
CAFFE_ENFORCE_EQ(select.size(0), right.size(0));
CAFFE_ENFORCE_EQ(left.sizes(), right.sizes());
} else {
CAFFE_ENFORCE_EQ(select.sizes(), left.sizes());
CAFFE_ENFORCE_EQ(select.sizes(), right.sizes());
}
auto* output = Output(0, left.sizes(), at::dtype<T>());
const bool* select_data = select.template data<bool>();
const T* left_data = left.template data<T>();
const T* right_data = right.template data<T>();
T* output_data = output->template mutable_data<T>();
if (enable_broadcast_) {
size_t block_size = left.size_from_dim(1);
for (const auto i : c10::irange(select.numel())) {
size_t offset = i * block_size;
if (select_data[i]) {
context_.CopyItemsSameDevice(
output->dtype(),
block_size,
left_data + offset,
output_data + offset);
} else {
context_.CopyItemsSameDevice(
output->dtype(),
block_size,
right_data + offset,
output_data + offset);
}
}
} else {
for (const auto i : c10::irange(select.numel())) {
output_data[i] = select_data[i] ? left_data[i] : right_data[i];
}
}
return true;
}
private:
bool enable_broadcast_;
};
class IsMemberOfValueHolder {
std::unordered_set<int32_t> int32_values_;
std::unordered_set<int64_t> int64_values_;
std::unordered_set<bool> bool_values_;
std::unordered_set<std::string> string_values_;
bool has_values_ = false;
public:
template <typename T>
std::unordered_set<T>& get();
template <typename T>
void set(const std::vector<T>& args) {
has_values_ = true;
auto& values = get<T>();
values.insert(args.begin(), args.end());
}
bool has_values() {
return has_values_;
}
};
template <class Context>
class IsMemberOfOp final : public Operator<Context> {
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_DISPATCH_HELPER;
static constexpr const char* VALUE_TAG = "value";
public:
using TestableTypes = TensorTypes<int32_t, int64_t, bool, std::string>;
template <class... Args>
explicit IsMemberOfOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
auto dtype =
static_cast<TensorProto_DataType>(this->template GetSingleArgument<int>(
"dtype", TensorProto_DataType_UNDEFINED));
switch (dtype) {
case TensorProto_DataType_INT32:
values_.set(this->template GetRepeatedArgument<int32_t>(VALUE_TAG));
break;
case TensorProto_DataType_INT64:
values_.set(this->template GetRepeatedArgument<int64_t>(VALUE_TAG));
break;
case TensorProto_DataType_BOOL:
values_.set(this->template GetRepeatedArgument<bool>(VALUE_TAG));
break;
case TensorProto_DataType_STRING:
values_.set(this->template GetRepeatedArgument<std::string>(VALUE_TAG));
break;
case TensorProto_DataType_UNDEFINED:
// If dtype is not provided, values_ will be filled the first time that
// DoRunWithType is called.
break;
default:
CAFFE_THROW("Unexpected 'dtype' argument value: ", dtype);
}
}
~IsMemberOfOp() noexcept override {}
bool RunOnDevice() override {
return DispatchHelper<
TensorTypes<int32_t, int64_t, bool, std::string>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
auto& input = Input(0);
auto* output = Output(0, input.sizes(), at::dtype<bool>());
if (!values_.has_values()) {
values_.set(this->template GetRepeatedArgument<T>(VALUE_TAG));
}
const auto& values = values_.get<T>();
const T* input_data = input.template data<T>();
bool* output_data = output->template mutable_data<bool>();
for (const auto i : c10::irange(input.numel())) {
output_data[i] = values.find(input_data[i]) != values.end();
}
return true;
}
protected:
IsMemberOfValueHolder values_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELEMENTWISE_LOGICAL_OPS_H_
| 5,108
| 28.194286
| 80
|
h
|
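A standalone sketch of the two selection modes WhereOp implements above: plain elementwise select, and the broadcast_on_rows mode where a single bool per row picks an entire block from left or right.

#include <cstddef>
#include <vector>

void Where(const std::vector<bool>& cond, const std::vector<float>& left,
           const std::vector<float>& right, bool broadcast_rows,
           size_t block_size, std::vector<float>* out) {
  out->resize(left.size());
  if (broadcast_rows) {
    // One bool per row selects a whole block of block_size elements.
    for (size_t i = 0; i < cond.size(); ++i) {
      const std::vector<float>& src = cond[i] ? left : right;
      for (size_t k = 0; k < block_size; ++k)
        (*out)[i * block_size + k] = src[i * block_size + k];
    }
  } else {
    for (size_t i = 0; i < cond.size(); ++i)
      (*out)[i] = cond[i] ? left[i] : right[i];
  }
}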
null |
pytorch-main/caffe2/operators/elementwise_mul_op.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_MUL_OP_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_MUL_OP_H_
#include <vector>
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
struct MulFunctor {
template <typename TIn, typename TOut>
bool Forward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TIn* A,
const TIn* B,
TOut* C,
Context* context) const {
math::Mul(
A_dims.size(),
A_dims.data(),
B_dims.size(),
B_dims.data(),
A,
B,
C,
context);
return true;
}
template <typename TGrad, typename TIn, typename TOut>
bool Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC_data,
const TIn* A_data,
const TIn* B_data,
const TOut* C_data,
TGrad* dA_data,
TGrad* dB_data,
Context* context) const;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELEMENTWISE_MUL_OP_H_
| 1,062
| 20.693878
| 56
|
h
|
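Likewise, MulFunctor::Backward is defined per context elsewhere; for the no-broadcast case the product rule is simply dA = dC * B and dB = dC * A. A minimal sketch:

#include <vector>

// Elementwise gradient of C = A * B.
void MulBackward(const std::vector<float>& dC, const std::vector<float>& A,
                 const std::vector<float>& B, std::vector<float>* dA,
                 std::vector<float>* dB) {
  dA->resize(dC.size());
  dB->resize(dC.size());
  for (size_t i = 0; i < dC.size(); ++i) {
    (*dA)[i] = dC[i] * B[i];
    (*dB)[i] = dC[i] * A[i];
  }
}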
null |
pytorch-main/caffe2/operators/elementwise_op_test.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_OP_TEST_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_OP_TEST_H_
#include "caffe2/operators/elementwise_ops.h"
#include <iostream>
#include <string>
#include <vector>
#include <gtest/gtest.h>
template <typename Context, typename T>
void CopyVector(const int N, const T* x, T* y);
template <typename Context, typename I_Type, typename O_Type>
void FillTensor(
caffe2::Workspace* ws,
const std::string& name,
const std::vector<int64_t>& shape,
const std::vector<I_Type>& values) {
auto* blob = ws->CreateBlob(name);
auto* tensor = BlobGetMutableTensor(blob, Context::GetDeviceType());
tensor->Resize(shape);
auto* mutable_data = tensor->template mutable_data<O_Type>();
const O_Type* data = reinterpret_cast<const O_Type*>(values.data());
CopyVector<Context, O_Type>(values.size(), data, mutable_data);
}
template <typename Context>
caffe2::OperatorDef CreateOperatorDef() {
caffe2::OperatorDef def;
return def;
}
template <typename Context>
caffe2::OperatorDef DefineOperator(const std::string& op_type) {
caffe2::OperatorDef def = CreateOperatorDef<Context>();
def.set_name("test");
def.set_type(op_type);
def.add_input("X");
def.add_input("Y");
def.add_output("Z");
return def;
}
template <typename Context>
void elementwiseAnd() {
const int N = 4;
const int M = 2;
caffe2::Workspace ws;
auto def = DefineOperator<Context>("And");
{ // equal size
FillTensor<Context, uint8_t, bool>(
&ws, "X", {N}, {true, false, true, false});
FillTensor<Context, uint8_t, bool>(
&ws, "Y", {N}, {true, true, false, false});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), N);
std::vector<bool> result{true, false, false, false};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
{ // broadcast
auto* arg = def.add_arg();
arg->set_name("broadcast");
arg->set_i(1);
FillTensor<Context, uint8_t, bool>(
&ws, "X", {M, N}, {true, false, true, false, true, false, true, false});
FillTensor<Context, uint8_t, bool>(
&ws, "Y", {N}, {true, true, false, false});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), M * N);
std::vector<bool> result{
true, false, false, false, true, false, false, false};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
}
template <typename Context>
void elementwiseOr() {
const int N = 4;
const int M = 2;
caffe2::Workspace ws;
auto def = DefineOperator<Context>("Or");
{ // equal size
FillTensor<Context, uint8_t, bool>(
&ws, "X", {N}, {true, false, true, false});
FillTensor<Context, uint8_t, bool>(
&ws, "Y", {N}, {true, true, false, false});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), N);
std::vector<bool> result{true, true, true, false};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
{ // broadcast
auto* arg = def.add_arg();
arg->set_name("broadcast");
arg->set_i(1);
FillTensor<Context, uint8_t, bool>(
&ws, "X", {M, N}, {true, false, true, false, true, false, true, false});
FillTensor<Context, uint8_t, bool>(
&ws, "Y", {N}, {true, true, false, false});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), M * N);
std::vector<bool> result{true, true, true, false, true, true, true, false};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
}
template <typename Context>
void elementwiseXor() {
const int N = 4;
const int M = 2;
caffe2::Workspace ws;
auto def = DefineOperator<Context>("Xor");
{ // equal size
FillTensor<Context, uint8_t, bool>(
&ws, "X", {N}, {true, false, true, false});
FillTensor<Context, uint8_t, bool>(
&ws, "Y", {N}, {true, true, false, false});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), N);
std::vector<bool> result{false, true, true, false};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
{ // broadcast
auto* arg = def.add_arg();
arg->set_name("broadcast");
arg->set_i(1);
FillTensor<Context, uint8_t, bool>(
&ws, "X", {M, N}, {true, false, true, false, true, false, true, false});
FillTensor<Context, uint8_t, bool>(
&ws, "Y", {N}, {true, true, false, false});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), M * N);
std::vector<bool> result{
false, true, true, false, false, true, true, false};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
}
template <typename Context>
void elementwiseNot() {
const int N = 2;
caffe2::Workspace ws;
caffe2::OperatorDef def = CreateOperatorDef<Context>();
def.set_name("test");
def.set_type("Not");
def.add_input("X");
def.add_output("Y");
FillTensor<Context, uint8_t, bool>(&ws, "X", {N}, {true, false});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Y");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Y(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Y.numel(), N);
std::vector<bool> result{false, true};
for (const auto i : c10::irange(Y.numel())) {
EXPECT_EQ(Y.template data<bool>()[i], result[i]);
}
}
template <typename Context>
void elementwiseEQ() {
const int N = 4;
const int M = 2;
caffe2::Workspace ws;
auto def = DefineOperator<Context>("EQ");
{ // equal size
FillTensor<Context, int32_t, int32_t>(&ws, "X", {N}, {1, 100, 5, -10});
FillTensor<Context, int32_t, int32_t>(&ws, "Y", {N}, {0, 100, 4, -10});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), N);
std::vector<bool> result{false, true, false, true};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
{ // boolean
FillTensor<Context, uint8_t, bool>(
&ws, "X", {N}, {true, false, false, true});
FillTensor<Context, uint8_t, bool>(
&ws, "Y", {N}, {true, false, true, false});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), N);
std::vector<bool> result{true, true, false, false};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
{ // broadcast
auto* arg = def.add_arg();
arg->set_name("broadcast");
arg->set_i(1);
FillTensor<Context, int32_t, int32_t>(
&ws, "X", {M, N}, {1, 100, 5, -10, 3, 6, -1000, 33});
FillTensor<Context, int32_t, int32_t>(&ws, "Y", {N}, {1, 6, -1000, -10});
std::unique_ptr<caffe2::OperatorBase> op(caffe2::CreateOperator(def, &ws));
EXPECT_NE(nullptr, op.get());
EXPECT_TRUE(op->Run());
auto* blob = ws.GetBlob("Z");
EXPECT_NE(nullptr, blob);
caffe2::Tensor Z(blob->Get<caffe2::Tensor>(), caffe2::CPU);
EXPECT_EQ(Z.numel(), M * N);
std::vector<bool> result{
true, false, false, true, false, true, true, false};
for (const auto i : c10::irange(Z.numel())) {
EXPECT_EQ(Z.template data<bool>()[i], result[i]);
}
}
}
#endif // CAFFE2_OPERATORS_ELEMENTWISE_OP_TEST_H_
| 9,287
| 33.786517
| 80
|
h
|
null |
pytorch-main/caffe2/operators/elementwise_ops.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_OPS_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_OPS_H_
#include <iterator>
#include <string>
#include <tuple>
#include <vector>
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/operators/elementwise_ops_utils.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
using NumericTypes = TensorTypes<int32_t, int64_t, float, double>;
using IntTypes = TensorTypes<int32_t, int64_t>;
using BoolTypes = TensorTypes<bool>;
using IntBoolTypes = TensorTypes<int32_t, int64_t, bool>; // discrete types
struct SameTypeAsInput {
template <typename T>
using type = T;
};
template <typename R>
struct FixedType {
template <typename T>
using type = R;
};
template <
typename InputTypes,
class Context,
class Functor,
class OutputTypeMap = SameTypeAsInput>
class UnaryElementwiseWithArgsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit UnaryElementwiseWithArgsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...), functor_(*this) {}
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& X = Input(0);
auto* Y = Output(
0, X.sizes(), at::dtype<typename OutputTypeMap::template type<T>>());
return functor_(
X.numel(),
X.template data<T>(),
Y->template mutable_data<typename OutputTypeMap::template type<T>>(),
&context_);
}
private:
Functor functor_;
};
// UnaryFunctorWithDefaultCtor is a functor that can be used as the functor of
// a UnaryElementwiseWithArgsOp. It simply forwards the operator() call into
// another functor that doesn't accept arguments in its constructor.
template <class Functor>
struct UnaryFunctorWithDefaultCtor {
explicit UnaryFunctorWithDefaultCtor(OperatorBase& /* op */) {}
template <typename TIn, typename TOut, class Context>
bool operator()(const int size, const TIn* X, TOut* Y, Context* context)
const {
return functor(size, X, Y, context);
}
Functor functor{};
};
// UnaryElementwiseOp is a wrapper around UnaryElementwiseWithArgsOp, with the
// difference that it takes a functor with default constructor, e.g. that does
// not need to take into consideration any arguments during operator creation.
template <
typename InputTypes,
class Context,
class Functor,
class OutputTypeMap = SameTypeAsInput>
using UnaryElementwiseOp = UnaryElementwiseWithArgsOp<
InputTypes,
Context,
UnaryFunctorWithDefaultCtor<Functor>,
OutputTypeMap>;
template <
typename InputTypes,
class Context,
class Functor,
class OutputTypeMap = SameTypeAsInput>
class BinaryElementwiseWithArgsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BinaryElementwiseWithArgsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "broadcast", legacy_broadcast_, false),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, string("")),
OP_SINGLE_ARG(string, "order", order_, "NCHW"),
functor_(*this) {
if (legacy_broadcast_) {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0U,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1U, "Unsupported axis string", axis_str_);
const size_t semantic_axis_ = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis_,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis_;
} else {
CAFFE_ENFORCE(
axis_ == -1 && axis_str_.empty(),
"Do not specify axis or axis_str if broadcast is not enabled.");
}
}
}
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& A = Input(0);
const auto& B = Input(1);
const T* A_data = A.template data<T>();
const T* B_data = B.template data<T>();
std::vector<int> A_dims;
std::vector<int> B_dims;
std::vector<int64_t> C_dims;
if (legacy_broadcast_) {
CAFFE_ENFORCE(
!IsInputOutputAlias(1, 0),
"In-place is allowed only with the first tensor when "
"legacy-broadcasting");
C_dims = A.sizes().vec();
if (B.numel() == 1) {
A_dims = {static_cast<int>(A.numel())};
B_dims = {1};
} else {
size_t pre, n, post;
std::tie(pre, n, post) =
elementwise_ops_utils::ComputeLegacyBroadcastSizes(A, B, axis_);
A_dims = {
static_cast<int>(pre), static_cast<int>(n), static_cast<int>(post)};
B_dims = {static_cast<int>(n), 1};
}
} else {
A_dims.reserve(A.sizes().size());
B_dims.reserve(B.sizes().size());
std::copy(
A.sizes().cbegin(), A.sizes().cend(), std::back_inserter(A_dims));
std::copy(
B.sizes().cbegin(), B.sizes().cend(), std::back_inserter(B_dims));
// TODO: change the types to vector<int64_t>
auto C_dims_int =
elementwise_ops_utils::ComputeBinaryBroadcastForwardDims(
A_dims, B_dims);
C_dims.reserve(C_dims_int.size());
std::copy(
C_dims_int.cbegin(), C_dims_int.cend(), std::back_inserter(C_dims));
if (IsInputOutputAlias(0, 0)) {
CAFFE_ENFORCE_EQ(C_dims_int, A_dims);
} else if (IsInputOutputAlias(1, 0)) {
CAFFE_ENFORCE_EQ(C_dims_int, B_dims);
}
}
auto* C = Output(
0, C_dims, at::dtype<typename OutputTypeMap::template type<T>>());
auto* C_data =
C->template mutable_data<typename OutputTypeMap::template type<T>>();
return functor_.Forward(A_dims, B_dims, A_data, B_data, C_data, &context_);
}
private:
const bool legacy_broadcast_;
int axis_;
const std::string axis_str_;
const std::string order_;
Functor functor_;
};
template <
typename InputTypes,
class Context,
class Functor,
class OutputTypeMap = SameTypeAsInput,
class GradientTypeMap = SameTypeAsInput>
class BinaryElementwiseWithArgsGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BinaryElementwiseWithArgsGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "broadcast", legacy_broadcast_, false),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, ""),
OP_SINGLE_ARG(string, "order", order_, "NCHW"),
functor_(*this) {
if (legacy_broadcast_) {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0U,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1U, "Unsupported axis string", axis_str_);
const size_t semantic_axis_ = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis_,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis_;
} else {
CAFFE_ENFORCE(
axis_ == -1 && axis_str_.empty(),
"Do not specify axis or axis_str if broadcast is not enabled.");
}
}
}
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(1));
}
template <typename T>
bool DoRunWithType() {
const auto& dC = Input(0);
const auto& A = Input(1);
const auto& B = Input(2);
vector<int> A_dims;
vector<int> B_dims;
if (legacy_broadcast_) {
if (B.numel() == 1) {
A_dims = {static_cast<int>(A.numel())};
B_dims = {1};
} else {
size_t pre, n, post;
std::tie(pre, n, post) =
elementwise_ops_utils::ComputeLegacyBroadcastSizes(A, B, axis_);
A_dims = {
static_cast<int>(pre), static_cast<int>(n), static_cast<int>(post)};
B_dims = {static_cast<int>(n), 1};
}
} else {
std::copy(
A.sizes().cbegin(), A.sizes().cend(), std::back_inserter(A_dims));
std::copy(
B.sizes().cbegin(), B.sizes().cend(), std::back_inserter(B_dims));
}
const typename OutputTypeMap::template type<T>* C_data = nullptr;
if (InputSize() == 4) {
const auto& C = Input(3);
C_data = C.template data<typename OutputTypeMap::template type<T>>();
}
const auto* dC_data =
dC.template data<typename GradientTypeMap::template type<T>>();
const T* A_data = A.template data<T>();
const T* B_data = B.template data<T>();
auto* dA = Output(
0, A.sizes(), at::dtype<typename GradientTypeMap::template type<T>>());
auto* dB = Output(
1, B.sizes(), at::dtype<typename GradientTypeMap::template type<T>>());
auto* dA_data =
dA->template mutable_data<typename GradientTypeMap::template type<T>>();
auto* dB_data =
dB->template mutable_data<typename GradientTypeMap::template type<T>>();
return functor_.Backward(
A_dims,
B_dims,
dC_data,
A_data,
B_data,
C_data,
dA_data,
dB_data,
&context_);
}
private:
const bool legacy_broadcast_;
int axis_;
const std::string axis_str_;
const std::string order_;
Functor functor_;
};
template <class Functor>
struct BinaryFunctorWithDefaultCtor {
explicit BinaryFunctorWithDefaultCtor(OperatorBase& /* op */) {}
template <typename TIn, typename TOut, class Context>
bool Forward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TIn* A_data,
const TIn* B_data,
TOut* C_data,
Context* context) const {
return functor.Forward(A_dims, B_dims, A_data, B_data, C_data, context);
}
template <typename TGrad, typename TIn, typename TOut, class Context>
bool Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC_data,
const TIn* A_data,
const TIn* B_data,
const TOut* C_data,
TGrad* dA_data,
TGrad* dB_data,
Context* context) const {
return functor.Backward(
A_dims,
B_dims,
dC_data,
A_data,
B_data,
C_data,
dA_data,
dB_data,
context);
}
Functor functor{};
};
template <class Functor>
struct BinaryFunctorWithBroadcastOptionsCtor {
explicit BinaryFunctorWithBroadcastOptionsCtor(OperatorBase& op)
: functor{op.GetSingleArgument<bool>("allow_broadcast_fastpath", false)} {}
template <typename TIn, typename TOut, class Context>
bool Forward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TIn* A_data,
const TIn* B_data,
TOut* C_data,
Context* context) const {
return functor.Forward(A_dims, B_dims, A_data, B_data, C_data, context);
}
template <typename TGrad, typename TIn, typename TOut, class Context>
bool Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC_data,
const TIn* A_data,
const TIn* B_data,
const TOut* C_data,
TGrad* dA_data,
TGrad* dB_data,
Context* context) const {
return functor.Backward(
A_dims,
B_dims,
dC_data,
A_data,
B_data,
C_data,
dA_data,
dB_data,
context);
}
Functor functor;
};
// BinaryElementwiseOp is a wrapper around BinaryElementwiseWithArgsOp, with the
// difference that it takes a functor with default constructor, e.g. that does
// not need to take into consideration any arguments during operator creation.
template <
typename InputTypes,
class Context,
class Functor,
class TypeMap = SameTypeAsInput>
using BinaryElementwiseOp = BinaryElementwiseWithArgsOp<
InputTypes,
Context,
BinaryFunctorWithDefaultCtor<Functor>,
TypeMap>;
// BinaryElementwiseGradientOp is a wrapper around
// BinaryElementwiseWithArgsGradientOp, with the difference that it takes a
// functor with default constructor, e.g. that does not need to take into
// consideration any arguments during operator creation.
template <
typename InputTypes,
class Context,
class Functor,
class OutputTypeMap = SameTypeAsInput,
class GradientTypeMap = SameTypeAsInput>
using BinaryElementwiseGradientOp = BinaryElementwiseWithArgsGradientOp<
InputTypes,
Context,
BinaryFunctorWithDefaultCtor<Functor>,
OutputTypeMap,
GradientTypeMap>;
// BinaryElementwiseBroadcastOp is a wrapper around BinaryElementwiseWithArgsOp,
// with the difference that it takes a functor with a constructor that accepts
// broadcast-related arguments (just a single boolean for whether broadcast
// fastpaths are allowed at the time this comment was written).
template <
typename InputTypes,
class Context,
class Functor,
class TypeMap = SameTypeAsInput>
using BinaryElementwiseBroadcastOp = BinaryElementwiseWithArgsOp<
InputTypes,
Context,
BinaryFunctorWithBroadcastOptionsCtor<Functor>,
TypeMap>;
// BinaryElementwiseGradientBroadcastOp is a wrapper around
// BinaryElementwiseWithArgsGradientOp, with the difference that it takes a
// functor with a constructor that accepts broadcast-related arguments (just a
// single boolean for whether broadcast fastpaths are allowed at the time this
// comment was written).
template <
typename InputTypes,
class Context,
class Functor,
class OutputTypeMap = SameTypeAsInput,
class GradientTypeMap = SameTypeAsInput>
using BinaryElementwiseGradientBroadcastOp = BinaryElementwiseWithArgsGradientOp<
InputTypes,
Context,
BinaryFunctorWithBroadcastOptionsCtor<Functor>,
OutputTypeMap,
GradientTypeMap>;
// Forward-only Unary Functors.
template <class Context>
struct NotFunctor {
bool operator()(const int N, const bool* X, bool* Y, Context* context) const {
math::Not(N, X, Y, context);
return true;
}
};
template <class Context>
struct SignFunctor {
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const {
math::Sign(N, X, Y, context);
return true;
}
};
// Forward-only Binary Functors.
#define C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(FunctorName) \
template <class Context> \
struct FunctorName##Functor { \
template <typename TIn, typename TOut> \
bool Forward( \
const std::vector<int>& A_dims, \
const std::vector<int>& B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
Context* context) const { \
math::FunctorName( \
A_dims.size(), \
A_dims.data(), \
B_dims.size(), \
B_dims.data(), \
A, \
B, \
C, \
context); \
return true; \
} \
};
// Compare functors.
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(EQ);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(NE);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(LT);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(LE);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(GT);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(GE);
// Logical functors.
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(And);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(Or);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(Xor);
// Bitwise functors.
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(BitwiseAnd);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(BitwiseOr);
C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR(BitwiseXor);
#undef C10_DECLARE_FORWARD_ONLY_BINARY_FUNCTOR
namespace SRLHelper {
template <typename T>
void sum2one(const T* a, T* y, size_t n);
template <typename T>
void RunWithBroadcastFront(const T* a, T* y, size_t pre, size_t n, CPUContext*);
template <typename T>
void RunWithBroadcastBack(const T* a, T* y, size_t post, size_t n, CPUContext*);
template <typename T>
void RunWithBroadcast2(
const T* a,
T* y,
size_t pre,
size_t n,
size_t post,
CPUContext*);
} // namespace SRLHelper
// Sum reduction operator that is used for computing the gradient in cases
// where the forward op is in broadcast mode.
template <class Context>
class SumReduceLikeOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SumReduceLikeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, ""),
OP_SINGLE_ARG(string, "order", order_, "NCHW") {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0U,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1U, "Unsupported axis string", axis_str_);
size_t semantic_axis = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis;
}
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType();
private:
int axis_;
string axis_str_;
string order_;
Tensor ones_{Context::GetDeviceType()};
Tensor sum_buffer_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELEMENTWISE_OPS_H_
| 19,237
| 30.383361
| 81
|
h
|
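A standalone sketch of the (pre, n, post) decomposition that ComputeLegacyBroadcastSizes produces for the legacy broadcast path above: B's shape is assumed to match a contiguous run of A's dims starting at axis (the real helper enforces this), and pre/post are the products of the dims before and after that run.

#include <cstddef>
#include <tuple>
#include <vector>

// A is viewed as (pre, n, post); B, matching A's dims [axis, axis + B.ndim),
// is broadcast along pre and post. Shape checks omitted in this sketch.
std::tuple<size_t, size_t, size_t> LegacyBroadcastSizes(
    const std::vector<int>& A_dims, const std::vector<int>& B_dims, int axis) {
  size_t pre = 1, n = 1, post = 1;
  for (int i = 0; i < axis; ++i) pre *= A_dims[i];
  for (size_t i = 0; i < B_dims.size(); ++i) n *= A_dims[axis + i];
  for (size_t i = axis + B_dims.size(); i < A_dims.size(); ++i)
    post *= A_dims[i];
  return {pre, n, post};
}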
null |
pytorch-main/caffe2/operators/elementwise_ops_utils.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_
#include <tuple>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
namespace elementwise_ops_utils {
TORCH_API std::tuple<size_t, size_t, size_t>
ComputeLegacyBroadcastSizes(const Tensor& A, const Tensor& B, int axis);
TORCH_API std::vector<int> ComputeBinaryBroadcastForwardDims(
const c10::ArrayRef<int>& A_dims,
const c10::ArrayRef<int>& B_dims);
TORCH_API void ComputeBinaryBroadcastBackwardAxes(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
std::vector<int>* A_axes,
std::vector<int>* B_axes);
TORCH_API void ComputeBinaryBroadcastBackwardDims(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
std::vector<int>* A_back_dims,
std::vector<int>* B_back_dims);
} // namespace elementwise_ops_utils
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELEMENTWISE_OPS_UTILS_H_
| 1,008
| 27.027778
| 72
|
h
|
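ComputeBinaryBroadcastForwardDims follows numpy-style broadcasting. A standalone sketch of that rule: right-align the two shapes, let dims of 1 stretch, and take the larger dim of each aligned pair.

#include <algorithm>
#include <stdexcept>
#include <vector>

std::vector<int> BroadcastShapes(std::vector<int> A, std::vector<int> B) {
  if (A.size() < B.size()) std::swap(A, B);  // make A the longer shape
  std::vector<int> C(A.size());
  const size_t offset = A.size() - B.size();
  for (size_t i = 0; i < A.size(); ++i) {
    const int a = A[i];
    const int b = i < offset ? 1 : B[i - offset];  // pad B with leading 1s
    if (a != b && a != 1 && b != 1)
      throw std::runtime_error("incompatible shapes");
    C[i] = std::max(a, b);
  }
  return C;
}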
null |
pytorch-main/caffe2/operators/elementwise_sub_op.h
|
#ifndef CAFFE2_OPERATORS_ELEMENTWISE_SUB_OP_H_
#define CAFFE2_OPERATORS_ELEMENTWISE_SUB_OP_H_
#include <algorithm>
#include <functional>
#include <vector>
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/operators/elementwise_ops_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
struct SubFunctor {
template <typename TIn, typename TOut>
bool Forward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TIn* A,
const TIn* B,
TOut* C,
Context* context) const {
math::Sub(
A_dims.size(),
A_dims.data(),
B_dims.size(),
B_dims.data(),
A,
B,
C,
context);
return true;
}
template <typename TGrad, typename TIn, typename TOut>
bool Backward(
const std::vector<int>& A_dims,
const std::vector<int>& B_dims,
const TGrad* dC,
const TIn* /* A */,
const TIn* /* B */,
const TOut* /* C */,
TGrad* dA,
TGrad* dB,
Context* context) const {
const std::vector<int> C_dims =
elementwise_ops_utils::ComputeBinaryBroadcastForwardDims(
A_dims, B_dims);
std::vector<int> A_back_dims;
std::vector<int> B_back_dims;
elementwise_ops_utils::ComputeBinaryBroadcastBackwardDims(
A_dims, B_dims, &A_back_dims, &B_back_dims);
math::ReduceSum(
C_dims.size(),
C_dims.data(),
A_back_dims.data(),
TGrad(1),
dC,
dA,
context,
true);
math::ReduceSum(
C_dims.size(),
C_dims.data(),
B_back_dims.data(),
TGrad(-1),
dC,
dB,
context,
true);
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELEMENTWISE_SUB_OP_H_
| 1,821
| 22.063291
| 65
|
h
|
null |
pytorch-main/caffe2/operators/elu_op.h
|
#ifndef CAFFE2_OPERATORS_ELU_OP_H_
#define CAFFE2_OPERATORS_ELU_OP_H_
#include <vector>
#include "caffe2/operators/elementwise_ops.h"
namespace caffe2 {
template <class Context>
struct EluFunctor {
explicit EluFunctor(OperatorBase& op)
: alpha(op.GetSingleArgument<float>("alpha", 1.0f)) {}
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const;
const float alpha;
};
template <class Context>
struct EluGradientFunctor {
explicit EluGradientFunctor(OperatorBase& op)
: alpha(op.GetSingleArgument<float>("alpha", 1.0f)) {}
template <typename T>
bool Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& dY_dims,
const T* Y,
const T* dY,
T* dX,
Context* context) const;
const float alpha;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ELU_OP_H_
| 875
| 20.365854
| 73
|
h
|
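The ELU kernels above are only declared here. As a reference for the math: Y = X for X > 0 and Y = alpha * (exp(X) - 1) otherwise, so for X <= 0 the derivative alpha * exp(X) can be rewritten as Y + alpha, which is why EluGradientFunctor can work from Y alone. A standalone sketch:

#include <cmath>
#include <vector>

void EluForward(const std::vector<float>& X, float alpha,
                std::vector<float>* Y) {
  Y->resize(X.size());
  for (size_t i = 0; i < X.size(); ++i)
    (*Y)[i] = X[i] > 0 ? X[i] : alpha * (std::exp(X[i]) - 1.0f);
}

// For X <= 0, dY/dX = alpha * exp(X) = Y + alpha.
void EluBackward(const std::vector<float>& Y, const std::vector<float>& dY,
                 float alpha, std::vector<float>* dX) {
  dX->resize(Y.size());
  for (size_t i = 0; i < Y.size(); ++i)
    (*dX)[i] = Y[i] > 0 ? dY[i] : dY[i] * (Y[i] + alpha);
}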
null |
pytorch-main/caffe2/operators/enforce_finite_op.h
|
#ifndef CAFFE_OPERATORS_ENFORCE_FINITE_OP_H_
#define CAFFE_OPERATORS_ENFORCE_FINITE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class EnforceFiniteOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit EnforceFiniteOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws), ws_(ws) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType();
private:
Workspace* ws_;
Tensor buffer_{CPU};
template <typename T>
void EnforceOnCPU(const Tensor& input) {
const T* input_data = input.template data<T>();
auto size = input.numel();
for (const auto i : c10::irange(size)) {
auto isfinite = std::isfinite(input_data[i]);
if (!isfinite) {
LogBlobFiniteness();
}
CAFFE_ENFORCE_FINITE(
isfinite,
"Index ",
i,
" is not finite (e.g., NaN, Inf): ",
input_data[i]);
}
}
// LogBlobFiniteness sums every tensor in the workspace and logs whether it's finite or not.
void LogBlobFiniteness() {
// This uses the aten interfaces to compute the sum and finiteness of the
// tensors which are not present by default on xplat and mobile builds.
#if defined(EXPOSE_C2_OPS) || \
!defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE)
for (const std::string& blob_name : ws_->Blobs()) {
try {
const auto& blob = ws_->GetBlob(blob_name);
if (blob != nullptr && blob->IsType<Tensor>()) {
Tensor* c2Tensor = blob->GetMutable<Tensor>();
const at::Tensor& tensor = static_cast<at::Tensor>(*c2Tensor);
bool blob_finite = tensor.sum().isfinite().cpu().data_ptr<bool>()[0];
LOG(INFO) << "blob " << blob_name << " isfinite=" << (blob_finite ? "true" : "false");
}
} catch (const std::exception& ex) {
LOG(ERROR) << "failed to check finiteness for " << blob_name << ": " << ex.what();
}
}
#endif
}
};
} // namespace caffe2
#endif // CAFFE_OPERATORS_ENFORCE_FINITE_OP_H_
| 2,339
| 29.789474
| 96
|
h
|
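A standalone sketch of the per-element check EnforceOnCPU performs above, reporting the first non-finite entry instead of throwing:

#include <cmath>
#include <cstdio>
#include <vector>

// Returns the index of the first non-finite element, or -1 if all are finite.
long FirstNonFinite(const std::vector<float>& data) {
  for (size_t i = 0; i < data.size(); ++i) {
    if (!std::isfinite(data[i])) {
      std::fprintf(stderr, "index %zu is not finite: %f\n", i,
                   static_cast<double>(data[i]));
      return static_cast<long>(i);
    }
  }
  return -1;
}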
null |
pytorch-main/caffe2/operators/ensure_clipped_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class EnsureClippedOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit EnsureClippedOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
min_(std::numeric_limits<T>::lowest()),
max_(std::numeric_limits<T>::max()) {
if (HasArgument("min")) {
min_ = static_cast<T>(this->template GetSingleArgument<float>("min", 0));
}
if (HasArgument("max")) {
max_ = static_cast<T>(this->template GetSingleArgument<float>("max", 0));
}
}
bool RunOnDevice() override {
if (InputSize() > INDICES) {
      // sparse gradient: selectively check clipping
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).dim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
} else {
auto& X = Input(PARAM);
auto* Y = Output(OUTPUT_PARAM, X.sizes(), at::dtype<float>());
EigenVectorMap<float>(Y->template mutable_data<float>(), Y->numel()) =
ConstEigenVectorMap<float>(X.template data<float>(), X.numel())
.cwiseMax(min_)
.cwiseMin(max_);
return true;
}
}
template <typename SIndex>
bool DoRunWithType();
protected:
T min_;
T max_;
INPUT_TAGS(PARAM, INDICES, GRAD);
OUTPUT_TAGS(OUTPUT_PARAM);
};
} // namespace caffe2
| 1,608
| 26.741379
| 79
|
h
|
null |
pytorch-main/caffe2/operators/ensure_cpu_output_op.h
|
#ifndef CAFFE2_OPERATORS_ENSURE_CPU_OUTPUT_OP_H_
#define CAFFE2_OPERATORS_ENSURE_CPU_OUTPUT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class EnsureCPUOutputOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit EnsureCPUOutputOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
if (this->InputIsTensorType(0, CPU)) {
return CopyWithContext<CPUContext>();
} else if (this->InputIsTensorType(0, Context::GetDeviceType())) {
// CUDA Context will go this branch
return CopyWithContext<Context>();
} else {
CAFFE_THROW(
"Unexpected Input Blob: ",
OperatorBase::Inputs().at(0)->meta().name());
}
return true;
}
private:
template <class InputContext>
bool CopyWithContext() {
// Output is always on CPU
auto* output = this->template Output<Tensor>(0, CPU);
auto& input =
this->template Input<Tensor>(0, InputContext::GetDeviceType());
output->ResizeLike(input);
context_.CopyItemsToCPU(
input.dtype(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.dtype()));
context_.FinishDeviceComputation();
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ENSURE_CPU_OUTPUT_OP_H_
| 1,465
| 26.660377
| 71
|
h
|
null |
pytorch-main/caffe2/operators/expand_op.h
|
#ifndef CAFFE2_OPERATORS_EXPAND_OP_H_
#define CAFFE2_OPERATORS_EXPAND_OP_H_
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <typename InputTypes, class Context>
class ExpandOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ExpandOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "allow_broadcast_fastpath", allow_broadcast_fastpath_, false) {}
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& X = Input(0);
const auto& Y_shape_tensor = Input(1);
std::vector<int64_t> shape_dims(Y_shape_tensor.numel());
context_.template CopyToCPU<int64_t>(
Y_shape_tensor.numel(),
Y_shape_tensor.template data<int64_t>(),
shape_dims.data());
const int ndim = shape_dims.size();
const std::vector<int> X_dims(X.sizes().cbegin(), X.sizes().cend());
std::vector<int> Y_dims;
Y_dims.reserve(std::max(ndim, X.dim()));
    // Note: ndim and X.dim() might equal 0.
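    // Example (illustrative, not from the original source): X with dims
    // [3, 1] and shape input {2, -1, 4} walks the dims back to front:
    // max(1, 4) = 4, then max(3, 1) = 3 (the -1 preserves X's size), then
    // max(1, 2) = 2, giving Y dims [2, 3, 4] after the reverse below.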
for (int i = ndim - 1, j = X.dim() - 1; i >= 0 || j >= 0; --i, --j) {
const int shape_x = (j >= 0 ? X_dims[j] : 1);
// In PyTorch expand treats -1 as a special value to indicate
// preserving the size of that dimension.
const int shape_y = ((i >= 0 && shape_dims[i] > 0) ? shape_dims[i] : 1);
CAFFE_ENFORCE(
shape_x == 1 || shape_y == 1 || shape_x == shape_y,
"Dimensions format invalid.");
Y_dims.push_back(std::max(shape_x, shape_y));
}
std::reverse(Y_dims.begin(), Y_dims.end());
    // TODO: remove when the functions in math are changed to use vector<int64_t>
std::vector<int64_t> Y_dims_int64;
std::copy(Y_dims.begin(), Y_dims.end(), std::back_inserter(Y_dims_int64));
auto* Y = Output(0, Y_dims_int64, at::dtype<T>());
math::Broadcast<T, Context>(
X_dims.size(),
X_dims.data(),
Y_dims.size(),
Y_dims.data(),
T(1),
X.template data<T>(),
Y->template mutable_data<T>(),
&context_,
allow_broadcast_fastpath_);
return true;
}
const bool allow_broadcast_fastpath_;
};
template <typename InputTypes, class Context>
class ExpandGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ExpandGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "allow_broadcast_fastpath", allow_broadcast_fastpath_, false) {}
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& dY = Input(0);
const auto& X = Input(1);
const int ndim = dY.dim();
const std::vector<int> dX_dims(X.sizes().cbegin(), X.sizes().cend());
const std::vector<int> dY_dims(dY.sizes().cbegin(), dY.sizes().cend());
auto* dX = Output(0, X.sizes(), at::dtype<T>());
std::vector<int> axes;
const int offset = ndim - X.dim();
for (const auto i : c10::irange(ndim)) {
if (i < offset || dX_dims[i - offset] == 1) {
axes.push_back(i);
}
}
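    // Example (illustrative, not from the original source): X with dims
    // [3, 1] and dY with dims [2, 3, 4] gives offset = 1 and axes = [0, 2]:
    // axis 0 was prepended by broadcasting and axis 2 was expanded from a
    // singleton, so both are summed out below.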
std::vector<int> X_dims = dY_dims;
for (const int axis : axes) {
X_dims[axis] = 1;
}
math::ReduceSum<T, Context>(
dY_dims.size(),
dY_dims.data(),
X_dims.data(),
T(1),
dY.template data<T>(),
dX->template mutable_data<T>(),
&context_,
allow_broadcast_fastpath_);
return true;
}
const bool allow_broadcast_fastpath_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_EXPAND_OP_H_
| 3,914
| 30.071429
| 92
|
h
|
null |
pytorch-main/caffe2/operators/expand_squeeze_dims_op.h
|
#ifndef CAFFE2_OPERATORS_EXPAND_SQUEEZE_DIMS_OP_H_
#define CAFFE2_OPERATORS_EXPAND_SQUEEZE_DIMS_OP_H_
#include <c10/util/irange.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class ExpandDimsOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ExpandDimsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
dims_(this->template GetRepeatedArgument<int>("dims")) {
auto originalSize = dims_.size();
CAFFE_ENFORCE(originalSize > 0, "Parameter `dims` must be provided.");
std::sort(dims_.begin(), dims_.end());
dims_.erase(std::unique(dims_.begin(), dims_.end()), dims_.end());
if (dims_.size() < originalSize) {
LOG(WARNING) << "Parameter `dims` has repeated dimensions.";
}
CAFFE_ENFORCE(dims_.front() >= 0, "Dimension ids must be non-negative.");
}
bool RunOnDevice() override {
auto& input = Input(0);
auto* output = Output(0);
output->CopyFrom(input, true /*async*/);
if (dims_.empty()) {
return true;
}
auto newDims = input.sizes().vec();
CAFFE_ENFORCE_GE(
input.sizes().size() + dims_.size(),
dims_.back() + 1,
"Input needs at least ",
(1 + dims_.back() - dims_.size()),
" dimensions given `dims`.");
for (const auto dim : dims_) {
newDims.insert(newDims.begin() + dim, 1);
}
output->Reshape(newDims);
return true;
}
private:
vector<int> dims_;
};
template <class Context>
class SqueezeOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SqueezeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
dims_(this->template GetRepeatedArgument<int>("dims")) {
auto originalSize = dims_.size();
CAFFE_ENFORCE(originalSize > 0, "Parameter `dims` must be provided.");
std::sort(dims_.begin(), dims_.end());
dims_.erase(std::unique(dims_.begin(), dims_.end()), dims_.end());
if (dims_.size() < originalSize) {
LOG(WARNING) << "Parameter `dims` has repeated dimensions.";
}
CAFFE_ENFORCE(dims_.front() >= 0, "Dimension ids must be non-negative.");
}
bool RunOnDevice() override {
auto& input = Input(0);
auto* output = Output(0);
output->CopyFrom(input, true /*async*/);
CAFFE_ENFORCE_GT(
input.dim(),
dims_.back(),
"Input needs at least ",
(dims_.back() + 1),
" dimensions.");
std::vector<int> newDims = ComputeDims(input.sizes(), dims_);
output->Reshape(newDims);
return true;
}
static std::vector<int> ComputeDims(
at::IntArrayRef inputDims,
const std::vector<int>& dims) {
size_t j = 0;
std::vector<int> newDims;
for (const auto i : c10::irange(inputDims.size())) {
if (j < dims.size() && dims[j] == i) {
CAFFE_ENFORCE_EQ(
inputDims[i],
1,
"Dimension ",
i,
" of input must be 1",
" instead of ",
inputDims[i],
".");
++j;
continue;
}
newDims.push_back(inputDims.at(i));
}
return newDims;
}
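  // Example for ComputeDims (illustrative, not from the original source):
  // inputDims = [1, 3, 1, 4] with dims = [0, 2] drops both singleton
  // dimensions and returns [3, 4]; asking to squeeze a non-singleton
  // dimension such as dims = [1] trips the enforce above.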
private:
vector<int> dims_;
public:
C10_DISABLE_COPY_AND_ASSIGN(SqueezeOp);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_EXPAND_SQUEEZE_DIMS_OP_H_
| 3,430
| 27.122951
| 77
|
h
|
null |
pytorch-main/caffe2/operators/fc_inference.h
|
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
std::vector<TensorShape> FCShapeInference(
const OperatorDef& def,
const std::vector<TensorShape>& in,
bool pretransposed_weight);
OpSchema::Cost CostInferenceForFC(
const OperatorDef& def,
const std::vector<TensorShape>& in,
bool pretransposed_weight = false);
std::vector<TensorShape> FCGradientShapeInference(
const OperatorDef& def,
const std::vector<TensorShape>& in,
bool pretransposed_weight);
OpSchema::Cost CostInferenceForFCGradient(
const OperatorDef& def,
const std::vector<TensorShape>& in,
bool pretransposed_weight);
} // namespace caffe2
| 775
| 25.758621
| 50
|
h
|
null |
pytorch-main/caffe2/operators/feed_blob_op.h
|
#ifndef CAFFE2_OPERATORS_FEED_BLOB_OP_H_
#define CAFFE2_OPERATORS_FEED_BLOB_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class FeedBlobOp : public Operator<Context> {
public:
template <class... Args>
explicit FeedBlobOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
CAFFE_ENFORCE(
this->template HasSingleArgumentOfType<string>("value"),
"value argument must exist and be passed as a string");
value_ = this->template GetSingleArgument<string>("value", "");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
*this->template Output<std::string>(0) = value_;
return true;
}
private:
std::string value_;
};
} // namespace caffe2
#endif
| 802
| 21.942857
| 67
|
h
|
null |
pytorch-main/caffe2/operators/filler_op.h
|
#ifndef CAFFE2_OPERATORS_FILLER_OP_H_
#define CAFFE2_OPERATORS_FILLER_OP_H_
#include <c10/util/irange.h>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// FillerOp takes in either zero or one input.
//
// If the number of inputs is 1, the output shape will be identical to that
// of the input at run time, with optional additional dimensions appended at
// the end as specified by the "extra_shape" argument. In that case the
// "shape" parameter should not be set.
//
// If the number of inputs is 0, the full shape must be provided via the
// "shape" argument.
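//
// Example (illustrative sketch, not from the original source; the blob name
// "Y" is hypothetical):
//
//   // No input: the full shape comes from the "shape" argument.
//   OperatorDef def = CreateOperatorDef(
//       "ConstantFill", "", {}, {"Y"},
//       {MakeArgument<std::vector<int64_t>>("shape", {2, 3}),
//        MakeArgument<float>("value", 1.5f)});
//
//   // One input X of shape [4, 5] with extra_shape = [2] yields a
//   // [4, 5, 2] output; with input_as_shape = true, a 1D int64 input
//   // holding {4, 5} yields a [4, 5] output instead.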
template <class Context>
class FillerOp : public Operator<Context> {
public:
template <class... Args>
explicit FillerOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
shape_(this->template GetRepeatedArgument<int64_t>("shape")),
extra_shape_(ToVectorint64_t(
this->template GetRepeatedArgument<int>("extra_shape"))),
input_as_shape_(
this->template GetSingleArgument<bool>("input_as_shape", false)) {
if (InputSize()) {
if (shape_.size() != 0) {
CAFFE_THROW(
"Cannot set the shape argument and pass in an input at "
"the same time");
}
} else {
if (!extra_shape_.empty()) {
CAFFE_THROW("Cannot set extra_shape when there is no input");
}
if (input_as_shape_) {
CAFFE_THROW("An input must be given if input_as_shape is true");
}
if (shape_.size() == 0 &&
this->template HasSingleArgumentOfType<int>("shape")) {
CAFFE_THROW("Fill 'shape' argument was a scalar, list expected");
}
}
}
~FillerOp() override {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto* output = Operator<Context>::Output(0);
if (InputSize()) {
auto shape = vector<int64_t>{};
if (input_as_shape_) {
if (this->InputIsTensorType(0, CPU)) {
// originally, shape input must be in CPU context
auto& input = this->template Input<Tensor>(0, CPU);
CAFFE_ENFORCE_EQ(
input.dim(),
1,
"When input_as_shape is true, the input must be a 1D tensor of "
"data type int64_t");
CAFFE_ENFORCE(input.numel() > 0);
auto* shape_data = input.template data<int64_t>();
shape.insert(shape.end(), shape_data, shape_data + input.dim32(0));
} else {
// in ONNX case, we allow shape to be in CUDA context
auto& input = Input(0);
CAFFE_ENFORCE_EQ(
input.dim(),
1,
"When input_as_shape is true, the input must be a 1D tensor of "
"data type int64_t");
CAFFE_ENFORCE(input.numel() > 0);
auto* shape_data = input.template data<int64_t>();
std::unique_ptr<int64_t[]> shape_data_copy =
std::make_unique<int64_t[]>(input.dim32(0));
context_.template CopyToCPU<int64_t>(
input.dim32(0), shape_data, shape_data_copy.get());
shape.insert(
shape.end(),
shape_data_copy.get(),
shape_data_copy.get() + input.dim32(0));
}
} else {
auto& input = Input(0);
shape.insert(shape.end(), input.sizes().begin(), input.sizes().end());
}
shape.insert(shape.end(), extra_shape_.begin(), extra_shape_.end());
output->Resize(shape);
shape_ = shape;
} else {
output->Resize(shape_);
}
return Fill(output);
}
virtual bool Fill(Tensor* output) = 0;
protected:
vector<int64_t> shape_;
vector<int64_t> extra_shape_;
bool input_as_shape_;
};
template <typename T, class Context>
class UniformFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit UniformFillOp(Args&&... args)
: FillerOp<Context>(std::forward<Args>(args)...),
min_(this->template GetSingleArgument<T>("min", 0)),
max_(this->template GetSingleArgument<T>("max", 1)) {
if (InputSize() == 3) {
CAFFE_ENFORCE(
!this->template HasSingleArgumentOfType<T>("min"),
"Cannot set both min arg and min input blob");
CAFFE_ENFORCE(
!this->template HasSingleArgumentOfType<T>("max"),
"Cannot set both max arg and max input blob");
} else {
CAFFE_ENFORCE_LT(
min_, max_, "Max value should be bigger than min value.");
}
}
bool Fill(Tensor* output) override {
T min = min_;
T max = max_;
if (InputSize() == 3) {
CAFFE_ENFORCE_EQ(1, Input(1).numel(), "min blob must be scalar");
CAFFE_ENFORCE_EQ(1, Input(2).numel(), "max blob must be scalar");
min = *Input(1).template data<T>();
max = *Input(2).template data<T>();
if (min > max) {
auto shape = output->sizes().vec();
shape[0] = 0;
output->Resize(shape);
output->template mutable_data<T>();
return true;
}
}
math::RandUniform<T, Context>(
output->numel(),
min,
max,
output->template mutable_data<T>(),
&context_);
return true;
}
private:
T min_;
T max_;
};
template <class Context>
class UniqueUniformFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit UniqueUniformFillOp(Args&&... args)
: FillerOp<Context>(std::forward<Args>(args)...) {
TensorProto_DataType dtype =
static_cast<TensorProto_DataType>(this->template GetSingleArgument<int>(
"dtype", TensorProto_DataType_INT32));
switch (dtype) {
case TensorProto_DataType_INT32:
CheckRange<int>();
body_ = &UniqueUniformFillOp::FillWithType<int>;
break;
case TensorProto_DataType_INT64:
CheckRange<int64_t>();
body_ = &UniqueUniformFillOp::FillWithType<int64_t>;
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW(
"UniqueUniformFill op cannot have undefined 'dtype' argument");
// break;
default:
CAFFE_THROW("Unexpected 'dtype' argument value: ", dtype);
}
}
bool Fill(Tensor* output) override {
return (this->*body_)(output);
}
private:
template <typename T>
void CheckRange() {
CAFFE_ENFORCE(this->template HasSingleArgumentOfType<T>("min"));
CAFFE_ENFORCE(this->template HasSingleArgumentOfType<T>("max"));
CAFFE_ENFORCE_LT(
this->template GetSingleArgument<T>("min", 0),
this->template GetSingleArgument<T>("max", 0),
"Max value should be bigger than min value.");
}
template <typename T>
bool FillWithType(Tensor* output) {
T min = this->template GetSingleArgument<T>("min", 0);
T max = this->template GetSingleArgument<T>("max", 0);
const T* avoid_data = nullptr;
size_t avoid_size = 0;
if (InputSize() >= 2) {
auto& avoid = Input(1);
avoid_data = avoid.template data<T>();
avoid_size = avoid.numel();
}
math::RandUniformUnique<T, Context>(
output->numel(),
min,
max,
output->template mutable_data<T>(),
avoid_size,
avoid_data,
&context_);
return true;
}
bool (UniqueUniformFillOp::*body_)(Tensor* output);
};
template <class Context>
class ConstantFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ConstantFillOp(Args&&... args)
: FillerOp<Context>(std::forward<Args>(args)...) {
TensorProto_DataType dtype =
static_cast<TensorProto_DataType>(this->template GetSingleArgument<int>(
"dtype", TensorProto_DataType_FLOAT));
if (!OperatorBase::HasArgument("dtype") &&
OperatorBase::HasArgument("value")) {
// If 'dtype' is not provided, infer type based on the type of 'value'
// Currently, single argument contains either float, int64 or bytes
if (this->template HasSingleArgumentOfType<float>("value")) {
dtype = TensorProto_DataType_FLOAT;
} else if (this->template HasSingleArgumentOfType<int64_t>("value")) {
dtype = TensorProto_DataType_INT64;
} else {
CAFFE_THROW("Argument 'value' is of unexpected type");
}
VLOG(1) << "Argument 'dtype' is not provided. Assume the data type is "
<< "the same as that of argument 'value': " << dtype;
}
switch (dtype) {
case TensorProto_DataType_FLOAT:
body_ = &ConstantFillOp::FillWithType<float>;
break;
case TensorProto_DataType_DOUBLE:
body_ = &ConstantFillOp::FillWithType<double>;
break;
case TensorProto_DataType_BOOL:
body_ = &ConstantFillOp::FillWithType<bool>;
break;
case TensorProto_DataType_INT8:
body_ = &ConstantFillOp::FillWithType<int8_t>;
break;
case TensorProto_DataType_INT16:
body_ = &ConstantFillOp::FillWithType<int16_t>;
break;
case TensorProto_DataType_INT32:
body_ = &ConstantFillOp::FillWithType<int>;
break;
case TensorProto_DataType_INT64:
body_ = &ConstantFillOp::FillWithType<int64_t>;
break;
case TensorProto_DataType_UINT8:
body_ = &ConstantFillOp::FillWithType<uint8_t>;
break;
case TensorProto_DataType_UINT16:
body_ = &ConstantFillOp::FillWithType<uint16_t>;
break;
case TensorProto_DataType_STRING:
body_ = &ConstantFillOp::FillWithString;
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW("ConstantFill op cannot have undefined 'dtype' argument");
// break;
default:
CAFFE_THROW("Unexpected 'dtype' argument value: ", dtype);
}
}
bool Fill(Tensor* output) override {
return (this->*body_)(output);
}
template <typename T>
bool FillWithType(Tensor* output) {
T value = this->template GetSingleArgument<T>("value", 0);
if (InputSize() == 2) {
auto& value_vec = Input(1);
if (value_vec) {
CAFFE_ENFORCE_EQ(
value_vec.size(), 1, "value vector must have 1 element");
value = value_vec.template data<T>()[0];
}
}
auto* data = output->template mutable_data<T>();
if (output->numel()) {
math::Set<T, Context>(output->numel(), value, data, &context_);
}
return true;
}
bool FillWithString(Tensor* output) {
CAFFE_ENFORCE_LT(
InputSize(), 2, "constant fill string from tensor is not supported");
auto value = this->template GetSingleArgument<std::string>("value", "");
auto* data = output->template mutable_data<std::string>();
for (int i = 0; i < output->numel(); ++i) {
data[i] = value;
}
return true;
}
private:
bool (ConstantFillOp::*body_)(Tensor* output);
};
template <class Context>
class DiagonalFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit DiagonalFillOp(Args&&... args)
: FillerOp<Context>(std::forward<Args>(args)...) {
TensorProto_DataType dtype =
static_cast<TensorProto_DataType>(this->template GetSingleArgument<int>(
"dtype", TensorProto_DataType_FLOAT));
if (!OperatorBase::HasArgument("dtype") &&
OperatorBase::HasArgument("value")) {
// If 'dtype' is not provided, infer type based on the type of 'value'
// Currently, single argument contains either float, int64 or bytes
if (this->template HasSingleArgumentOfType<float>("value")) {
dtype = TensorProto_DataType_FLOAT;
} else if (this->template HasSingleArgumentOfType<int64_t>("value")) {
dtype = TensorProto_DataType_INT64;
} else {
CAFFE_THROW("Argument 'value' is of unexpected type");
}
VLOG(1) << "Argument 'dtype' is not provided. Assume the data type is "
<< "the same as that of argument 'value': " << dtype;
}
switch (dtype) {
case TensorProto_DataType_FLOAT:
body_ = &DiagonalFillOp::FillWithType<float>;
break;
case TensorProto_DataType_DOUBLE:
body_ = &DiagonalFillOp::FillWithType<double>;
break;
case TensorProto_DataType_BOOL:
body_ = &DiagonalFillOp::FillWithType<bool>;
break;
case TensorProto_DataType_INT8:
body_ = &DiagonalFillOp::FillWithType<int8_t>;
break;
case TensorProto_DataType_INT16:
body_ = &DiagonalFillOp::FillWithType<int16_t>;
break;
case TensorProto_DataType_INT32:
body_ = &DiagonalFillOp::FillWithType<int>;
break;
case TensorProto_DataType_INT64:
body_ = &DiagonalFillOp::FillWithType<int64_t>;
break;
case TensorProto_DataType_UINT8:
body_ = &DiagonalFillOp::FillWithType<uint8_t>;
break;
case TensorProto_DataType_UINT16:
body_ = &DiagonalFillOp::FillWithType<uint16_t>;
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW("Cannot have undefined 'dtype' argument");
default:
CAFFE_THROW("Unexpected 'dtype' argument value: ", dtype);
}
}
bool Fill(Tensor* output) override {
return (this->*body_)(output);
}
template <typename T>
bool FillWithType(Tensor* output);
private:
void VerifyOutputShape(Tensor* output) {
CAFFE_ENFORCE(output->dim() >= 2, "Input shape must be >= 2D");
}
int64_t GetStepSize(Tensor* output) {
int64_t step;
if (output->dim() == 2) {
step = output->size(1) + 1;
} else {
      int64_t first_dim = output->size(0);
      for (auto i : output->sizes()) {
        if (i != first_dim) {
          CAFFE_THROW("All dimensions of input must be of equal length");
        }
      }
vector<int64_t> cumprod(output->dim());
auto dims = output->sizes();
std::partial_sum(
dims.begin(),
dims.end() - 1,
cumprod.begin(),
std::multiplies<int64_t>());
step = 1 +
std::accumulate(
cumprod.begin(), cumprod.end(), static_cast<int64_t>(0));
      VLOG(0) << "diagonal step size: " << step;
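      // Example (illustrative, not from the original source): for a 3x3x3
      // output, cumprod = [3, 9], so step = 1 + 3 + 9 = 13; the diagonal
      // entries (0,0,0), (1,1,1), (2,2,2) are exactly 13 apart in the flat
      // row-major layout.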
}
return step;
}
bool (DiagonalFillOp::*body_)(Tensor* output);
};
template <typename T, class Context>
class GaussianFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit GaussianFillOp(Args&&... args)
: FillerOp<Context>(std::forward<Args>(args)...),
mean_(this->template GetSingleArgument<float>("mean", 0)),
std_(this->template GetSingleArgument<float>("std", 1)) {
    TORCH_DCHECK_GT(std_, 0) << "Standard deviation should be positive.";
}
bool Fill(Tensor* output) override {
math::RandGaussian<T, Context>(
output->numel(),
mean_,
std_,
output->template mutable_data<T>(),
&context_);
return true;
}
private:
T mean_;
T std_;
};
template <typename T, class Context>
class XavierFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit XavierFillOp(Args&&... args)
: FillerOp<Context>(std::forward<Args>(args)...) {}
bool Fill(Tensor* output) override {
const int fan_in = output->numel() / output->dim32(0);
T scale = std::sqrt(T(3) / fan_in);
math::RandUniform<T, Context>(
output->numel(),
-scale,
scale,
output->template mutable_data<T>(),
&context_);
return true;
}
};
template <typename T, class Context>
class MSRAFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit MSRAFillOp(Args&&... args)
: FillerOp<Context>(std::forward<Args>(args)...) {}
bool Fill(Tensor* output) override {
const int fan_out = output->numel() / output->dim32(1);
T scale = std::sqrt(T(2) / fan_out);
math::RandGaussian<T, Context>(
output->numel(),
0.0,
scale,
output->template mutable_data<T>(),
&context_);
return true;
}
};
// This is mostly used just as a debugging purpose stuff: it fills a tensor
// sequentially with values 0, 1, 2..., which can then be used to check e.g.
// reshape operations by allowing one to read the indices more easily.
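// Example (illustrative, not from the original source): filling a [2, 3]
// tensor yields [[0, 1, 2], [3, 4, 5]], so after a Reshape to [3, 2] the new
// element order can be read off directly.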
template <typename T, class Context>
class RangeFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit RangeFillOp(Args&&... args)
: FillerOp<Context>(std::forward<Args>(args)...) {}
bool Fill(Tensor* output) override;
};
template <class Context>
class LengthsRangeFillOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(LengthsRangeFillOp);
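  // Example (illustrative, not from the original source): input lengths
  // [2, 3, 1] produce the output [0, 1, 0, 1, 2, 0], i.e. one 0-based range
  // per segment, concatenated.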
bool RunOnDevice() override {
auto& input = Input(0);
auto* input_data = input.template data<int32_t>();
CAFFE_ENFORCE_EQ(input.dim(), 1, "Input must be a vector.");
auto len_sum = std::accumulate(input_data, input_data + input.numel(), 0);
auto* output = Output(0, {len_sum}, at::dtype<int32_t>());
auto* output_data = output->template mutable_data<int32_t>();
int32_t offset = 0;
for (const auto i : c10::irange(input.numel())) {
auto len = input_data[i];
auto start = output_data + offset;
std::iota(
start,
start + len,
          0); // TODO: make this start value (the third argument) an argument of the op
offset += len;
}
return true;
}
};
template <int VALUE_TYPE = TensorProto_DataType_FLOAT>
inline std::vector<TensorShape> FillerTensorInference(
const OperatorDef& def,
const vector<TensorShape>& in) {
vector<TensorShape> out(1);
ArgumentHelper helper(def);
out[0].set_data_type(static_cast<TensorProto_DataType>(
helper.GetSingleArgument<int>("dtype", VALUE_TYPE)));
if (in.size()) {
// TODO
bool input_as_shape =
helper.GetSingleArgument<bool>("input_as_shape", false);
if (input_as_shape) {
out[0].set_unknown_shape(true);
return out;
}
for (auto d : in[0].dims()) {
out[0].add_dims(d);
}
} else {
auto shape = helper.GetRepeatedArgument<int64_t>("shape");
for (auto d : shape) {
out[0].add_dims(d);
}
}
return out;
}
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FILLER_OP_H_
| 18,475
| 30.582906
| 80
|
h
|
null |
pytorch-main/caffe2/operators/find_duplicate_elements_op.h
|
#ifndef CAFFE2_OPERATORS_FIND_DUPLICATE_ELEMENTS_OP_H
#define CAFFE2_OPERATORS_FIND_DUPLICATE_ELEMENTS_OP_H
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "c10/util/irange.h"
#include <unordered_map>
#include <vector>
namespace caffe2 {
template <class Context>
class FindDuplicateElementsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(FindDuplicateElementsOp);
USE_DISPATCH_HELPER;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double, int, long, std::string>>::
call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& data = Input(0);
CAFFE_ENFORCE(data.dim() == 1, "data should be 1-D.");
const auto* data_ptr = data.template data<T>();
std::unordered_map<T, int64_t> dict;
std::vector<int64_t> dupIndices;
// i is the index of unique elements, j is the index of all elements
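    // Example (illustrative, not from the original source):
    // data = [3, 5, 3, 7, 5] yields dupIndices = [2, 4], the positions of
    // the second occurrences of 3 and 5.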
for (int64_t i = 0, j = 0; j < data.sizes()[0]; ++i, ++j) {
bool retVal = dict.insert({data_ptr[j], i}).second;
if (!retVal) {
--i;
dupIndices.push_back(j);
}
}
const auto dupSize = dupIndices.size();
auto* output =
Output(0, {static_cast<int64_t>(dupSize)}, at::dtype<int64_t>());
auto* out_ptr = output->template mutable_data<int64_t>();
for (const auto i : c10::irange(dupSize)) {
out_ptr[i] = dupIndices[i];
}
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FIND_DUPLICATE_ELEMENTS_OP_H
| 1,597
| 26.084746
| 79
|
h
|
null |
pytorch-main/caffe2/operators/find_op.h
|
#ifndef CAFFE2_OPERATORS_FIND_OP_H_
#define CAFFE2_OPERATORS_FIND_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
#include <unordered_map>
namespace caffe2 {
template <class Context>
class FindOp final : public Operator<Context> {
public:
template <class... Args>
explicit FindOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
missing_value_(
this->template GetSingleArgument<int>("missing_value", -1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_DISPATCH_HELPER;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int, long>>::call(this, Input(0));
}
protected:
template <typename T>
bool DoRunWithType() {
auto& idx = Input(0);
auto& needles = Input(1);
auto* res_indices = Output(0, needles.sizes(), at::dtype<T>());
const T* idx_data = idx.template data<T>();
const T* needles_data = needles.template data<T>();
T* res_data = res_indices->template mutable_data<T>();
auto idx_size = idx.numel();
    // Use an arbitrary cut-off for choosing between brute-force search and
    // a hash map: for larger needle counts we first put the index into a
    // map.
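    // Example (illustrative, not from the original source): idx = [3, 5, 7]
    // and needles = [5, 2] produce [1, -1] with the default missing_value
    // of -1.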
if (needles.numel() < 16) {
// Brute force O(nm)
for (const auto i : c10::irange(needles.numel())) {
T x = needles_data[i];
T res = static_cast<T>(missing_value_);
for (int j = idx_size - 1; j >= 0; j--) {
if (idx_data[j] == x) {
res = j;
break;
}
}
res_data[i] = res;
}
} else {
// O(n + m)
std::unordered_map<T, int> idx_map;
for (const auto j : c10::irange(idx_size)) {
idx_map[idx_data[j]] = j;
}
for (const auto i : c10::irange(needles.numel())) {
T x = needles_data[i];
auto it = idx_map.find(x);
res_data[i] = (it == idx_map.end() ? missing_value_ : it->second);
}
}
return true;
}
protected:
int missing_value_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FIND_OP_H_
| 2,117
| 25.475
| 74
|
h
|
null |
pytorch-main/caffe2/operators/flatten_op.h
|
#ifndef CAFFE2_OPERATORS_FLATTEN_OP_H_
#define CAFFE2_OPERATORS_FLATTEN_OP_H_
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class FlattenOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit FlattenOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axis_(this->template GetSingleArgument<int>("axis", 1)) {}
bool RunOnDevice() override {
auto& input = Input(0);
auto* output = Output(0);
CAFFE_ENFORCE_GE(
input.dim(), axis_, "The rank of the tensor must be >= axis.");
output->Resize(input.size_to_dim(axis_), input.size_from_dim(axis_));
context_.CopyItemsSameDevice(
input.dtype(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.dtype()));
return true;
}
private:
int axis_;
};
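// Example (illustrative, not from the original source): a [2, 3, 4] input
// with the default axis = 1 flattens to [2, 12]; with axis = 2 it becomes
// [6, 4]. The shape inference below mirrors this outer/inner split.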
inline std::vector<TensorShape> TensorInferenceForFlatten(
const OperatorDef& def,
const std::vector<TensorShape>& in) {
ArgumentHelper helper(def);
const int axis = helper.GetSingleArgument<int>("axis", 1);
std::vector<TensorShape> out(1);
int64_t outer = 1;
int64_t inner = 1;
std::size_t index = 0;
for (auto d : in[0].dims()) {
    if (index < static_cast<std::size_t>(axis)) {
outer *= d;
} else {
inner *= d;
}
++index;
}
out[0].set_data_type(in[0].data_type());
out[0].add_dims(outer);
out[0].add_dims(inner);
return out;
}
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FLATTEN_OP_H_
| 1,525
| 23.612903
| 73
|
h
|
null |
pytorch-main/caffe2/operators/flexible_top_k.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef CAFFE2_OPERATORS_FLEXIBLE_TOP_K_H_
#define CAFFE2_OPERATORS_FLEXIBLE_TOP_K_H_
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class FlexibleTopKOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit FlexibleTopKOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override;
};
template <typename T, class Context>
class FlexibleTopKGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit FlexibleTopKGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FLEXIBLE_TOP_K_H_
| 936
| 23.025641
| 57
|
h
|
null |
pytorch-main/caffe2/operators/floor_op.h
|
#ifndef CAFFE2_OPERATORS_FLOOR_OP_H_
#define CAFFE2_OPERATORS_FLOOR_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <typename T, class Context>
class FloorOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(FloorOp);
bool RunOnDevice() override {
auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<float>());
const float* Xdata = X.template data<float>();
float* Ydata = Y->template mutable_data<float>();
for (const auto i : c10::irange(X.numel())) {
Ydata[i] = std::floor(Xdata[i]);
}
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FLOOR_OP_H_
| 825
| 22.6
| 55
|
h
|
null |
pytorch-main/caffe2/operators/free_op.h
|
#ifndef CAFFE2_OPERATORS_FREE_OP_H_
#define CAFFE2_OPERATORS_FREE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
// FreeOp frees the content of the output blob. We allow it to take in input
// blobs purely for the reason that it can "wait" on the input blobs to be
// produced by some of the earlier operators before a free is called.
template <class Context>
class FreeOp : public Operator<Context> {
public:
template <class... Args>
explicit FreeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
for (Blob* output : OperatorBase::Outputs()) {
output->Reset();
}
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FREE_OP_H_
| 777
| 24.933333
| 76
|
h
|
null |
pytorch-main/caffe2/operators/fully_connected_op.h
|
#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_
#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_
#include <c10/util/Optional.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// This is Caffe's InnerProductOp, with a name that fits its purpose better.
template <
class Context,
class Engine = DefaultEngine,
bool TransposeWeight = true>
class FullyConnectedOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit FullyConnectedOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axis_(this->template GetSingleArgument<int32_t>("axis", 1)),
axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
float16_compute_(
this->template GetSingleArgument<bool>("float16_compute", false)) {}
~FullyConnectedOp() override {}
template <
typename T_X,
typename T_W,
typename T_B,
typename T_Y,
typename MATH>
bool DoRunWithType() {
const auto& X = Input(0);
const auto& W = Input(1);
const auto& b = Input(2);
CAFFE_ENFORCE(b.dim() == 1, b.dim());
// batch size
const auto canonical_axis = X.canonical_axis_index(axis_);
const auto M = X.size_to_dim(canonical_axis);
const auto K = X.size_from_dim(canonical_axis);
const auto canonical_axis_w = W.canonical_axis_index(axis_w_);
const int N = TransposeWeight ? W.size_to_dim(canonical_axis_w)
: W.size_from_dim(canonical_axis_w);
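    // Example (illustrative, not from the original source): X of shape
    // [2, 3, 4] with axis = 1 gives M = 2 and K = 12; a pre-transposed W of
    // shape [5, 12] then gives N = 5, and Y comes out as [2, 5].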
auto dimErrorString = [&]() {
return c10::str(
"Dimension mismatch: ",
"X: ",
X.sizes(),
", W: ",
W.sizes(),
", b: ",
b.sizes(),
", axis: ",
axis_,
", M: ",
M,
", N: ",
N,
", K: ",
K);
};
// Error checking
CAFFE_ENFORCE(M == X.numel() / K, dimErrorString());
CAFFE_ENFORCE(K == W.numel() / N, dimErrorString());
CAFFE_ENFORCE(N == b.dim32(0), dimErrorString());
CAFFE_ENFORCE(N == b.numel(), dimErrorString());
Y_shape_cache_ = X.sizes().vec();
// This is an invariant of canonical_axis, so we can DCHECK.
TORCH_DCHECK_LE(canonical_axis + 1, Y_shape_cache_.size());
Y_shape_cache_.resize(canonical_axis + 1);
Y_shape_cache_[canonical_axis] = N;
auto* Y = Output(0, Y_shape_cache_, at::dtype<T_Y>());
CAFFE_ENFORCE(M * N == Y->numel(), dimErrorString());
if (X.numel() == 0) {
// skip the rest of the computation if X is empty
Y->template mutable_data<T_Y>();
return true;
}
// default to FLOAT as math.h does.
TensorProto::DataType math_type = TensorProto_DataType_FLOAT;
if (fp16_type<MATH>()) {
math_type = TensorProto_DataType_FLOAT16;
}
// W * x
math::Gemm<T_X, Context, Engine>(
CblasNoTrans,
TransposeWeight ? CblasTrans : CblasNoTrans,
M,
N,
K,
1,
X.template data<T_X>(),
W.template data<T_W>(),
0,
Y->template mutable_data<T_Y>(),
&context_,
math_type);
// Add bias term
if (!bias_multiplier_.has_value()) {
bias_multiplier_ =
caffe2::empty({M}, at::dtype<T_B>().device(Context::GetDeviceType()));
math::Set<T_B, Context>(
M,
convert::To<float, T_B>(1),
bias_multiplier_->template mutable_data<T_B>(),
&context_);
} else if (bias_multiplier_->numel() != M) {
bias_multiplier_->Resize(M);
math::Set<T_B, Context>(
M,
convert::To<float, T_B>(1),
bias_multiplier_->template mutable_data<T_B>(),
&context_);
}
math::Gemm<T_B, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
N,
1,
1,
bias_multiplier_->template data<T_B>(),
b.template data<T_B>(),
1,
Y->template mutable_data<T_Y>(),
&context_,
math_type);
return true;
}
bool RunOnDevice() override {
return DoRunWithType<
float, // X
float, // W
float, // B
float, // Y
float>(); // Math
}
protected:
size_t axis_{1};
size_t axis_w_{1};
// A local vector to cache the output shape so we don't need to recreate
// a vector object every time we run Run().
vector<int64_t> Y_shape_cache_;
c10::optional<Tensor> bias_multiplier_;
bool float16_compute_;
};
template <
class Context,
class Engine = DefaultEngine,
bool TransposeWeight = true>
class FullyConnectedGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit FullyConnectedGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axis_(this->template GetSingleArgument<int32_t>("axis", 1)),
axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
float16_compute_(
this->template GetSingleArgument<bool>("float16_compute", false)) {}
~FullyConnectedGradientOp() override {}
template <
typename T_X,
typename T_W,
typename T_DY,
typename T_B,
typename T_DX,
typename T_DW,
typename T_DB,
typename MATH>
bool DoRunWithType() {
const auto& X = Input(0);
const auto& W = Input(1);
const auto& dY = Input(2);
// batch size
const auto canonical_axis = X.canonical_axis_index(axis_);
const int M = X.size_to_dim(canonical_axis);
const int K = X.size_from_dim(canonical_axis);
const auto canonical_axis_w = W.canonical_axis_index(axis_w_);
const int N = TransposeWeight ? W.size_to_dim(canonical_axis_w)
: W.size_from_dim(canonical_axis_w);
auto dimErrorString = [&]() {
return c10::str(
"Dimension mismatch: ",
"X: ",
X.sizes(),
", W: ",
W.sizes(),
", dY: ",
dY.sizes(),
", axis: ",
axis_,
", M: ",
M,
", N: ",
N,
", K: ",
K);
};
CAFFE_ENFORCE(M * K == X.numel(), dimErrorString());
CAFFE_ENFORCE(K * N == W.numel(), dimErrorString());
auto* dW = Output(0, W.sizes(), at::dtype<T_DW>());
auto* db = Output(1, {N}, at::dtype<T_DB>());
if (X.numel() == 0) {
// generate a zero blob for db and dW when X is empty
math::Set<T_DB, Context>(
db->numel(),
convert::To<float, T_DB>(0),
db->template mutable_data<T_DB>(),
&context_);
math::Set<T_DW, Context>(
dW->numel(),
convert::To<float, T_DW>(0),
dW->template mutable_data<T_DW>(),
&context_);
if (OutputSize() == 3) {
Output(2, X.sizes(), at::dtype<T_DX>());
}
return true;
}
// default to FLOAT as math.h does.
TensorProto::DataType math_type = TensorProto_DataType_FLOAT;
if (fp16_type<MATH>()) {
math_type = TensorProto_DataType_FLOAT16;
}
// Compute dW
math::Gemm<T_DY, Context, Engine>(
CblasTrans,
CblasNoTrans,
TransposeWeight ? N : K,
TransposeWeight ? K : N,
M,
1,
TransposeWeight ? dY.template data<T_DY>() : X.template data<T_X>(),
TransposeWeight ? X.template data<T_X>() : dY.template data<T_DY>(),
0,
dW->template mutable_data<T_DW>(),
&context_,
math_type);
if (!bias_multiplier_.has_value()) {
bias_multiplier_ =
caffe2::empty({M}, at::dtype<T_B>().device(Context::GetDeviceType()));
math::Set<T_B, Context>(
M,
convert::To<float, T_B>(1),
bias_multiplier_->template mutable_data<T_B>(),
&context_);
} else if (bias_multiplier_->numel() != M) {
bias_multiplier_->Resize(M);
math::Set<T_B, Context>(
M,
convert::To<float, T_B>(1),
bias_multiplier_->template mutable_data<T_B>(),
&context_);
}
// Compute dB
math::Gemv<T_DY, Context>(
CblasTrans,
M,
N,
1,
dY.template data<T_DY>(),
bias_multiplier_->template data<T_B>(),
0,
db->template mutable_data<T_DB>(),
&context_);
// Compute dX
if (OutputSize() == 3) {
auto* dX = Output(2, X.sizes(), at::dtype<T_DX>());
math::Gemm<T_DX, Context, Engine>(
CblasNoTrans,
TransposeWeight ? CblasNoTrans : CblasTrans,
M,
K,
N,
1,
dY.template data<T_DY>(),
W.template data<T_W>(),
0,
dX->template mutable_data<T_DX>(),
&context_,
math_type);
}
return true;
}
bool RunOnDevice() override {
return DoRunWithType<
float, // X
float, // W
float, // dY
float, // B
float, // dX
float, // dW
float, // dB
float>(); // Math
}
protected:
size_t axis_{1};
size_t axis_w_{1};
c10::optional<Tensor> bias_multiplier_;
bool float16_compute_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_
| 9,375
| 26.904762
| 80
|
h
|
null |
pytorch-main/caffe2/operators/fused_rowwise_8bit_conversion_ops.h
|
#ifndef CAFFE2_OPERATORS_FUSED_ROWWISE_8BIT_CONVERSION_OPS_H_
#define CAFFE2_OPERATORS_FUSED_ROWWISE_8BIT_CONVERSION_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include <c10/util/irange.h>
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/reducer_functors.h"
#include "caffe2/perfkernels/fused_nbit_rowwise_conversion.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(Fused8BitRowwiseQuantizedToFloat);
namespace caffe2 {
#define IS_LITTLE_ENDIAN \
[] { \
const int32_t kValue = 1; \
return reinterpret_cast<const std::uint8_t*>(&kValue)[0] == 1; \
}()
template <
typename T,
typename Tsb, // Type for Scale and Bias
void (*convert)(float* dst, const T* src, size_t N),
bool HAS_CONVERT,
class Context>
class FloatToFused8BitRowwiseQuantizedOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(FloatToFused8BitRowwiseQuantizedOp)
bool RunOnDevice() override {
const auto& input = Input(DATA_FLOAT);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
// The "fused" representation stores the scale and bias with the row-wise
// quantized data in one tensor. Since we quantize with 8 bits (1 byte) and
// represent the scale and bias with 32-bit floats, we'll use the last 8
// bytes of each row for scale (4 bytes) and bias (4 bytes).
// | ... int8 data ... | scale | bias |
// | number_of_columns | sizeof(Tsb)| sizeof(Tsb)|
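    // Example (illustrative, not from the original source): with
    // Tsb == float, a row of 10 values is stored in 10 + 2 * 4 = 18 bytes;
    // with Tsb == at::Half it is 10 + 2 * 2 = 14 bytes.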
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] =
input_columns + 2 * static_cast<std::int64_t>(sizeof(Tsb));
auto* output = Output(
DATA_FUSED_SCALE_BIAS_INT8,
output_dimensions,
at::dtype<std::uint8_t>());
const auto* input_data = input.template data<T>();
auto* output_data = output->template mutable_data<std::uint8_t>();
const auto output_columns = output->size(output->dim() - 1);
bool is_float = std::is_same<T, float>::value;
bool out_sb_half = std::is_same<Tsb, at::Half>::value;
if (!HAS_CONVERT) {
CAFFE_ENFORCE(is_float, "convert can be nullptr only if T is float");
if (out_sb_half) {
FloatToFusedNBitRowwiseQuantizedSBHalf(
8,
reinterpret_cast<const float*>(input_data),
input_rows,
input_columns,
output_data);
} else {
FloatToFused8BitRowwiseQuantized(
reinterpret_cast<const float*>(input_data),
input_rows,
input_columns,
output_data);
}
} else {
bool is_half = std::is_same<T, at::Half>::value;
CAFFE_ENFORCE(is_half);
vector<float> tmp(input_columns);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto row : c10::irange(input_rows)) {
convert(tmp.data(), input_data + row * input_columns, input_columns);
if (out_sb_half) {
FloatToFusedNBitRowwiseQuantizedSBHalf(
8,
tmp.data(),
1,
input_columns,
output_data + row * output_columns);
} else {
FloatToFused8BitRowwiseQuantized(
tmp.data(), 1, input_columns, output_data + row * output_columns);
}
}
}
return true;
}
private:
INPUT_TAGS(DATA_FLOAT);
OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS_INT8);
};
template <
typename T,
typename Tsb,
void (*convert)(T* dst, const float* src, size_t N),
bool HAS_CONVERT,
class Context>
class Fused8BitRowwiseQuantizedToFloatOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(Fused8BitRowwiseQuantizedToFloatOp)
bool RunOnDevice() override {
const auto& input = Input(DATA_FUSED_SCALE_BIAS_INT8);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
// The last 2*sizeof(Tsb) bytes per row are the scale and the bias.
// The rest of input_columns is the number of values in the original row.
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] =
input_columns - 2 * static_cast<std::int64_t>(sizeof(Tsb));
auto* output = Output(DATA_FLOAT, output_dimensions, at::dtype<T>());
const auto output_columns = output->size(output->dim() - 1);
const auto* input_data = input.template data<std::uint8_t>();
T* output_data = output->template mutable_data<T>();
bool is_float = std::is_same<T, float>::value;
bool in_sb_half = std::is_same<Tsb, at::Half>::value;
if (!HAS_CONVERT) {
CAFFE_ENFORCE(is_float, "convert can be nullptr only if T is float");
if (in_sb_half) {
FusedNBitRowwiseQuantizedSBHalfToFloat(
8,
input_data,
input_rows,
input_columns,
reinterpret_cast<float*>(output_data));
} else {
Fused8BitRowwiseQuantizedToFloat(
input_data,
input_rows,
input_columns,
reinterpret_cast<float*>(output_data));
}
} else {
bool is_half = std::is_same<T, at::Half>::value;
CAFFE_ENFORCE(is_half);
vector<float> tmp(input_columns);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto row : c10::irange(input_rows)) {
if (in_sb_half) {
FusedNBitRowwiseQuantizedSBHalfToFloat(
8,
input_data + row * input_columns,
1,
input_columns,
tmp.data());
} else {
Fused8BitRowwiseQuantizedToFloat(
input_data + row * input_columns, 1, input_columns, tmp.data());
}
convert(output_data + row * output_columns, tmp.data(), output_columns);
}
}
return true;
}
private:
INPUT_TAGS(DATA_FUSED_SCALE_BIAS_INT8);
OUTPUT_TAGS(DATA_FLOAT);
};
#undef IS_LITTLE_ENDIAN
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FUSED_ROWWISE_8BIT_CONVERSION_OPS_H_
| 6,504
| 32.880208
| 80
|
h
|
null |
pytorch-main/caffe2/operators/fused_rowwise_nbit_conversion_ops.h
|
#pragma once
#include <algorithm>
#include <vector>
#include <c10/util/irange.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
// for param_search_greedy
#include "caffe2/operators/fused_rowwise_nbitfake_conversion_ops.h"
#include "caffe2/perfkernels/fused_nbit_rowwise_conversion.h"
namespace caffe2 {
template <
int BIT_RATE,
typename T,
void (*convert)(float* dst, const T* src, size_t N),
bool GREEDY = false>
class FloatToFusedNBitRowwiseQuantizedOp final : public Operator<CPUContext> {
public:
FloatToFusedNBitRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FloatToFusedNBitRowwiseQuantizedOp() override {}
bool RunOnDevice() override {
const auto& input = Input(DATA_FLOAT);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
CAFFE_ENFORCE_EQ(
        input_columns % NUM_ELEM_PER_BYTE,
0,
"FloatToFused" + caffe2::to_string(BIT_RATE) +
"BitRowwiseQuantizedOp only works for the number of "
"columns a multiple of " +
caffe2::to_string(NUM_ELEM_PER_BYTE));
// The "fused" representation stores the scale and bias with the
// row-wise quantized data in one tensor.
// Since we represent the scale and bias in 16-bit float, we'll use the
// last 4 bytes of each row for scale (2 bytes) and bias (2 bytes).
// | ... quantized data ... | scale | bias |
// | number_of_columns | 2B | 2B |
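    // Example (illustrative, not from the original source): BIT_RATE = 4
    // packs two values per byte, so a row of 8 values occupies
    // 8 / 2 + 2 * 2 = 8 bytes: 4 bytes of packed codes followed by an fp16
    // scale and an fp16 bias.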
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] = static_cast<std::int64_t>(
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
2 * sizeof(at::Half));
auto* output = Output(
DATA_FUSED_SCALE_BIAS, output_dimensions, at::dtype<std::uint8_t>());
const auto* input_data = input.template data<T>();
auto* output_data = output->template mutable_data<std::uint8_t>();
if (!GREEDY && std::is_same<T, float>::value) {
// fast path
CAFFE_ENFORCE(
reinterpret_cast<void (*)(float*, const float*, std::size_t)>(
convert) == internal::convertfp32fp32,
"When T == float, convert must be convertfp32fp32");
FloatToFusedNBitRowwiseQuantizedSBHalf(
BIT_RATE,
reinterpret_cast<const float*>(input_data),
input_rows,
input_columns,
output_data);
} else {
const auto output_columns = output->size(output->dim() - 1);
#ifdef _OPENMP
vector<float> tmp_vec(
input_columns * (GREEDY ? omp_get_max_threads() : 1));
#else
vector<float> tmp_vec(input_columns);
#endif
#pragma omp parallel for if (GREEDY)
for (int row = 0; row < input_rows; ++row) {
float* tmp = tmp_vec.data();
#ifdef _OPENMP
if (GREEDY) {
tmp = &tmp_vec[omp_get_thread_num() * input_columns];
}
#endif
convert(tmp, input_data + row * input_columns, input_columns);
std::uint8_t* output_row = output_data + row * output_columns;
at::Half* output_row_scale = reinterpret_cast<at::Half*>(
output_row +
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE);
at::Half* output_row_bias = reinterpret_cast<at::Half*>(
output_row +
(input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
sizeof(at::Half));
float Xmin = *std::min_element(tmp, tmp + input_columns);
float Xmax = *std::max_element(tmp, tmp + input_columns);
if (GREEDY) {
internal::param_search_greedy(
tmp, input_columns, 200, 0.16, Xmin, Xmax, BIT_RATE);
}
// Round Xmin to fp16 to match with dequantization that will use fp16
// for Xmin.
Xmin = static_cast<at::Half>(Xmin);
const float range = Xmax - Xmin;
// Round scale to fp16 to match with dequantization that will use fp16
// for scale.
// Set scale to 1.0f for the corner case of Xmax == Xmin .
// Any non-zero scale would work because during quantization
// (X - Xmin) / scale will be 0 for all X unless scale is 0.
at::Half scale = range == 0 ? 1.0f : range / ((1 << BIT_RATE) - 1);
float inverse_scale = scale == 0 ? 1.0f : 1.0f / scale;
if (scale == 0 || std::isinf(inverse_scale)) {
// Corner case handling when Xmax == Xmin
// Any scale would work because X - Xmin will be 0 for all X
scale = 1.0f;
inverse_scale = 1.0f;
}
*output_row_scale = scale;
*output_row_bias = Xmin;
for (const auto col : c10::irange(input_columns)) {
float X = tmp[col];
std::uint8_t quantized = std::max(
0,
std::min<int>(
std::lrintf((X - Xmin) * inverse_scale),
(1 << BIT_RATE) - 1));
if (col % NUM_ELEM_PER_BYTE == 0) {
output_row[col / NUM_ELEM_PER_BYTE] = quantized;
} else {
output_row[col / NUM_ELEM_PER_BYTE] |=
(quantized << ((col % NUM_ELEM_PER_BYTE) * BIT_RATE));
}
}
}
} // GREEDY || !std::is_same<T, float>::value
return true;
}
private:
INPUT_TAGS(DATA_FLOAT);
OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS);
};
template <
int BIT_RATE,
typename T,
void (*convert)(T* dst, const float* src, size_t N)>
class FusedNBitRowwiseQuantizedToFloatOp final : public Operator<CPUContext> {
public:
FusedNBitRowwiseQuantizedToFloatOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FusedNBitRowwiseQuantizedToFloatOp() override {}
bool RunOnDevice() override {
const auto& input = Input(DATA_FUSED_SCALE_BIAS);
CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1");
const auto input_rows = input.size_to_dim(input.dim() - 1);
const auto input_columns = input.size(input.dim() - 1);
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// The last 4 bytes per row are two fp16 scale and bias.
// The rest of input_columns is the number of values in the original row.
auto output_dimensions = input.sizes().vec();
output_dimensions[input.dim() - 1] =
static_cast<std::int64_t>(input_columns - 2 * sizeof(at::Half)) *
NUM_ELEM_PER_BYTE;
auto* output = Output(DATA_FLOAT, output_dimensions, at::dtype<T>());
const auto output_columns = output->size(output->dim() - 1);
const auto* input_data = input.template data<std::uint8_t>();
T* output_data = output->template mutable_data<T>();
if (std::is_same<T, float>::value) {
// fast path
CAFFE_ENFORCE(
reinterpret_cast<void (*)(float*, const float*, std::size_t)>(
convert) == internal::convertfp32fp32,
"When T == float, convert must be convertfp32fp32");
FusedNBitRowwiseQuantizedSBHalfToFloat(
BIT_RATE,
input_data,
input_rows,
input_columns,
reinterpret_cast<float*>(output_data));
} else {
std::vector<float> tmp(output_columns);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto row : c10::irange(input_rows)) {
const std::uint8_t* input_row = input_data + row * input_columns;
float scale = *reinterpret_cast<const at::Half*>(
input_row +
(output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE);
float bias = *reinterpret_cast<const at::Half*>(
input_row +
(output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE +
sizeof(at::Half));
for (const auto col : c10::irange(output_columns)) {
std::uint8_t quantized = input_row[col / NUM_ELEM_PER_BYTE];
quantized >>= (col % NUM_ELEM_PER_BYTE) * BIT_RATE;
quantized &= (1 << BIT_RATE) - 1;
tmp[col] = scale * quantized + bias;
}
convert(output_data + row * output_columns, tmp.data(), output_columns);
}
}
return true;
}
private:
INPUT_TAGS(DATA_FUSED_SCALE_BIAS);
OUTPUT_TAGS(DATA_FLOAT);
};
} // namespace caffe2
| 8,580
| 35.67094
| 80
|
h
|
null |
pytorch-main/caffe2/operators/fused_rowwise_nbitfake_conversion_ops.h
|
#pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/reducer_functors.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace internal {
inline bool is_little_endian() {
constexpr std::int32_t kValue = 1;
return reinterpret_cast<const std::uint8_t*>(&kValue)[0] == 1;
}
void convertfp32fp32(float* dst, const float* src, size_t N);
void convertfp16fp32(float* dst, const at::Half* src, size_t N);
/**
 * @param Xmin initial solution passed in; a potentially better solution is returned
 * @param Xmax initial solution passed in; a potentially better solution is returned
*/
void param_search_greedy(
const float* X,
int N,
const int n_bins, // = 200,
const float ratio, // = 0.16,
float& Xmin,
float& Xmax,
int bit_rate);
} // namespace internal
// Fake 2/4 bit quantization
// Creates a 2/4bit rowwise quantized blob with scales and biases in fp16
// The storage format is 8 bit rowwise with scales and biases in fp32
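// Example (illustrative, not from the original source): with BIT_RATE = 2
// the codes still occupy one byte each here, so a [4, 16] float input
// produces a [4, 24] uint8 output: 16 code bytes plus 8 bytes of fp32 scale
// and bias per row.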
template <
int BIT_RATE,
typename T,
void (*convert)(float* dst, const T* src, size_t N),
bool GREEDY = false>
class FloatToFusedNBitFakeRowwiseQuantizedOp final
: public Operator<CPUContext> {
public:
FloatToFusedNBitFakeRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~FloatToFusedNBitFakeRowwiseQuantizedOp() override {}
bool RunOnDevice() override {
CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness");
const auto& input = Input(DATA_FLOAT);
const auto input_rows = input.size(0);
const auto input_columns = input.size(1);
CAFFE_ENFORCE_EQ(input.dim(), 2, "Expect input to be a matrix");
const std::vector<int64_t> output_dimensions = {input_rows,
input_columns + 8};
auto* output = Output(
DATA_FUSED_SCALE_BIAS_INT8, output_dimensions, at::dtype<uint8_t>());
const auto* input_data = input.template data<T>();
auto* output_data = output->template mutable_data<uint8_t>();
const auto output_columns = output->size(1);
if (!std::is_same<T, float>::value && !std::is_same<T, at::Half>::value) {
CAFFE_THROW("Unsupported data type");
}
#ifdef _OPENMP
vector<float> tmp_vec(input_columns * (GREEDY ? omp_get_max_threads() : 1));
#else
vector<float> tmp_vec(input_columns);
#endif
#pragma omp parallel for if (GREEDY)
for (int row = 0; row < input_rows; ++row) {
float* tmp = tmp_vec.data();
#ifdef _OPENMP
if (GREEDY) {
tmp = &tmp_vec[omp_get_thread_num() * input_columns];
}
#endif
convert(tmp, input_data + row * input_columns, input_columns);
uint8_t* output_row = output_data + row * output_columns;
float* output_row_scale_bias =
reinterpret_cast<float*>(output_row + input_columns);
float minimum_element = *std::min_element(tmp, tmp + input_columns);
float maximum_element = *std::max_element(tmp, tmp + input_columns);
if (GREEDY) {
internal::param_search_greedy(
tmp,
input_columns,
200,
0.16,
minimum_element,
maximum_element,
BIT_RATE);
}
minimum_element = static_cast<at::Half>(minimum_element);
const float range = maximum_element - minimum_element;
const float scale = range == 0
? 1.0f
: static_cast<float>(static_cast<at::Half>(
range / static_cast<float>((1 << BIT_RATE) - 1)));
const float inverse_scale = 1.0f / scale;
output_row_scale_bias[0] = scale;
output_row_scale_bias[1] = minimum_element;
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto col : c10::irange(input_columns)) {
output_row[col] = std::max(
0,
std::min<int>(
std::lrintf((tmp[col] - minimum_element) * inverse_scale),
(1 << BIT_RATE) - 1));
}
}
return true;
}
private:
INPUT_TAGS(DATA_FLOAT);
// INT8 suffix because this is a fake quantization operator whose output
// type is always 8-bit regardless of BIT_RATE.
OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS_INT8);
};
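// Hedged round-trip sketch (illustrative only) of the per-row rule above:
// quantize one value given the row minimum and scale, both rounded through
// fp16 exactly as done in the loop.
inline std::uint8_t FakeQuantizeExample(
    float x,
    float min_half, // row minimum after static_cast<at::Half> rounding
    float scale, // (max - min) / ((1 << BIT_RATE) - 1), rounded through fp16
    int bit_rate) {
  const int max_q = (1 << bit_rate) - 1;
  const int q = static_cast<int>(std::lrintf((x - min_half) / scale));
  return static_cast<std::uint8_t>(std::max(0, std::min(max_q, q)));
}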
} // namespace caffe2
| 4,346
| 30.05
| 80
|
h
|
null |
pytorch-main/caffe2/operators/fused_rowwise_random_quantization_ops.h
|
#ifndef CAFFE2_OPERATORS_FUSED_ROWWISE_RAND_CONVERSION_OPS_H_
#define CAFFE2_OPERATORS_FUSED_ROWWISE_RAND_CONVERSION_OPS_H_
#include <chrono>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/reducer_functors.h"
#include "caffe2/perfkernels/math.h"
#include "caffe2/utils/math.h"
#ifdef CAFFE2_USE_MKL
#include <mkl.h>
#define FUSED_ROWWISE_RANDOM_QUANTIZATION_USE_MKL
#endif
namespace caffe2 {
template <class Context>
class FloatToFusedRandRowwiseQuantizedOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit FloatToFusedRandRowwiseQuantizedOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
bitwidth_(OperatorBase::GetSingleArgument<int32_t>("bitwidth", 8)),
random_(OperatorBase::GetSingleArgument<bool>("random", true)) {
CAFFE_ENFORCE(
bitwidth_ == 1 || bitwidth_ == 2 || bitwidth_ == 4 || bitwidth_ == 8,
"Unsupported bitwidth");
if (random_) {
#ifdef FUSED_ROWWISE_RANDOM_QUANTIZATION_USE_MKL
int status = vslNewStream(
&vslStream_,
VSL_BRNG_MT19937,
std::chrono::system_clock::now().time_since_epoch().count());
if (status != VSL_STATUS_OK) {
LOG(WARNING) << "vslNewStream returns " << status;
}
#else
gen_.seed(std::chrono::system_clock::now().time_since_epoch().count());
dis_.reset(new std::uniform_real_distribution<float>(0.0f, 1.0f));
#endif
}
}
~FloatToFusedRandRowwiseQuantizedOp() override {
if (random_) {
#ifdef FUSED_ROWWISE_RANDOM_QUANTIZATION_USE_MKL
int status = vslDeleteStream(&vslStream_);
if (status != VSL_STATUS_OK) {
LOG(WARNING) << "vslDeleteStream returns " << status;
}
#endif
}
}
bool RunOnDevice() override;
private:
INPUT_TAGS(DATA_FLOAT);
OUTPUT_TAGS(DATA_FUSED_QUANTIZED);
protected:
size_t bitwidth_{8};
bool random_{true};
std::vector<float> random_buffer_;
#ifdef FUSED_ROWWISE_RANDOM_QUANTIZATION_USE_MKL
VSLStreamStatePtr vslStream_;
#else
std::unique_ptr<std::uniform_real_distribution<float>> dis_;
std::minstd_rand gen_;
#endif
};
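// Hedged sketch (illustrative only; the real kernel lives in the .cc file and
// may differ): stochastic rounding maps a value into `bitwidth` bits by
// adding uniform noise before truncation, which makes the rounding unbiased
// in expectation.
inline uint8_t StochasticRoundExample(
    float x,
    float minv, // row minimum
    float gap, // quantization step
    int bitwidth,
    float r /* drawn from U[0, 1) when random, 0.5 otherwise */) {
  const int max_q = (1 << bitwidth) - 1;
  int q = static_cast<int>((x - minv) / gap + r);
  if (q < 0) {
    q = 0;
  } else if (q > max_q) {
    q = max_q;
  }
  return static_cast<uint8_t>(q);
}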
template <class Context>
class FusedRandRowwiseQuantizedToFloatOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(FusedRandRowwiseQuantizedToFloatOp)
bool RunOnDevice() override;
private:
INPUT_TAGS(DATA_FUSED_QUANTIZED);
OUTPUT_TAGS(DATA_FLOAT);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FUSED_ROWWISE_RAND_CONVERSION_OPS_H_
| 2,616
| 26.840426
| 77
|
h
|
null |
pytorch-main/caffe2/operators/gather_fused_8bit_rowwise_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class GatherFused8BitRowwiseOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(GatherFused8BitRowwiseOp);
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, this->template Input<Tensor>(INDICES, CPU));
}
template <typename Index>
bool DoRunWithType() {
const auto& data = Input(DATA);
const auto& indices = Input(INDICES);
CAFFE_ENFORCE_EQ(data.dim(), 2, "DATA must be a matrix");
CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES must be a vector");
CAFFE_ENFORCE_GT(data.size(1), 8, "DATA must have more than 8 columns");
// Subtract 8 from the #columns of data for the 4 bytes for scale and 4
// bytes for bias that we use in the fused representation (per row).
const std::vector<int64_t> shape = {indices.size(0), data.size(1) - 8};
auto* output = Output(0, shape, at::dtype<float>());
auto block_bytesize = data.size_from_dim(1) * data.dtype().itemsize();
int N = indices.numel();
const uint8_t* src_base = data.template data<uint8_t>();
const Index* idxs = indices.template data<Index>();
auto out = output->template mutable_data<float>();
for (const auto i : c10::irange(N)) {
auto idx = idxs[i];
CAFFE_ENFORCE(
0 <= idx && idx < data.size(0),
"INDICES element is out of DATA bounds, id=",
idx,
" data_dim=",
data.size(0));
const uint8_t* src = src_base + idx * block_bytesize;
ConstEigenVectorArrayMap<uint8_t> input_row_values(src, shape[1]);
ConstEigenVectorArrayMap<float> input_row_scale_bias(
reinterpret_cast<const float*>(src + shape[1]), 2);
EigenVectorArrayMap<float> output_row(out + i * shape[1], shape[1]);
output_row = input_row_values.cast<float>() * input_row_scale_bias(0) +
input_row_scale_bias(1);
}
return true;
}
INPUT_TAGS(DATA, INDICES);
};
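// Minimal scalar sketch (illustrative only) of the fused 8-bit row layout
// consumed above: C uint8 values followed by an fp32 scale and an fp32 bias.
inline float DequantizeFused8BitExample(
    const uint8_t* row,
    int col,
    int num_value_columns) {
  const float* scale_bias =
      reinterpret_cast<const float*>(row + num_value_columns);
  return row[col] * scale_bias[0] + scale_bias[1];
}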
} // namespace caffe2
| 2,156
| 32.184615
| 77
|
h
|
null |
pytorch-main/caffe2/operators/gather_op.h
|
#ifndef GATHER_OP_H_
#define GATHER_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include <c10/util/irange.h>
namespace caffe2 {
// This maintains index-mapping functions shared by Gather and BatchGather ops.
namespace gather_helper {
// New shape is concatenation:
// [data dims before axis] + [indices dims] + [data dims after axis]
template <typename IndexType, typename DataDimsVec, typename IndexDimsVec>
static vector<IndexType> calc_output_shape_vector(
const DataDimsVec& data_dims,
const IndexDimsVec& indices_dims,
int axis,
bool match_outer) {
vector<IndexType> shape;
// If the dimension we are indexing is empty, just use data_dims as shape.
// This replicates behavior in (https://github.com/pytorch/pytorch/pull/13781)
// needed to allow workflows with empty batch to succeed.
if (data_dims[axis] == 0) {
shape.insert(shape.end(), data_dims.begin(), data_dims.end());
} else {
shape.insert(shape.end(), data_dims.begin(), data_dims.begin() + axis);
if (match_outer) {
shape.insert(
shape.end(), indices_dims.begin() + axis, indices_dims.end());
} else {
shape.insert(shape.end(), indices_dims.begin(), indices_dims.end());
}
shape.insert(shape.end(), data_dims.begin() + axis + 1, data_dims.end());
}
return shape;
}
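// Worked example (illustrative): data dims (2, 3, 4), indices dims (5) and
// axis = 1 give output shape (2, 5, 4); with match_outer = true and indices
// dims (2, 5), only the indices dims from axis onwards are appended, again
// giving (2, 5, 4).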
// Check that indices fall within dimension array size with CAFFE_ENFORCE.
template <typename IndexType>
static void check_indexarray_range(
const IndexType* indices,
int64_t n,
IndexType indexing_axis_dim,
bool wrap_indices) {
for (const auto i : c10::irange(n)) {
auto idx = indices[i];
if (wrap_indices && idx < 0) {
idx = idx + indexing_axis_dim;
}
CAFFE_ENFORCE(
0 <= idx && idx < indexing_axis_dim,
"INDICES element is out of DATA bounds, id=",
idx,
" axis_dim=",
indexing_axis_dim);
}
}
// Actual gather implementation - resizes output and copies indexed data.
template <typename Index, typename Context>
static bool gather_impl(
Operator<Context>* op,
int dataIdx,
int indicesIdx,
int outputIdx,
int axis,
bool wrap_indices,
bool match_outer) {
  // If we end up using it on GPU, doing O(N) memcpys is probably not ideal :)
// TODO: implement prefetching if it starts mattering (TF does it)
const Tensor& data = op->Input(dataIdx);
const Tensor& indices = op->Input(indicesIdx);
const TypeMeta dataType = data.dtype();
size_t item_bytesize = dataType.itemsize();
// ONNX allows negative axis to index from the back, valid range: [-r, r].
if (axis < 0) {
axis = data.dim() + axis;
}
CAFFE_ENFORCE_GE(data.dim(), axis + 1, "DATA should be at least [axis+1]-D");
CAFFE_ENFORCE_GE(axis, 0, "Axis should be non-negative");
CAFFE_ENFORCE_LT(axis, data.dim(), "Axis out of range");
// New shape:
// [data dims before axis] + [indices dims] + [data dims after axis]
vector<int64_t> shape = calc_output_shape_vector<int64_t>(
data.sizes(), indices.sizes(), axis, match_outer);
Tensor* output = op->Output(outputIdx, shape, at::dtype(dataType));
auto out = static_cast<char*>(output->raw_mutable_data(dataType));
// Succeed if size of output is zero, which can happen for empty batch which
// would have data dimension size of 0.
// This *must* be done AFTER output->raw_mutable_data() above as that has
// important allocation side effect that we must see.
if (output->numel() == 0) {
return true;
}
const Index* idxs = indices.template data<Index>();
auto src_base = static_cast<const char*>(data.raw_data());
auto outer_dims_product = data.size_to_dim(axis);
auto block_size = data.size_from_dim(axis + 1);
auto block_bytesize = block_size * item_bytesize;
auto src_indexing_axis_dim = data.size(axis);
auto src_batch_bytesize = data.size_from_dim(axis) * item_bytesize;
// Treat indices as a single block even if they have multiple dimensions.
// The "gathered batch" is a cumulative result combining indexed blocks.
auto idx_inner_dims_product = indices.size_from_dim(axis);
auto N = indices.numel();
if (match_outer) {
CAFFE_ENFORCE_GE(axis, 1, "Axis should be at least 1");
for (const auto i : c10::irange(axis)) {
CAFFE_ENFORCE_EQ(
data.size(i),
indices.size(i),
"INDICES must have the same outer dims as DATA (before dim AXIS)");
}
N = idx_inner_dims_product;
}
auto gathered_batch_bytesize = N * block_size * item_bytesize;
check_indexarray_range<Index>(idxs, N, src_indexing_axis_dim, wrap_indices);
// Special-case single-float copy for efficiency
if (data.template IsType<float>() && block_size == 1) {
for (const auto batch : c10::irange(outer_dims_product)) {
const float* src_floats =
(const float*)(src_base + batch * src_batch_bytesize);
float* dst_floats = (float*)(out + batch * gathered_batch_bytesize);
for (const auto i : c10::irange(N)) {
auto idx = idxs[i];
if (match_outer) {
idx = idxs[batch * idx_inner_dims_product + i];
}
if (wrap_indices && idx < 0) {
idx = idx + src_indexing_axis_dim;
}
dst_floats[i] = src_floats[idx];
}
}
} else {
// outer_dims_product specifies how many times we repeat inner dimensions,
// so we just iterate over it to cover all outer dimensions.
for (const auto batch : c10::irange(outer_dims_product)) {
for (const auto i : c10::irange(N)) {
auto idx = idxs[i];
if (match_outer) {
idx = idxs[batch * idx_inner_dims_product + i];
}
if (wrap_indices && idx < 0) {
idx = idx + src_indexing_axis_dim;
}
auto src = src_base + batch * src_batch_bytesize + idx * block_bytesize;
auto dst = out + batch * gathered_batch_bytesize + i * block_bytesize;
op->getContext()->CopyItemsSameDevice(dataType, block_size, src, dst);
}
}
}
return true;
}
} // namespace gather_helper
template <class Context>
class GatherOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit GatherOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, 0),
OP_SINGLE_ARG(bool, "match_outer", match_outer_, false) {
    // TBD: We may want to fix the old index-wrap behaviour once we have
    // operator versioning, and only apply it when needed, as otherwise it is
    // likely an error.
    // Right now, we apply index wrapping by default only to axis == 0,
    // since we have ONNX conversion code that relies on it. For other axes it
    // needs to be requested explicitly via the "wrap_indices" argument, or
    // you don't get it.
if (OperatorBase::HasArgument("wrap_indices")) {
wrap_indices_ = Operator<Context>::template GetSingleArgument<bool>(
"wrap_indices", (false));
} else {
      wrap_indices_ = (axis_ == 0);
}
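    // Example (illustrative): with axis == 0 and no explicit "wrap_indices"
    // argument, an index of -1 gathers the last row of DATA; for axis != 0 a
    // negative index fails the range check unless wrap_indices is set.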
}
~GatherOp() noexcept override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, this->template Input<Tensor>(INDICES, CPU));
}
template <typename Index>
bool DoRunWithType() {
return gather_helper::gather_impl<Index, Context>(
this, DATA, INDICES, 0, axis_, wrap_indices_, match_outer_);
}
INPUT_TAGS(DATA, INDICES);
protected:
int axis_;
bool wrap_indices_;
bool match_outer_;
};
} // namespace caffe2
#endif // GATHER_OP_H_
| 7,561
| 33.372727
| 80
|
h
|
null |
pytorch-main/caffe2/operators/gather_ranges_to_dense_op.h
|
#ifndef CAFFE2_OPERATORS_GATHER_RANGES_TO_DENSE_OPS_H_
#define CAFFE2_OPERATORS_GATHER_RANGES_TO_DENSE_OPS_H_
#include <math.h>
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include <c10/util/irange.h>
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/proto_utils.h"
#include <cstring>
#include <map>
#include <utility>
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(GatherRangesToDense);
namespace caffe2 {
template <class Context>
class GatherRangesToDenseOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit GatherRangesToDenseOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
lengths_(this->template GetRepeatedArgument<int>("lengths")),
minObservation_(this->template GetSingleArgument<int64_t>(
"min_observation",
10000)),
maxMismatchedRatio_(this->template GetSingleArgument<float>(
"max_mismatched_ratio",
0.01)),
maxEmptyRatio_(
this->template GetSingleArgument<float>("max_empty_ratio", 1.0)) {
CAFFE_ENFORCE_GT(lengths_.size(), 0, "There has to be at least one length");
for (auto length : lengths_) {
CAFFE_ENFORCE_GT(length, 0, "Each length should be positive");
}
    CAFFE_ENFORCE_GT(
        minObservation_, 0, "min_observation should be at least 1");
// Initialize the empty and mismatch counter.
for (const auto i : c10::irange(OutputSize())) {
(void)i; // Suppress unused variable warning
emptyRanges_.push_back(0);
mismatchedRanges_.push_back(0);
mismatchedLengths_.push_back(set<int>());
}
}
~GatherRangesToDenseOp() noexcept override {
if (totalRanges_ > minObservation_) {
string debugString;
if (this->has_debug_def()) {
debugString =
"Info from operator: " + ProtoDebugString(this->debug_def());
} else {
debugString = "Info from operator: no op def";
}
LOG(INFO) << "In GatherRangesToDenseOp:\n"
<< " Lifetime empty ranges for each feature is "
<< emptyRanges_ << ".\n"
<< " Lifetime mismatched ranges for each feature is "
<< mismatchedRanges_ << ".\n"
<< " With a total of " << totalRanges_ << " examples.\n"
<< debugString;
}
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, this->template Input<Tensor>(RANGES, CPU));
}
template <typename Index>
bool DoRunWithType() {
auto& data = Input(DATA);
auto& ranges = Input(RANGES);
CAFFE_ENFORCE_EQ(data.dim(), 1, "Data has to be 1-D");
CAFFE_ENFORCE_EQ(ranges.dim(), 3, "Ranges has to be 3-D");
if (InputSize() == 3) {
auto& key = Input(KEY);
CAFFE_ENFORCE_EQ(key.dim(), 1, "Key has to be 1-D");
CAFFE_ENFORCE(
key.dtype().template Match<int64_t>(), "Key has to be type int64_t");
}
CAFFE_ENFORCE_EQ(
ranges.size(1),
lengths_.size(),
"Number of ranges should match number of lengths");
CAFFE_ENFORCE_EQ(
ranges.size(1),
OutputSize(),
"Number of ranges should match number of outputs");
CAFFE_ENFORCE_EQ(
ranges.size(2), 2, "Ranges last dimension should be of size 2");
auto* rawData = static_cast<const char*>(data.raw_data());
auto* rangesData = ranges.template data<Index>();
int rangesDataOffset = 0;
auto itemsize = data.dtype().itemsize();
const auto batchSize = ranges.size(0);
vector<int64_t> outputDims{batchSize, 0};
vector<char*> outputRawData;
outputRawData.reserve(OutputSize());
for (const auto i : c10::irange(OutputSize())) {
auto *const output = Output(i);
outputDims[1] = lengths_[i];
output->Resize(outputDims);
char *const ptr = static_cast<char*>(output->raw_mutable_data(data.dtype()));
memset(ptr, 0, output->nbytes());
outputRawData.push_back(ptr);
}
for (const auto i : c10::irange(batchSize)) {
for (const auto j : c10::irange(OutputSize())) {
const auto rangeStart = rangesData[rangesDataOffset++];
const auto rangeLength = rangesData[rangesDataOffset++];
if (rangeLength == 0) {
// empty range, will be filled with zeros
emptyRanges_[j]++;
continue;
}
if (rangeLength != lengths_[j]) {
          // Range length mismatches for this output will be filled with
          // zeros. Note: empty ranges are not counted as mismatched because
          // empty ranges are more common and more tolerable.
mismatchedRanges_[j]++;
mismatchedLengths_[j].insert(rangeLength);
continue;
}
if (InputSize() == 2) {
context_.CopyItemsSameDevice(
data.dtype(),
rangeLength,
rawData + rangeStart * itemsize,
outputRawData[j] + i * itemsize * lengths_[j]);
} else {
auto& key = Input(KEY);
auto* key_data = key.template data<int64_t>();
vector<std::pair<int64_t, const char*>> buffer;
buffer.reserve(rangeLength);
for (const auto b_i : c10::irange(rangeLength)) {
int64_t one_key_item = key_data[rangeStart + b_i];
auto* one_data_item = rawData + (rangeStart + b_i) * itemsize;
buffer.emplace_back(one_key_item, one_data_item);
}
std::sort(
buffer.begin(),
buffer.end(),
[](const std::pair<int64_t, const char*>& left,
const std::pair<int64_t, const char*>& right) {
return left.first < right.first;
});
for (const auto b_i : c10::irange(rangeLength)) {
// Since this CPU only, directly copy to the destination.
std::memcpy(
outputRawData[j] + (i * lengths_[j] + b_i) * itemsize,
buffer[b_i].second,
itemsize);
}
}
}
}
CAFFE_ENFORCE_EQ(rangesDataOffset, ranges.numel());
    // Check whether the empty and mismatch ratios exceed the thresholds.
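    // Example (illustrative): with min_observation = 10000 and
    // max_mismatched_ratio = 0.01, up to 100 mismatched ranges are tolerated
    // while fewer than 10000 ranges have been observed; beyond that the
    // tolerance scales as 1% of all ranges seen so far.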
totalRanges_ += batchSize;
for (const auto j : c10::irange(OutputSize())) {
// Only check when the ratio is not set to allow all mismatches.
if (maxMismatchedRatio_ < 1.0) {
CAFFE_ENFORCE_GE(
std::max(totalRanges_, minObservation_) * maxMismatchedRatio_,
mismatchedRanges_[j],
"Ratio of range length mismatch for feature at index ",
j,
" is ",
(static_cast<double>(mismatchedRanges_[j]) /
static_cast<double>(totalRanges_)),
" (",
mismatchedRanges_[j],
"/",
totalRanges_,
") which exceeds ",
maxMismatchedRatio_,
". The incorrect lengths include: ",
mismatchedLengths_[j]);
}
// Only check when the ratio is not set to allow all examples to be empty.
if (maxEmptyRatio_ < 1.0) {
CAFFE_ENFORCE_GE(
std::max(totalRanges_, minObservation_) * maxEmptyRatio_,
emptyRanges_[j],
"Ratio of empty ranges for feature at index ",
j,
" is ",
(static_cast<double>(emptyRanges_[j]) /
static_cast<double>(totalRanges_)),
" (",
emptyRanges_[j],
"/",
totalRanges_,
") which exceeds ",
maxEmptyRatio_);
}
}
return true;
}
INPUT_TAGS(DATA, RANGES, KEY);
private:
vector<int> lengths_;
int64_t totalRanges_ = 0;
vector<int64_t> emptyRanges_;
vector<int64_t> mismatchedRanges_;
vector<set<int>> mismatchedLengths_;
// To avoid false alarm due to insufficient sample (e.g., first batch being
// mismatched and causing 100% to be mismatched), use a threshold to ensure
// enough samples are gathered before deciding whether there is an alarm or
// not.
int64_t minObservation_ = 0;
float maxMismatchedRatio_ = 0;
float maxEmptyRatio_ = 0;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_GATHER_RANGES_TO_DENSE_OPS_H_
| 8,423
| 34.1
| 83
|
h
|
null |
pytorch-main/caffe2/operators/gelu_op.h
|
#ifndef CAFFE2_OPERATORS_GELU_OP_H_
#define CAFFE2_OPERATORS_GELU_OP_H_
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/elementwise_ops.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(Gelu);
namespace caffe2 {
namespace gelu_utils {
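// kFastCoeff is the cubic coefficient of the fast (tanh) Gelu approximation:
// 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + kFastCoeff * x^3))).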
constexpr float kFastCoeff = 0.044715f;
} // namespace gelu_utils
template <class Context>
struct GeluFunctor {
explicit GeluFunctor(OperatorBase& op)
: fast_gelu(op.GetSingleArgument<bool>("fast_gelu", false)) {}
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const;
const bool fast_gelu;
};
template <class Context>
struct GeluGradientFunctor {
explicit GeluGradientFunctor(OperatorBase& op)
: fast_gelu(op.GetSingleArgument<bool>("fast_gelu", false)) {}
template <typename T>
bool Forward(
const std::vector<int>& dY_dims,
const std::vector<int>& X_dims,
const T* dY,
const T* X,
T* dX,
Context* context) const;
const bool fast_gelu;
};
template <class Context>
using GeluOp = UnaryElementwiseWithArgsOp<
TensorTypes<float>,
Context,
GeluFunctor<Context>>;
template <class Context>
using GeluGradientOp = BinaryElementwiseWithArgsOp<
TensorTypes<float>,
Context,
GeluGradientFunctor<Context>>;
} // namespace caffe2
#endif // CAFFE2_OPERATORS_GELU_OP_H_
| 1,452
| 22.063492
| 73
|
h
|
null |
pytorch-main/caffe2/operators/generate_proposals_op.h
|
#ifndef CAFFE2_OPERATORS_GENERATE_PROPOSALS_OP_H_
#define CAFFE2_OPERATORS_GENERATE_PROPOSALS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(GenerateProposals);
namespace caffe2 {
namespace utils {
// A sub tensor view
// TODO: Remove???
template <class T>
class ConstTensorView {
public:
ConstTensorView(const T* data, const std::vector<int>& dims)
: data_(data), dims_(dims) {}
int ndim() const {
return dims_.size();
}
const std::vector<int>& dims() const {
return dims_;
}
int dim(int i) const {
    TORCH_DCHECK_LT(i, dims_.size());
return dims_[i];
}
const T* data() const {
return data_;
}
size_t size() const {
return std::accumulate(
dims_.begin(), dims_.end(), 1, std::multiplies<size_t>());
}
private:
const T* data_ = nullptr;
std::vector<int> dims_;
};
// Generate a list of bounding box shapes for each pixel based on predefined
// bounding box shapes 'anchors'.
// anchors: predefined anchors, size(A, 4)
// Return: all_anchors_vec: (H * W, A * 4)
// Need to reshape to (H * W * A, 4) to match the format in python
TORCH_API ERMatXf ComputeAllAnchors(
const TensorCPU& anchors,
int height,
int width,
float feat_stride);
// Like ComputeAllAnchors, but instead of computing anchors for every single
// spatial location, only computes anchors for the already sorted and filtered
// positions after NMS is applied to avoid unnecessary computation.
// `order` is a raveled array of sorted indices in (A, H, W) format.
TORCH_API ERArrXXf ComputeSortedAnchors(
const Eigen::Map<const ERArrXXf>& anchors,
int height,
int width,
float feat_stride,
const vector<int>& order);
} // namespace utils
// C++ implementation of GenerateProposalsOp
// Generate bounding box proposals for Faster RCNN. The proposals are generated
// for a list of images based on image score 'score', bounding box
// regression result 'deltas' as well as predefined bounding box shapes
// 'anchors'. Greedy non-maximum suppression is applied to generate the
// final bounding boxes.
// Reference: facebookresearch/Detectron/detectron/ops/generate_proposals.py
template <class Context>
class GenerateProposalsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit GenerateProposalsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
spatial_scale_(
this->template GetSingleArgument<float>("spatial_scale", 1.0 / 16)),
feat_stride_(1.0 / spatial_scale_),
rpn_pre_nms_topN_(
this->template GetSingleArgument<int>("pre_nms_topN", 6000)),
rpn_post_nms_topN_(
this->template GetSingleArgument<int>("post_nms_topN", 300)),
rpn_nms_thresh_(
this->template GetSingleArgument<float>("nms_thresh", 0.7f)),
rpn_min_size_(this->template GetSingleArgument<float>("min_size", 16)),
angle_bound_on_(
this->template GetSingleArgument<bool>("angle_bound_on", true)),
angle_bound_lo_(
this->template GetSingleArgument<int>("angle_bound_lo", -90)),
angle_bound_hi_(
this->template GetSingleArgument<int>("angle_bound_hi", 90)),
clip_angle_thresh_(
this->template GetSingleArgument<float>("clip_angle_thresh", 1.0)),
legacy_plus_one_(
this->template GetSingleArgument<bool>("legacy_plus_one", true)) {}
~GenerateProposalsOp() override {}
bool RunOnDevice() override;
// Generate bounding box proposals for a given image
// im_info: [height, width, im_scale]
// all_anchors: (H * W * A, 4)
// bbox_deltas_tensor: (4 * A, H, W)
// scores_tensor: (A, H, W)
// out_boxes: (n, 5)
// out_probs: n
void ProposalsForOneImage(
const Eigen::Array3f& im_info,
const Eigen::Map<const ERArrXXf>& anchors,
const utils::ConstTensorView<float>& bbox_deltas_tensor,
const utils::ConstTensorView<float>& scores_tensor,
ERArrXXf* out_boxes,
EArrXf* out_probs) const;
protected:
// spatial_scale_ must be declared before feat_stride_
float spatial_scale_{1.0};
float feat_stride_{1.0};
// RPN_PRE_NMS_TOP_N
int rpn_pre_nms_topN_{6000};
// RPN_POST_NMS_TOP_N
int rpn_post_nms_topN_{300};
// RPN_NMS_THRESH
float rpn_nms_thresh_{0.7};
// RPN_MIN_SIZE
float rpn_min_size_{16};
// If set, for rotated boxes in RRPN, output angles are normalized to be
// within [angle_bound_lo, angle_bound_hi].
bool angle_bound_on_{true};
int angle_bound_lo_{-90};
int angle_bound_hi_{90};
// For RRPN, clip almost horizontal boxes within this threshold of
// tolerance for backward compatibility. Set to negative value for
// no clipping.
float clip_angle_thresh_{1.0};
// The infamous "+ 1" for box width and height dating back to the DPM days
bool legacy_plus_one_{true};
// Scratch space required by the CUDA version
// CUB buffers
Tensor dev_cub_sort_buffer_{Context::GetDeviceType()};
Tensor dev_cub_select_buffer_{Context::GetDeviceType()};
Tensor dev_image_offset_{Context::GetDeviceType()};
Tensor dev_conv_layer_indexes_{Context::GetDeviceType()};
Tensor dev_sorted_conv_layer_indexes_{Context::GetDeviceType()};
Tensor dev_sorted_scores_{Context::GetDeviceType()};
Tensor dev_boxes_{Context::GetDeviceType()};
Tensor dev_boxes_keep_flags_{Context::GetDeviceType()};
// prenms proposals (raw proposals minus empty boxes)
Tensor dev_image_prenms_boxes_{Context::GetDeviceType()};
Tensor dev_image_prenms_scores_{Context::GetDeviceType()};
Tensor dev_prenms_nboxes_{Context::GetDeviceType()};
Tensor host_prenms_nboxes_{CPU};
Tensor dev_image_boxes_keep_list_{Context::GetDeviceType()};
// Tensors used by NMS
Tensor dev_nms_mask_{Context::GetDeviceType()};
Tensor host_nms_mask_{CPU};
// Buffer for output
Tensor dev_postnms_rois_{Context::GetDeviceType()};
Tensor dev_postnms_rois_probs_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_GENERATE_PROPOSALS_OP_H_
| 6,271
| 33.651934
| 80
|
h
|
null |
pytorch-main/caffe2/operators/generate_proposals_op_util_boxes.h
|
#ifndef CAFFE2_OPERATORS_UTILS_BOXES_H_
#define CAFFE2_OPERATORS_UTILS_BOXES_H_
#include "caffe2/utils/eigen_utils.h"
#include <c10/util/irange.h>
// Bounding box utils for generate_proposals_op
// Reference: facebookresearch/Detectron/detectron/utils/boxes.py
namespace caffe2 {
namespace utils {
// Default value for minimum bounding box width and height after bounding box
// transformation (bbox_transform()) in log-space
const float BBOX_XFORM_CLIP_DEFAULT = log(1000.0 / 16.0);
const float PI = 3.14159265358979323846;
// Forward transform that maps proposal boxes to ground-truth boxes using
// bounding-box regression deltas.
// boxes: pixel coordinates of the bounding boxes
// size (M, 4), format [x1; y1; x2; y2], x2 >= x1, y2 >= y1
// deltas: bounding box translations and scales
// size (M, 4), format [dx; dy; dw; dh]
// dx, dy: scale-invariant translation of the center of the bounding box
// dw, dh: log-space scaling of the width and height of the bounding box
// weights: weights [wx, wy, ww, wh] for the deltas
// bbox_xform_clip: minimum bounding box width and height in log-space after
// transformation
// correct_transform_coords: Correct bounding box transform coordinates. Set to
// true to match the detectron code, set to false for backward compatibility
// return: pixel coordinates of the bounding boxes
// size (M, 4), format [x1; y1; x2; y2]
// see "Rich feature hierarchies for accurate object detection and semantic
// segmentation" Appendix C for more details
// reference: detectron/lib/utils/boxes.py bbox_transform()
template <class Derived1, class Derived2>
EArrXXt<typename Derived1::Scalar> bbox_transform_upright(
const Eigen::ArrayBase<Derived1>& boxes,
const Eigen::ArrayBase<Derived2>& deltas,
const std::vector<typename Derived2::Scalar>& weights =
std::vector<typename Derived2::Scalar>{1.0, 1.0, 1.0, 1.0},
const float bbox_xform_clip = BBOX_XFORM_CLIP_DEFAULT,
const bool legacy_plus_one = false) {
using T = typename Derived1::Scalar;
using EArrXX = EArrXXt<T>;
using EArrX = EArrXt<T>;
if (boxes.rows() == 0) {
    return EArrXX::Zero(0, deltas.cols());
}
CAFFE_ENFORCE_EQ(boxes.rows(), deltas.rows());
CAFFE_ENFORCE_EQ(boxes.cols(), 4);
CAFFE_ENFORCE_EQ(deltas.cols(), 4);
EArrX widths = boxes.col(2) - boxes.col(0) + T(int(legacy_plus_one));
EArrX heights = boxes.col(3) - boxes.col(1) + T(int(legacy_plus_one));
auto ctr_x = boxes.col(0) + T(0.5) * widths;
auto ctr_y = boxes.col(1) + T(0.5) * heights;
auto dx = deltas.col(0).template cast<T>() / weights[0];
auto dy = deltas.col(1).template cast<T>() / weights[1];
auto dw =
(deltas.col(2).template cast<T>() / weights[2]).cwiseMin(bbox_xform_clip);
auto dh =
(deltas.col(3).template cast<T>() / weights[3]).cwiseMin(bbox_xform_clip);
EArrX pred_ctr_x = dx * widths + ctr_x;
EArrX pred_ctr_y = dy * heights + ctr_y;
EArrX pred_w = dw.exp() * widths;
EArrX pred_h = dh.exp() * heights;
EArrXX pred_boxes = EArrXX::Zero(deltas.rows(), deltas.cols());
// x1
pred_boxes.col(0) = pred_ctr_x - T(0.5) * pred_w;
// y1
pred_boxes.col(1) = pred_ctr_y - T(0.5) * pred_h;
// x2
pred_boxes.col(2) = pred_ctr_x + T(0.5) * pred_w - T(int(legacy_plus_one));
// y2
pred_boxes.col(3) = pred_ctr_y + T(0.5) * pred_h - T(int(legacy_plus_one));
return pred_boxes;
}
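// Worked example (illustrative, legacy_plus_one = false): the box
// [0, 0, 10, 10] has size 10 x 10 and center (5, 5); deltas
// [0.1, 0.2, log(2), 0] with unit weights map it to center (6, 7) and size
// (20, 10), i.e. the box [-4, 2, 16, 12].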
// Like bbox_transform_upright, but works on rotated boxes.
// boxes: pixel coordinates of the bounding boxes
// size (M, 5), format [ctr_x; ctr_y; width; height; angle (in degrees)]
// deltas: bounding box translations and scales
// size (M, 5), format [dx; dy; dw; dh; da]
// dx, dy: scale-invariant translation of the center of the bounding box
// dw, dh: log-space scaling of the width and height of the bounding box
// da: delta for angle in radians
// return: pixel coordinates of the bounding boxes
// size (M, 5), format [ctr_x; ctr_y; width; height; angle (in degrees)]
template <class Derived1, class Derived2>
EArrXXt<typename Derived1::Scalar> bbox_transform_rotated(
const Eigen::ArrayBase<Derived1>& boxes,
const Eigen::ArrayBase<Derived2>& deltas,
const std::vector<typename Derived2::Scalar>& weights =
std::vector<typename Derived2::Scalar>{1.0, 1.0, 1.0, 1.0},
const float bbox_xform_clip = BBOX_XFORM_CLIP_DEFAULT,
const bool angle_bound_on = true,
const int angle_bound_lo = -90,
const int angle_bound_hi = 90) {
using T = typename Derived1::Scalar;
using EArrXX = EArrXXt<T>;
if (boxes.rows() == 0) {
    return EArrXX::Zero(0, deltas.cols());
}
CAFFE_ENFORCE_EQ(boxes.rows(), deltas.rows());
CAFFE_ENFORCE_EQ(boxes.cols(), 5);
CAFFE_ENFORCE_EQ(deltas.cols(), 5);
const auto& ctr_x = boxes.col(0);
const auto& ctr_y = boxes.col(1);
const auto& widths = boxes.col(2);
const auto& heights = boxes.col(3);
const auto& angles = boxes.col(4);
auto dx = deltas.col(0).template cast<T>() / weights[0];
auto dy = deltas.col(1).template cast<T>() / weights[1];
auto dw =
(deltas.col(2).template cast<T>() / weights[2]).cwiseMin(bbox_xform_clip);
auto dh =
(deltas.col(3).template cast<T>() / weights[3]).cwiseMin(bbox_xform_clip);
// Convert back to degrees
auto da = deltas.col(4).template cast<T>() * 180.0 / PI;
EArrXX pred_boxes = EArrXX::Zero(deltas.rows(), deltas.cols());
// new ctr_x
pred_boxes.col(0) = dx * widths + ctr_x;
// new ctr_y
pred_boxes.col(1) = dy * heights + ctr_y;
// new width
pred_boxes.col(2) = dw.exp() * widths;
// new height
pred_boxes.col(3) = dh.exp() * heights;
// new angle
pred_boxes.col(4) = da + angles;
if (angle_bound_on) {
// Normalize angle to be within [angle_bound_lo, angle_bound_hi].
// Deltas are guaranteed to be <= period / 2 while computing training
// targets by bbox_transform_inv.
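    // Example (illustrative): with bounds [-90, 90] the period is 180, so a
    // predicted angle of 100 degrees wraps to -80.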
const int period = angle_bound_hi - angle_bound_lo;
CAFFE_ENFORCE(period > 0 && period % 180 == 0);
auto angles = pred_boxes.col(4);
for (const auto i : c10::irange(angles.size())) {
if (angles[i] < angle_bound_lo) {
angles[i] += T(period);
} else if (angles[i] > angle_bound_hi) {
angles[i] -= T(period);
}
}
}
return pred_boxes;
}
template <class Derived1, class Derived2>
EArrXXt<typename Derived1::Scalar> bbox_transform(
const Eigen::ArrayBase<Derived1>& boxes,
const Eigen::ArrayBase<Derived2>& deltas,
const std::vector<typename Derived2::Scalar>& weights =
std::vector<typename Derived2::Scalar>{1.0, 1.0, 1.0, 1.0},
const float bbox_xform_clip = BBOX_XFORM_CLIP_DEFAULT,
const bool legacy_plus_one = false,
const bool angle_bound_on = true,
const int angle_bound_lo = -90,
const int angle_bound_hi = 90) {
CAFFE_ENFORCE(boxes.cols() == 4 || boxes.cols() == 5);
if (boxes.cols() == 4) {
// Upright boxes
return bbox_transform_upright(
boxes, deltas, weights, bbox_xform_clip, legacy_plus_one);
} else {
// Rotated boxes with angle info
return bbox_transform_rotated(
boxes,
deltas,
weights,
bbox_xform_clip,
angle_bound_on,
angle_bound_lo,
angle_bound_hi);
}
}
template <class Derived>
EArrXXt<typename Derived::Scalar> bbox_xyxy_to_ctrwh(
const Eigen::ArrayBase<Derived>& boxes,
bool legacy_plus_one = false) {
CAFFE_ENFORCE_EQ(boxes.cols(), 4);
const auto& x1 = boxes.col(0);
const auto& y1 = boxes.col(1);
const auto& x2 = boxes.col(2);
const auto& y2 = boxes.col(3);
EArrXXt<typename Derived::Scalar> ret(boxes.rows(), 4);
ret.col(0) = (x1 + x2) / 2.0; // x_ctr
ret.col(1) = (y1 + y2) / 2.0; // y_ctr
ret.col(2) = x2 - x1 + int(legacy_plus_one); // w
ret.col(3) = y2 - y1 + int(legacy_plus_one); // h
return ret;
}
template <class Derived>
EArrXXt<typename Derived::Scalar> bbox_ctrwh_to_xyxy(
const Eigen::ArrayBase<Derived>& boxes,
const bool legacy_plus_one = false) {
CAFFE_ENFORCE_EQ(boxes.cols(), 4);
const auto& x_ctr = boxes.col(0);
const auto& y_ctr = boxes.col(1);
const auto& w = boxes.col(2);
const auto& h = boxes.col(3);
EArrXXt<typename Derived::Scalar> ret(boxes.rows(), 4);
ret.col(0) = x_ctr - (w - int(legacy_plus_one)) / 2.0; // x1
ret.col(1) = y_ctr - (h - int(legacy_plus_one)) / 2.0; // y1
ret.col(2) = x_ctr + (w - int(legacy_plus_one)) / 2.0; // x2
ret.col(3) = y_ctr + (h - int(legacy_plus_one)) / 2.0; // y2
return ret;
}
// Clip boxes to image boundaries
// boxes: pixel coordinates of bounding box, size (M * 4)
template <class Derived>
EArrXXt<typename Derived::Scalar> clip_boxes_upright(
const Eigen::ArrayBase<Derived>& boxes,
int height,
int width,
bool legacy_plus_one = false) {
CAFFE_ENFORCE(boxes.cols() == 4);
EArrXXt<typename Derived::Scalar> ret(boxes.rows(), boxes.cols());
// x1 >= 0 && x1 < width
ret.col(0) = boxes.col(0).cwiseMin(width - int(legacy_plus_one)).cwiseMax(0);
// y1 >= 0 && y1 < height
ret.col(1) = boxes.col(1).cwiseMin(height - int(legacy_plus_one)).cwiseMax(0);
// x2 >= 0 && x2 < width
ret.col(2) = boxes.col(2).cwiseMin(width - int(legacy_plus_one)).cwiseMax(0);
// y2 >= 0 && y2 < height
ret.col(3) = boxes.col(3).cwiseMin(height - int(legacy_plus_one)).cwiseMax(0);
return ret;
}
// Similar to clip_boxes_upright but handles rotated boxes with angle info.
// boxes: size (M, 5), format [ctr_x; ctr_y; width; height; angle (in degrees)]
//
// Clipping is only performed for boxes that are almost upright
// (within a given `angle_thresh` tolerance) to maintain backward compatibility
// for non-rotated boxes.
//
// We don't clip rotated boxes due to a couple of reasons:
// (1) There are potentially multiple ways to clip a rotated box to make it
// fit within the image.
// (2) It's tricky to make the entire rectangular box fit within the image
//     while still not leaving out pixels of interest.
// Therefore, we rely on upstream ops like RoIAlignRotated safely handling this.
template <class Derived>
EArrXXt<typename Derived::Scalar> clip_boxes_rotated(
const Eigen::ArrayBase<Derived>& boxes,
int height,
int width,
float angle_thresh = 1.0,
bool legacy_plus_one = false) {
CAFFE_ENFORCE(boxes.cols() == 5);
const auto& angles = boxes.col(4);
// Filter boxes that are upright (with a tolerance of angle_thresh)
EArrXXt<typename Derived::Scalar> upright_boxes;
const auto& indices = GetArrayIndices(angles.abs() <= angle_thresh);
GetSubArrayRows(boxes, AsEArrXt(indices), &upright_boxes);
// Convert to [x1, y1, x2, y2] format and clip them
const auto& upright_boxes_xyxy =
bbox_ctrwh_to_xyxy(upright_boxes.leftCols(4), legacy_plus_one);
const auto& clipped_upright_boxes_xyxy =
clip_boxes_upright(upright_boxes_xyxy, height, width, legacy_plus_one);
// Convert back to [x_ctr, y_ctr, w, h, angle] and update upright boxes
upright_boxes.block(0, 0, upright_boxes.rows(), 4) =
bbox_xyxy_to_ctrwh(clipped_upright_boxes_xyxy, legacy_plus_one);
EArrXXt<typename Derived::Scalar> ret(boxes.rows(), boxes.cols());
ret = boxes;
for (const auto i : c10::irange(upright_boxes.rows())) {
ret.row(indices[i]) = upright_boxes.row(i);
}
return ret;
}
// Clip boxes to image boundaries.
template <class Derived>
EArrXXt<typename Derived::Scalar> clip_boxes(
const Eigen::ArrayBase<Derived>& boxes,
int height,
int width,
float angle_thresh = 1.0,
bool legacy_plus_one = false) {
CAFFE_ENFORCE(boxes.cols() == 4 || boxes.cols() == 5);
if (boxes.cols() == 4) {
// Upright boxes
return clip_boxes_upright(boxes, height, width, legacy_plus_one);
} else {
// Rotated boxes with angle info
return clip_boxes_rotated(
boxes, height, width, angle_thresh, legacy_plus_one);
}
}
// Only keep boxes with both sides >= min_size and center within the image.
// boxes: pixel coordinates of bounding box, size (M * 4)
// im_info: [height, width, img_scale]
// return: row indices for 'boxes'
template <class Derived>
std::vector<int> filter_boxes_upright(
const Eigen::ArrayBase<Derived>& boxes,
double min_size,
const Eigen::Array3f& im_info,
const bool legacy_plus_one = false) {
CAFFE_ENFORCE_EQ(boxes.cols(), 4);
// Scale min_size to match image scale
min_size *= im_info[2];
using T = typename Derived::Scalar;
using EArrX = EArrXt<T>;
EArrX ws = boxes.col(2) - boxes.col(0) + T(int(legacy_plus_one));
EArrX hs = boxes.col(3) - boxes.col(1) + T(int(legacy_plus_one));
EArrX x_ctr = boxes.col(0) + ws / T(2);
EArrX y_ctr = boxes.col(1) + hs / T(2);
EArrXb keep = (ws >= min_size) && (hs >= min_size) &&
(x_ctr < T(im_info[1])) && (y_ctr < T(im_info[0]));
return GetArrayIndices(keep);
}
// Similar to filter_boxes_upright but works for rotated boxes.
// boxes: pixel coordinates of the bounding boxes
// size (M, 5), format [ctr_x; ctr_y; width; height; angle (in degrees)]
// im_info: [height, width, img_scale]
// return: row indices for 'boxes'
template <class Derived>
std::vector<int> filter_boxes_rotated(
const Eigen::ArrayBase<Derived>& boxes,
double min_size,
const Eigen::Array3f& im_info) {
CAFFE_ENFORCE_EQ(boxes.cols(), 5);
// Scale min_size to match image scale
min_size *= im_info[2];
using T = typename Derived::Scalar;
const auto& x_ctr = boxes.col(0);
const auto& y_ctr = boxes.col(1);
const auto& ws = boxes.col(2);
const auto& hs = boxes.col(3);
EArrXb keep = (ws >= min_size) && (hs >= min_size) &&
(x_ctr < T(im_info[1])) && (y_ctr < T(im_info[0]));
return GetArrayIndices(keep);
}
template <class Derived>
std::vector<int> filter_boxes(
const Eigen::ArrayBase<Derived>& boxes,
double min_size,
const Eigen::Array3f& im_info,
const bool legacy_plus_one = false) {
CAFFE_ENFORCE(boxes.cols() == 4 || boxes.cols() == 5);
if (boxes.cols() == 4) {
// Upright boxes
return filter_boxes_upright(boxes, min_size, im_info, legacy_plus_one);
} else {
// Rotated boxes with angle info
return filter_boxes_rotated(boxes, min_size, im_info);
}
}
} // namespace utils
} // namespace caffe2
#endif // CAFFE2_OPERATORS_UTILS_BOXES_H_
| 14,288
| 34.90201
| 80
|
h
|
null |
pytorch-main/caffe2/operators/generate_proposals_op_util_nms_gpu.h
|
#ifndef CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
#define CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
#include <vector>
#include "caffe2/core/context_gpu.h"
namespace caffe2 {
namespace utils {
// Computes Non-Maximum Suppression on the GPU
// Reject a bounding box if its region has an intersection-over-union (IoU)
// overlap with a higher scoring selected bounding box larger than a
// threshold.
//
// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes
// size: (N,4), format: [x1; y1; x2; y2]
// the boxes are sorted by scores in descending order
// N : number of boxes
// d_keep_sorted_list : row indices of the selected proposals, sorted by score
// h_nkeep : number of selected proposals
// dev_delete_mask, host_delete_mask : Tensors that will be used as temp storage
// by NMS
// Those tensors will be resized to the necessary size
// context : current CUDA context
TORCH_API void nms_gpu_upright(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
const bool legacy_plus_one,
int* d_keep_sorted_list,
int* h_nkeep,
TensorCUDA& dev_delete_mask,
TensorCPU& host_delete_mask,
CUDAContext* context);
struct RotatedBox {
float x_ctr, y_ctr, w, h, a;
};
// Same as nms_gpu_upright, but for rotated boxes with angle info.
// d_desc_sorted_boxes : pixel coordinates of proposed bounding boxes
// size: (N,5), format: [x_ctr; y_ctr; width; height; angle]
// the boxes are sorted by scores in descending order
TORCH_API void nms_gpu_rotated(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
int* d_keep_sorted_list,
int* h_nkeep,
TensorCUDA& dev_delete_mask,
TensorCPU& host_delete_mask,
CUDAContext* context);
TORCH_API void nms_gpu(
const float* d_desc_sorted_boxes,
const int N,
const float thresh,
const bool legacy_plus_one,
int* d_keep_sorted_list,
int* h_nkeep,
TensorCUDA& dev_delete_mask,
TensorCPU& host_delete_mask,
CUDAContext* context,
const int box_dim);
} // namespace utils
} // namespace caffe2
#endif // CAFFE2_OPERATORS_UTILS_NMS_GPU_H_
| 2,128
| 28.985915
| 80
|
h
|
null |
pytorch-main/caffe2/operators/given_tensor_byte_string_to_uint8_fill_op.h
|
#pragma once
#include <c10/util/irange.h>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/filler_op.h"
#include "caffe2/utils/cast.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class GivenTensorByteStringToUInt8FillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit GivenTensorByteStringToUInt8FillOp(
const OperatorDef& operator_def,
Workspace* ws)
: FillerOp<Context>(operator_def, ws) {
const ArgumentHelper helper(operator_def);
if (!helper.HasArgument("dtype")) {
Extract();
} else {
auto dtype = cast::GetCastDataType(helper, "dtype");
switch (dtype) {
case TensorProto_DataType_STRING:
Extract();
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW("Cannot have undefined 'dtype' argument");
default:
CAFFE_THROW("Unexpected 'dtype' argument value: ", dtype);
}
}
}
bool Fill(Tensor* output) override {
TORCH_DCHECK_EQ(output->numel(), values_.numel())
<< "output size: " << output->numel()
<< " given size: " << values_.numel();
auto* data = output->template mutable_data<uint8_t>();
const uint8_t* values_data = values_.template data<uint8_t>();
if (output->numel()) {
context_.template CopySameDevice<uint8_t>(
output->numel(), values_data, data);
}
return true;
}
private:
void Extract() {
auto source_values = this->template GetRepeatedArgument<string>("values");
TORCH_DCHECK_EQ(source_values.size(), 1)
<< "expected size: 1 "
<< " given size: " << source_values.size();
auto str = source_values[0];
ReinitializeTensor(
&values_,
{static_cast<int64_t>(str.size())},
at::dtype<uint8_t>().device(CPU));
uint8_t* values_data = values_.template mutable_data<uint8_t>();
for (const auto i : c10::irange(str.size())) {
values_data[i] = static_cast<uint8_t>(str[i]);
}
}
Tensor values_;
};
} // namespace caffe2
| 2,146
| 28.819444
| 78
|
h
|
null |
pytorch-main/caffe2/operators/given_tensor_fill_op.h
|
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/filler_op.h"
#include "caffe2/utils/cast.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class GivenTensorFillOp final : public FillerOp<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit GivenTensorFillOp(const OperatorDef& operator_def, Workspace* ws)
: FillerOp<Context>(operator_def, ws) {
const ArgumentHelper helper(operator_def);
    // GivenTensorFillOp can be provided with a "dtype" arg if float is
    // specified as T. Otherwise, "dtype" is ignored.
    // In the ideal world, we would get rid of templating on T altogether, but
    // we need to provide backwards compatibility.
if (!std::is_same<T, float>::value || !helper.HasArgument("dtype")) {
ExtractValues<T>();
} else {
auto dtype = cast::GetCastDataType(helper, "dtype");
switch (dtype) {
case TensorProto_DataType_FLOAT:
ExtractValues<float>();
break;
case TensorProto_DataType_DOUBLE:
ExtractValues<double>();
break;
case TensorProto_DataType_BOOL:
ExtractValues<bool>();
break;
case TensorProto_DataType_INT16:
ExtractValues<int16_t>();
break;
case TensorProto_DataType_INT32:
ExtractValues<int>();
break;
case TensorProto_DataType_INT64:
ExtractValues<int64_t>();
break;
case TensorProto_DataType_STRING:
ExtractValues<std::string>();
break;
case TensorProto_DataType_UNDEFINED:
CAFFE_THROW("Cannot have undefined 'dtype' argument");
default:
CAFFE_THROW("Unexpected 'dtype' argument value: ", dtype);
}
}
}
bool Fill(Tensor* output) override {
return (this->*body_)(output);
}
private:
template <typename Type>
void ExtractValues() {
auto source_values = this->template GetRepeatedArgument<Type>("values");
ReinitializeTensor(
&values_,
{static_cast<int64_t>(source_values.size())},
at::dtype<Type>().device(CPU));
Type* values_data = values_.template mutable_data<Type>();
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto i : c10::irange(source_values.size())) {
values_data[i] = static_cast<Type>(source_values[i]);
}
body_ = &GivenTensorFillOp::FillWithType<Type>;
}
template <typename Type>
bool FillWithType(Tensor* output) {
CAFFE_ENFORCE_EQ(output->numel(), values_.numel());
auto* data = output->template mutable_data<Type>();
const Type* values_data = values_.template data<Type>();
if (output->numel()) {
context_.CopyItemsFromCPU(
TypeMeta::Make<Type>(), output->numel(), values_data, data);
}
return true;
}
bool (GivenTensorFillOp::*body_)(Tensor* output);
Tensor values_;
};
} // namespace caffe2
| 3,010
| 31.376344
| 77
|
h
|
null |
pytorch-main/caffe2/operators/glu_op.h
|
#ifndef CAFFE2_OPERATOR_GLU_OP_H_
#define CAFFE2_OPERATOR_GLU_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
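// Gated Linear Unit: the input is split into two halves a and b along `dim`
// (the last dimension when dim == -1), and the output is presumably
// a * sigmoid(b) following Dauphin et al. (2017); the elementwise kernel
// lives in ComputeGlu.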
template <typename T, class Context>
class GluOp final : public Operator<Context> {
public:
template <class... Args>
explicit GluOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
dim_(this->template GetSingleArgument<int>("dim", -1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto& X = Input(0);
vector<int64_t> Yshape;
Yshape.insert(Yshape.end(), X.sizes().begin(), X.sizes().end());
const int split_index = dim_ == -1 ? Yshape.size() - 1 : dim_;
CAFFE_ENFORCE(
Yshape[split_index] % 2 == 0,
"Split dimension ",
Yshape[split_index],
" should be divided by two");
const int split_dim_size = Yshape[split_index] / 2;
const int M = X.size_to_dim(split_index);
const int N = X.size_from_dim(split_index + 1);
Yshape[split_index] = split_dim_size;
auto* Y = Output(0, Yshape, at::dtype<T>());
ComputeGlu(
M,
split_dim_size,
N,
X.template data<T>(),
Y->template mutable_data<T>());
return true;
}
protected:
void ComputeGlu(
const int M,
const int split_dim_size,
const int N,
const T* X,
T* output);
private:
const int dim_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATOR_GLU_OP_H_
| 1,467
| 24.754386
| 68
|
h
|
null |
pytorch-main/caffe2/operators/group_norm_op.h
|
#ifndef CAFFE2_OPERATORS_GROUP_NORM_OP_H_
#define CAFFE2_OPERATORS_GROUP_NORM_OP_H_
#include <array>
#include <string>
#include <vector>
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class GroupNormOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit GroupNormOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "group", group_, 32),
OP_SINGLE_ARG(float, "epsilon", epsilon_, 1e-5),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))),
OP_SINGLE_ARG(bool, OpSchema::Arg_IsTest, is_test_, true) {
CAFFE_ENFORCE_NE(
order_,
StorageOrder::UNKNOWN,
"order should be either \"NCHW\" or \"NHWC\".");
if (!is_test_) {
CAFFE_ENFORCE_EQ(OutputSize(), 3);
}
}
bool RunOnDevice() override {
const auto& X = Input(INPUT);
const auto& gamma = Input(GAMMA);
const auto& beta = Input(BETA);
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1);
const size_t HxW = order_ == StorageOrder::NCHW
? X.size_from_dim(2)
: X.size_between_dim(0, ndim - 1);
CAFFE_ENFORCE_EQ(C % group_, 0);
CAFFE_ENFORCE_EQ(gamma.numel(), C);
CAFFE_ENFORCE_EQ(beta.numel(), C);
const int G = group_;
const int K = C / G;
auto* Y = Output(OUTPUT, X.sizes(), at::dtype<T>());
if (N == 0) {
return true;
}
T* mu_data = nullptr;
T* rsig_data = nullptr;
if (OutputSize() == 3) {
auto* mu = Output(MU, {N, G}, at::dtype<T>());
auto* rsig = Output(INV_SIGMA, {N, G}, at::dtype<T>());
mu_data = mu->template mutable_data<T>();
rsig_data = rsig->template mutable_data<T>();
} else {
ReinitializeTensor(
&mu_, {N, G}, at::dtype<T>().device(Context::GetDeviceType()));
ReinitializeTensor(
&rsig_, {N, G}, at::dtype<T>().device(Context::GetDeviceType()));
mu_data = mu_.template mutable_data<T>();
rsig_data = rsig_.template mutable_data<T>();
}
if (order_ == StorageOrder::NCHW) {
return RunOnDeviceWithOrderNCHW(
N,
G,
K,
HxW,
X.template data<T>(),
gamma.template data<T>(),
beta.template data<T>(),
Y->template mutable_data<T>(),
mu_data,
rsig_data);
} else {
return RunOnDeviceWithOrderNHWC(
N,
G,
K,
HxW,
X.template data<T>(),
gamma.template data<T>(),
beta.template data<T>(),
Y->template mutable_data<T>(),
mu_data,
rsig_data);
}
}
private:
bool RunOnDeviceWithOrderNCHW(
const int N,
const int G,
const int K,
const int HxW,
const T* X,
const T* gamma,
const T* beta,
T* Y,
T* mu,
T* rsig) {
const int C = G * K;
ReinitializeTensor(
&scale_, {N, C}, at::dtype<T>().device(Context::GetDeviceType()));
ReinitializeTensor(
&bias_, {N, C}, at::dtype<T>().device(Context::GetDeviceType()));
T* scale_data = scale_.template mutable_data<T>();
T* bias_data = bias_.template mutable_data<T>();
const std::array<int, 2> X_dims = {N * G, K * HxW};
const std::array<int, 2> Y_dims = {N * G, 1};
math::Moments<T, Context>(
2, X_dims.data(), Y_dims.data(), X, mu, rsig, &context_);
math::InvStd<T, Context>(
N * G, static_cast<T>(epsilon_), rsig, rsig, &context_);
ComputeFusedParams(N, G, K, mu, rsig, gamma, beta, scale_data, bias_data);
GroupNormForwardNCHW(N, C, HxW, X, scale_data, bias_data, Y);
return true;
}
bool RunOnDeviceWithOrderNHWC(
const int N,
const int G,
const int K,
const int HxW,
const T* X,
const T* gamma,
const T* beta,
T* Y,
T* mu,
T* rsig) {
const int C = G * K;
ReinitializeTensor(
&scale_, {N, C}, at::dtype<T>().device(Context::GetDeviceType()));
ReinitializeTensor(
&bias_, {N, C}, at::dtype<T>().device(Context::GetDeviceType()));
T* scale_data = scale_.template mutable_data<T>();
T* bias_data = bias_.template mutable_data<T>();
const std::array<int, 4> X_dims = {N, HxW, G, K};
const std::array<int, 4> Y_dims = {N, 1, G, 1};
math::Moments<T, Context>(
4, X_dims.data(), Y_dims.data(), X, mu, rsig, &context_);
math::InvStd<T, Context>(
N * G, static_cast<T>(epsilon_), rsig, rsig, &context_);
ComputeFusedParams(N, G, K, mu, rsig, gamma, beta, scale_data, bias_data);
GroupNormForwardNHWC(N, C, HxW, X, scale_data, bias_data, Y);
return true;
}
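  // Folds normalization and the affine transform into one per-channel scale
  // and bias, presumably scale = gamma * rsig and bias = beta - mu * scale,
  // so the forward pass reduces to Y = X * scale + bias.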
void ComputeFusedParams(
int N,
int G,
int K,
const T* mu,
const T* rsig,
const T* gamma,
const T* beta,
T* scale,
T* bias);
void GroupNormForwardNCHW(
const int N,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y);
void GroupNormForwardNHWC(
const int N,
const int C,
const int HxW,
const T* X,
const T* scale,
const T* bias,
T* Y);
const int group_;
const float epsilon_;
const StorageOrder order_;
const bool is_test_;
Tensor mu_;
Tensor rsig_;
Tensor scale_;
Tensor bias_;
// Input: X, gamma, beta
// Output: Y, mu, inv_sig
INPUT_TAGS(INPUT, GAMMA, BETA);
OUTPUT_TAGS(OUTPUT, MU, INV_SIGMA);
};
template <typename T, class Context>
class GroupNormGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit GroupNormGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "group", group_, 32),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))) {
CAFFE_ENFORCE_NE(
order_,
StorageOrder::UNKNOWN,
"order should be either \"NCHW\" or \"NHWC\".");
}
bool RunOnDevice() override {
const auto& dY = Input(OUTPUT_GRAD);
const auto& X = Input(INPUT);
const auto& gamma = Input(GAMMA);
const auto& beta = Input(BETA);
const auto& mu = Input(MU);
const auto& rsig = Input(INV_SIGMA);
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = order_ == StorageOrder::NCHW ? X.dim32(1) : X.dim32(ndim - 1);
const int HxW = X.numel() / (N * C);
CAFFE_ENFORCE_EQ(C % group_, 0);
CAFFE_ENFORCE_EQ(gamma.numel(), C);
CAFFE_ENFORCE_EQ(beta.numel(), C);
const int G = group_;
const int K = C / G;
auto* dX = Output(INPUT_GRAD, X.sizes(), at::dtype<T>());
auto* dgamma = Output(GAMMA_GRAD, gamma.sizes(), at::dtype<T>());
auto* dbeta = Output(BETA_GRAD, beta.sizes(), at::dtype<T>());
if (order_ == StorageOrder::NCHW) {
return RunOnDeviceWithOrderNCHW(
N,
G,
K,
HxW,
dY.template data<T>(),
X.template data<T>(),
mu.template data<T>(),
rsig.template data<T>(),
gamma.template data<T>(),
dX->template mutable_data<T>(),
dgamma->template mutable_data<T>(),
dbeta->template mutable_data<T>());
} else {
return RunOnDeviceWithOrderNHWC(
N,
G,
K,
HxW,
dY.template data<T>(),
X.template data<T>(),
mu.template data<T>(),
rsig.template data<T>(),
gamma.template data<T>(),
dX->template mutable_data<T>(),
dgamma->template mutable_data<T>(),
dbeta->template mutable_data<T>());
}
}
protected:
bool RunOnDeviceWithOrderNCHW(
int N,
int G,
int K,
int HxW,
const T* dY_data,
const T* X_data,
const T* mu_data,
const T* rsig_data,
const T* gamma_data,
T* dX_data,
T* dgamma_data,
T* dbeta_data);
bool RunOnDeviceWithOrderNHWC(
int N,
int G,
int K,
int HxW,
const T* dY_data,
const T* X_data,
const T* mu_data,
const T* rsig_data,
const T* gamma_data,
T* dX_data,
T* dgamma_data,
T* dbeta_data);
const int group_;
const StorageOrder order_;
Tensor ds_;
Tensor db_;
Tensor dY_scale_;
Tensor X_scale_;
Tensor bias_;
Tensor ones_;
// Input: dY, X, gamma, beta, mu, inv_sig
// Output: dX, dgamma, dbeta
INPUT_TAGS(OUTPUT_GRAD, INPUT, GAMMA, BETA, MU, INV_SIGMA);
OUTPUT_TAGS(INPUT_GRAD, GAMMA_GRAD, BETA_GRAD);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_GROUP_NORM_OP_H_
| 8,967
| 27.290221
| 80
|
h
|
null |
pytorch-main/caffe2/operators/gru_unit_op.h
|
#ifndef CAFFE2_OPERATORS_GRU_UNIT_OP_H_
#define CAFFE2_OPERATORS_GRU_UNIT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include <c10/util/irange.h>
namespace caffe2 {
namespace detail {
template <typename T>
inline T sigmoid(T x) {
return 1.0f / (1.0f + exp(-x));
}
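// host_tanh uses the identity tanh(x) = 2 * sigmoid(2x) - 1.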
template <typename T>
inline T host_tanh(T x) {
return 2.0f * sigmoid(2.0f * x) - 1.0f;
}
template <typename T, typename Context>
void GRUUnit(
int N,
int D,
int t,
const T* H_prev,
const T* X,
const int32_t* seqLengths,
bool drop_states,
T* H,
Context* /*context*/) {
for (const auto n : c10::irange(N)) {
const bool valid = seqLengths == nullptr || t < seqLengths[n];
for (const auto d : c10::irange(D)) {
if (!valid) {
if (drop_states) {
H[d] = 0;
} else {
H[d] = H_prev[d];
}
} else {
const T update = X[1 * D + d];
const T output = X[2 * D + d];
T sigmoid_update = sigmoid(update);
H[d] = H_prev[d] * sigmoid_update +
host_tanh(output) * (1.0f - sigmoid_update);
}
}
H_prev += D;
X += 3 * D;
H += D;
}
}
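// Backward pass of a single GRU step. With u = sigmoid(update) and
// o = tanh(output), H = u * H_prev + (1 - u) * o, so
//   dH_prev = dH * u
//   dupdate = dH * (H_prev - o) * u * (1 - u)   (chain rule through sigmoid)
//   doutput = dH * (1 - u) * (1 - o^2)          (chain rule through tanh)
// The reset gate does not enter this elementwise step, so its gradient
// contribution here is zero.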
template <typename T, typename Context>
void GRUUnitGradient(
int N,
int D,
int t,
const T* H_prev,
const T* X,
const int32_t* seqLengths,
const T* H,
const T* H_diff,
bool drop_states,
T* H_prev_diff,
T* X_diff,
Context* /*context*/) {
for (const auto n : c10::irange(N)) {
const bool valid = seqLengths == nullptr || t < seqLengths[n];
for (const auto d : c10::irange(D)) {
T* h_prev_diff = H_prev_diff + d;
T* reset_diff = X_diff + 0 * D + d;
T* update_diff = X_diff + 1 * D + d;
T* output_diff = X_diff + 2 * D + d;
if (!valid) {
if (drop_states) {
*h_prev_diff = 0;
} else {
*h_prev_diff = H_diff[d];
}
*reset_diff = 0;
*update_diff = 0;
*output_diff = 0;
} else {
// Calculate Gate Outputs
const T u = sigmoid(X[1 * D + d]);
const T o = host_tanh(X[2 * D + d]);
*h_prev_diff = H_diff[d] * u;
*reset_diff = 0; // 0 contribution to gradient from this operation
*update_diff = (H_diff[d] * H_prev[d] - H_diff[d] * o) * u * (1.0f - u);
*output_diff = H_diff[d] * (1.0f - u) * (1.0f - o * o);
}
}
H_prev += D;
X += 3 * D;
H += D;
H_diff += D;
X_diff += 3 * D;
H_prev_diff += D;
}
}
} // namespace detail
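// Applies one GRU time step. Inputs are the previous hidden state
// (1 x N x D), the gate pre-activations (1 x N x 3D), an optional int32
// sequence-lengths blob of size N, and a scalar int32 timestep; the output
// is the new hidden state with the same shape as the previous one.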
template <typename T, typename Context>
class GRUUnitOp : public Operator<Context> {
public:
template <class... Args>
explicit GRUUnitOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
drop_states_(
this->template GetSingleArgument<bool>("drop_states", false)),
sequence_lengths_(
this->template GetSingleArgument<bool>("sequence_lengths", true)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// handle potentially-missing sequence lengths input
const size_t TIMESTEP = SEQ_LENGTHS + (sequence_lengths_ ? 1 : 0);
// Extract N
const auto N = Input(HIDDEN_T_M_1).size(1);
// Gates: 1xNxG
const auto G = Input(GATES).size(2);
const auto D = Input(HIDDEN_T_M_1).size(2);
CAFFE_ENFORCE_EQ(3 * D, G);
const auto* H_prev = Input(HIDDEN_T_M_1).template data<T>();
const auto* X = Input(GATES).template data<T>();
const int32_t* seqLengths = nullptr;
if (sequence_lengths_) {
CAFFE_ENFORCE_EQ(Input(SEQ_LENGTHS).numel(), N);
seqLengths = Input(SEQ_LENGTHS).template data<int32_t>();
}
const auto t = static_cast<OperatorBase*>(this)
->Input<Tensor>(TIMESTEP, CPU)
.template data<int32_t>()[0];
Output(HIDDEN_T)->ResizeLike(Input(HIDDEN_T_M_1));
auto* H = Output(HIDDEN_T)->template mutable_data<T>();
detail::GRUUnit<T, Context>(
N, D, t, H_prev, X, seqLengths, drop_states_, H, &context_);
return true;
}
protected:
INPUT_TAGS(HIDDEN_T_M_1, GATES, SEQ_LENGTHS);
// additional input tags are determined dynamically based on whether
// sequence_lengths is present.
OUTPUT_TAGS(HIDDEN_T);
private:
bool drop_states_;
bool sequence_lengths_;
};
template <typename T, typename Context>
class GRUUnitGradientOp : public Operator<Context> {
public:
template <class... Args>
explicit GRUUnitGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
drop_states_(
this->template GetSingleArgument<bool>("drop_states", false)),
sequence_lengths_(
this->template GetSingleArgument<bool>("sequence_lengths", true)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
// handle potentially-missing sequence lengths input
const size_t inputOffset = SEQ_LENGTHS + (sequence_lengths_ ? 1 : 0);
const size_t TIMESTEP = inputOffset;
const size_t HIDDEN_T = inputOffset + 1;
const size_t HIDDEN_T_GRAD = inputOffset + 2;
// Extract N
const auto N = Input(HIDDEN_T_M_1).size(1);
// Gates: 1xNxG
const auto G = Input(GATES).size(2);
const auto D = Input(HIDDEN_T_M_1).size(2);
CAFFE_ENFORCE_EQ(3 * D, G);
const auto* H_prev = Input(HIDDEN_T_M_1).template data<T>();
const auto* X = Input(GATES).template data<T>();
const auto t = static_cast<OperatorBase*>(this)
->Input<Tensor>(TIMESTEP, CPU)
.template data<int32_t>()[0];
const auto* H = Input(HIDDEN_T).template data<T>();
const auto* H_diff = Input(HIDDEN_T_GRAD).template data<T>();
const int32_t* seqLengths = nullptr;
if (sequence_lengths_) {
CAFFE_ENFORCE_EQ(Input(SEQ_LENGTHS).numel(), N);
seqLengths = Input(SEQ_LENGTHS).template data<int32_t>();
}
Output(HIDDEN_T_M_1_GRAD)->ResizeLike(Input(HIDDEN_T_M_1));
auto* H_prev_diff = Output(HIDDEN_T_M_1_GRAD)->template mutable_data<T>();
Output(GATES_GRAD)->ResizeLike(Input(GATES));
auto* X_diff = Output(GATES_GRAD)->template mutable_data<T>();
detail::GRUUnitGradient<T, Context>(
N,
D,
t,
H_prev,
X,
seqLengths,
H,
H_diff,
drop_states_,
H_prev_diff,
X_diff,
&context_);
return true;
}
protected:
INPUT_TAGS(HIDDEN_T_M_1, GATES, SEQ_LENGTHS);
OUTPUT_TAGS(HIDDEN_T_M_1_GRAD, GATES_GRAD);
private:
bool drop_states_;
bool sequence_lengths_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_GRU_UNIT_OP_H_
| 6,687
| 26.636364
| 80
|
h
|
null |
pytorch-main/caffe2/operators/h_softmax_op.h
|
#ifndef CAFFE2_OPERATORS_H_SOFTMAX_OP_H_
#define CAFFE2_OPERATORS_H_SOFTMAX_OP_H_
#include <c10/util/Optional.h>
#include <c10/util/irange.h>
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/proto/hsm.pb.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
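// Hierarchical softmax: each label is reached through a path of nodes in a
// tree (given by the serialized HierarchyProto argument), and its
// probability is the product of the softmax probabilities of the choices
// made along that path. This replaces one large softmax over the whole
// vocabulary with a sequence of much smaller ones.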
template <typename T, typename Context>
class HSoftmaxOpBase : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit HSoftmaxOpBase(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
HierarchyProto hierarchy;
CAFFE_ENFORCE(hierarchy.ParseFromString(
this->template GetSingleArgument<string>("hierarchy", "")));
for (const auto& path : hierarchy.paths()) {
hierarchy_all_map_.emplace(path.word_id(), path);
}
}
protected:
std::unordered_map<int, PathProto> hierarchy_all_map_;
c10::optional<Tensor> scale_;
c10::optional<Tensor> sum_multiplier_;
c10::optional<Tensor> bias_multiplier_;
static constexpr T kLOG_THRESHOLD() {
return 1e-20f;
}
static std::unordered_map<int, PathProto> getHierarchyForLabels(
int M,
const int* labels,
const std::unordered_map<int, PathProto>& hierarchy_all_map) {
std::unordered_map<int, PathProto> hierarchy_map;
std::set<int> label_set = std::set<int>(labels, labels + M);
for (const auto& label : label_set) {
auto search = hierarchy_all_map.find(label);
CAFFE_ENFORCE(search != hierarchy_all_map.end(), "incorrect label.");
hierarchy_map.emplace(search->first, search->second);
}
return hierarchy_map;
}
int getIntermediateOutputSize(
const int* labels,
int M,
std::unordered_map<int, PathProto>& hierarchy) const {
int size = 0;
for (const auto label : c10::irange(M)) {
int word_id = labels[label];
const auto& path = hierarchy[word_id];
size += std::accumulate(
path.path_nodes().begin(),
path.path_nodes().end(),
0,
// Output of FC + Output of Softmax
[](int sz, PathNodeProto node) { return sz + 2 * node.length(); });
}
return size;
}
};
template <typename T, class Context>
class HSoftmaxOp : public HSoftmaxOpBase<T, Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
using HSoftmaxOpBase<T, Context>::HSoftmaxOpBase;
bool RunOnDevice() override;
protected:
float RunForwardSingle(
const float* X,
const float* W,
const float* b,
int target,
float* output,
const float* bias_multiplier,
int w_length,
int K,
int& output_offset);
};
template <typename T, class Context>
class HSoftmaxGradientOp final : public HSoftmaxOpBase<T, Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
using HSoftmaxOpBase<T, Context>::HSoftmaxOpBase;
bool RunOnDevice() override;
private:
void RunBackwardSingle(
const float* X,
const float* dY,
const float* W,
int target,
const float* int_output,
float* dX,
float* dW,
float* db,
float* dOutput,
int dim_in,
int w_length,
int& output_offset);
};
template <typename T, class Context>
class HSoftmaxSearchOp final : public HSoftmaxOp<T, Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit HSoftmaxSearchOp(Args&&... args)
: HSoftmaxOp<T, Context>(std::forward<Args>(args)...),
top_n_(this->template GetSingleArgument<int>("topN", 5)),
beam_(this->template GetSingleArgument<float>("beam", 0.01f)) {
CAFFE_ENFORCE(tree_.ParseFromString(
this->template GetSingleArgument<string>("tree", "")));
}
bool RunOnDevice() override;
private:
int top_n_;
float beam_;
TreeProto tree_;
bool pruning(
const float* X,
int sample,
int K,
const float* W,
const float* b,
const NodeProto& src_node,
NodeProto& dst_node,
float parent_score,
float beam);
bool extractNodes(
const NodeProto& node,
std::vector<std::pair<string, float>>& info);
};
template <typename T, class Context>
class HuffmanTreeHierarchyOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit HuffmanTreeHierarchyOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
num_classes_(this->template GetSingleArgument<int>("num_classes", -1)) {
}
bool RunOnDevice() override;
private:
// Internal huffman tree data.
struct Node {
Node(T l, int count)
: label(l), count(count), left_ch_index(-1), right_ch_index(-1) {}
T label;
int count;
int left_ch_index;
int right_ch_index;
};
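  // Orders nodes by descending count so that a std::priority_queue using
  // this comparator pops the lowest-count node first -- the order needed
  // when greedily merging subtrees into a Huffman tree (assumed usage; the
  // comparator itself only defines the ordering).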
struct NodeComparator {
bool operator()(const Node& node_a, const Node& node_b) {
return node_a.count > node_b.count;
}
};
int num_classes_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_H_SOFTMAX_OP_H_
| 4,983
| 26.688889
| 80
|
h
|
null |
pytorch-main/caffe2/operators/half_float_ops.h
|
#ifndef CAFFE2_OPERATORS_HALF_FLOAT_OPS_H_
#define CAFFE2_OPERATORS_HALF_FLOAT_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class FloatToHalfOp : public Operator<Context> {
public:
explicit FloatToHalfOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
clip_(this->template GetSingleArgument<bool>("clip", false)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
private:
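  // When true, values outside the fp16-representable range are presumably
  // clamped to it during conversion rather than overflowing to +/-infinity
  // (inferred from the argument name; the behavior lives in RunOnDevice).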
bool clip_;
};
template <class Context>
class HalfToFloatOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(HalfToFloatOp);
bool RunOnDevice() override;
};
class Float16ConstantFillOp : public Operator<CPUContext> {
public:
template <class... Args>
explicit Float16ConstantFillOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...),
shape_(this->template GetRepeatedArgument<int64_t>("shape")) {}
USE_OPERATOR_FUNCTIONS(CPUContext);
~Float16ConstantFillOp() override {}
bool RunOnDevice() override;
private:
vector<int64_t> shape_;
};
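// Fills a float16 tensor of the given shape with values drawn uniformly
// between min and max. When the op is given three inputs, min and max are
// read from input blobs rather than from the arguments, as the enforce
// checks in the constructor require.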
template <class Context>
class Float16UniformFillOp : public Operator<Context> {
public:
template <class... Args>
explicit Float16UniformFillOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
shape_(this->template GetRepeatedArgument<int64_t>("shape")),
min_(this->template GetSingleArgument<float>("min", 0)),
max_(this->template GetSingleArgument<float>("max", 1)) {
if (InputSize() == 3) {
CAFFE_ENFORCE(
!this->template HasSingleArgumentOfType<float>("min"),
"Cannot set both min arg and min input blob");
CAFFE_ENFORCE(
!this->template HasSingleArgumentOfType<float>("max"),
"Cannot set both max arg and max input blob");
} else {
CAFFE_ENFORCE_LT(
min_, max_, "Max value should be bigger than min value.");
}
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
~Float16UniformFillOp() override {}
bool RunOnDevice() override;
private:
vector<int64_t> shape_;
float min_;
float max_;
Tensor temp_data_buffer_;
};
inline std::vector<TensorShape> Float16FillerTensorInference(
const OperatorDef& def,
const vector<TensorShape>& in) {
vector<TensorShape> out(1);
ArgumentHelper helper(def);
out[0].set_data_type(static_cast<TensorProto_DataType>(
helper.GetSingleArgument<int>("dtype", TensorProto_DataType_FLOAT16)));
auto shape = helper.GetRepeatedArgument<int>("shape");
for (int d : shape) {
out[0].add_dims(d);
}
return out;
}
} // namespace caffe2
#endif // CAFFE2_OPERATORS_HALF_FLOAT_OPS_H_
| 2,734
| 26.35
| 77
|
h
|
null |
pytorch-main/caffe2/operators/hard_sigmoid_op.h
|
#ifndef CAFFE2_OPERATORS_HARD_SIGMOID_H_
#define CAFFE2_OPERATORS_HARD_SIGMOID_H_
#include <vector>
#include "caffe2/operators/elementwise_ops.h"
namespace caffe2 {
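// Hard sigmoid: Y = max(0, min(1, alpha * X + beta)), a piecewise-linear
// approximation of the logistic sigmoid with defaults alpha = 0.2 and
// beta = 0.5.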
template <class Context>
struct HardSigmoidFunctor {
explicit HardSigmoidFunctor(OperatorBase& op)
: alpha(op.GetSingleArgument<float>("alpha", 0.2f)),
beta(op.GetSingleArgument<float>("beta", 0.5f)) {}
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const;
const float alpha, beta;
};
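// Gradient of hard sigmoid: dX = dY * alpha where 0 < Y < 1 (the linear
// region) and 0 elsewhere, which is why only alpha is needed here.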
template <class Context>
struct HardSigmoidGradientFunctor {
explicit HardSigmoidGradientFunctor(OperatorBase& op)
: alpha(op.GetSingleArgument<float>("alpha", 0.2f)) {}
template <typename T>
bool Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& dY_dims,
const T* Y,
const T* dY,
T* dX,
Context* context) const;
const float alpha;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_HARD_SIGMOID_H_
| 994
| 22.690476
| 73
|
h
|
null |
pytorch-main/caffe2/operators/heatmap_max_keypoint_op.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef HEATMAP_MAX_KEYPOINT_OP_H_
#define HEATMAP_MAX_KEYPOINT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(HeatmapMaxKeypoint)
namespace caffe2 {
template <typename T, class Context>
class HeatmapMaxKeypointOp final : public Operator<Context> {
public:
template <class... Args>
explicit HeatmapMaxKeypointOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
should_output_softmax_(this->template GetSingleArgument<bool>(
"should_output_softmax",
false)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
bool should_output_softmax_ = false;
};
} // namespace caffe2
#endif // HEATMAP_MAX_KEYPOINT_OP_H_
| 939
| 25.111111
| 70
|
h
|
null |
pytorch-main/caffe2/operators/histogram_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
#include <cmath>
#include <limits>
namespace caffe2 {
template <class Context>
class HistogramOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit HistogramOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
bin_edges_(this->template GetRepeatedArgument<float>("bin_edges")) {
CAFFE_ENFORCE_GE(
bin_edges_.size(),
2,
"Number of bin edges must be greater than or equal to 2.");
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto i : c10::irange(1, bin_edges_.size())) {
CAFFE_ENFORCE_GT(
bin_edges_[i],
bin_edges_[i - 1],
"bin_edges must be a strictly increasing sequence of values.");
}
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
CheckInputs();
const auto* histogram = Output(HISTOGRAM);
histogram->Resize(bin_edges_.size() - 1);
auto* histogram_data = histogram->template mutable_data<int64_t>();
math::Set<int64_t, Context>(
bin_edges_.size() - 1, 0, histogram_data, &context_);
for (const auto input_idx : c10::irange(InputSize())) {
const auto& x = Input(input_idx);
const int64_t N = x.numel();
const auto* x_data = x.template data<T>();
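      // std::upper_bound returns the first edge strictly greater than the
      // value, so x is counted in bin i exactly when
      // bin_edges_[i] <= x < bin_edges_[i + 1]; values outside
      // [bin_edges_.front(), bin_edges_.back()) are silently ignored.
      // E.g. bin_edges = {0, 1, 2} yields two bins, [0, 1) and [1, 2).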
for (const auto data_idx : c10::irange(N)) {
const auto bisection_it = std::upper_bound(
bin_edges_.begin(), bin_edges_.end(), x_data[data_idx]);
const int bisection_idx = bisection_it - bin_edges_.begin();
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
if (bisection_idx > 0 && bisection_idx < bin_edges_.size()) {
histogram_data[bisection_idx - 1]++;
}
}
}
return true;
}
protected:
OUTPUT_TAGS(HISTOGRAM);
private:
vector<float> bin_edges_;
void CheckInputs() {
const auto& input_zero = Input(0);
for (const auto i : c10::irange(1, InputSize())) {
CAFFE_ENFORCE_EQ(
Input(i).dtype(),
input_zero.dtype(),
"All inputs must have the same type; expected ",
input_zero.dtype().name(),
" but got ",
Input(i).dtype().name(),
" for input ",
i);
}
}
};
} // namespace caffe2
| 2,455
| 27.229885
| 76
|
h
|
null |
pytorch-main/caffe2/operators/if_op.h
|
#ifndef CAFFE2_OPERATORS_IF_OP_H_
#define CAFFE2_OPERATORS_IF_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
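// Conditional execution: input 0 must be a one-element bool tensor; when it
// is true the "then_net" subnet runs, otherwise the optional "else_net"
// subnet runs (or the op is a no-op if no else_net was given).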
template <class Context>
class IfOp final : public Operator<Context> {
public:
explicit IfOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {
CAFFE_ENFORCE(
this->template HasSingleArgumentOfType<NetDef>("then_net"),
"then_net must be specified in If operator");
auto then_net_def =
this->template GetSingleArgument<NetDef>("then_net", NetDef());
then_net_ = CreateNet(then_net_def, ws);
CAFFE_ENFORCE(then_net_, "Failed to initialize then subnet");
if (this->template HasSingleArgumentOfType<NetDef>("else_net")) {
auto else_net_def =
this->template GetSingleArgument<NetDef>("else_net", NetDef());
else_net_ = CreateNet(else_net_def, ws);
CAFFE_ENFORCE(else_net_, "Failed to initialize else subnet");
}
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
CAFFE_ENFORCE(
this->InputIsTensorType(0, Context::GetDeviceType()),
"Invalid condition in If operator: tensor expected");
const auto& condition = Input(0);
CAFFE_ENFORCE_EQ(
condition.numel(),
1,
"Invalid condition tensor in If operator: single value expected");
auto conditionValue = *condition.template data<bool>();
if (conditionValue) {
return then_net_->Run();
} else if (else_net_) {
return else_net_->Run();
}
return true;
}
private:
std::unique_ptr<NetBase> then_net_;
std::unique_ptr<NetBase> else_net_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_IF_OP_H_
| 1,764
| 27.467742
| 74
|
h
|