pytorch-main/caffe2/operators/im2col_op.h
#ifndef CAFFE2_OPERATORS_IM2COL_OP_H_
#define CAFFE2_OPERATORS_IM2COL_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <typename T, class Context>
class Im2ColOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit Im2ColOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
pad_(this->template GetSingleArgument<int>("pad", 0)),
kernel_h_(this->template GetSingleArgument<int>(
"kernel_h",
this->template GetSingleArgument<int>("kernel", 0))),
kernel_w_(this->template GetSingleArgument<int>(
"kernel_w",
this->template GetSingleArgument<int>("kernel", 0))),
dilation_h_(this->template GetSingleArgument<int>(
"dilation_h",
this->template GetSingleArgument<int>("dilation", 1))),
dilation_w_(this->template GetSingleArgument<int>(
"dilation_w",
this->template GetSingleArgument<int>("dilation", 1))),
stride_h_(this->template GetSingleArgument<int>(
"stride_h",
this->template GetSingleArgument<int>("stride", 1))),
stride_w_(this->template GetSingleArgument<int>(
"stride_w",
this->template GetSingleArgument<int>("stride", 1))),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE(kernel_h_ > 0);
CAFFE_ENFORCE(kernel_w_ > 0);
CAFFE_ENFORCE(dilation_h_ > 0);
CAFFE_ENFORCE(dilation_w_ > 0);
CAFFE_ENFORCE(stride_h_ > 0);
CAFFE_ENFORCE(stride_w_ > 0);
CAFFE_ENFORCE(pad_ >= 0);
}
bool RunOnDevice() override {
auto& X = Input(0);
CAFFE_ENFORCE(4 == X.dim());
int N = 0, C = 0, H = 0, W = 0;
switch (order_) {
case StorageOrder::NCHW:
N = X.dim32(0);
C = X.dim32(1);
H = X.dim32(2);
W = X.dim32(3);
break;
case StorageOrder::NHWC:
N = X.dim32(0);
H = X.dim32(1);
W = X.dim32(2);
C = X.dim32(3);
break;
default:
CAFFE_THROW("Unknown storage order: ", order_);
}
const int dkernel_h = dilation_h_ * (kernel_h_ - 1) + 1;
const int dkernel_w = dilation_w_ * (kernel_w_ - 1) + 1;
CAFFE_ENFORCE(H >= dkernel_h);
CAFFE_ENFORCE(W >= dkernel_w);
const int out_h = (H + 2 * pad_ - dkernel_h) / stride_h_ + 1;
const int out_w = (W + 2 * pad_ - dkernel_w) / stride_w_ + 1;
switch (order_) {
case StorageOrder::NCHW: {
auto* Y = Output(
0,
std::vector<int64_t>{N, C * kernel_h_ * kernel_w_, out_h, out_w},
at::dtype<T>());
const size_t dx = X.numel() / N;
const size_t dy = Y->numel() / N;
for (const auto n : c10::irange(N)) {
const auto* xdata = X.template data<T>() + (n * dx);
auto* ydata = Y->template mutable_data<T>() + (n * dy);
math::Im2Col<T, Context, StorageOrder::NCHW>(
C,
H,
W,
kernel_h_,
kernel_w_,
dilation_h_,
dilation_w_,
pad_,
pad_,
pad_,
pad_,
stride_h_,
stride_w_,
xdata,
ydata,
&context_);
}
}; break;
case StorageOrder::NHWC: {
auto* Y = Output(
0,
std::vector<int64_t>{N, out_h, out_w, kernel_h_ * kernel_w_ * C},
at::dtype<T>());
const size_t dx = X.numel() / N;
const size_t dy = Y->numel() / N;
for (const auto n : c10::irange(N)) {
const auto* xdata = X.template data<T>() + (n * dx);
auto* ydata = Y->template mutable_data<T>() + (n * dy);
math::Im2Col<T, Context, StorageOrder::NHWC>(
C,
H,
W,
kernel_h_,
kernel_w_,
dilation_h_,
dilation_w_,
pad_,
pad_,
pad_,
pad_,
stride_h_,
stride_w_,
xdata,
ydata,
&context_);
}
}; break;
default:
CAFFE_THROW("Unknown storage order: ", order_);
}
return true;
}
private:
int pad_;
int kernel_h_;
int kernel_w_;
int dilation_h_;
int dilation_w_;
int stride_h_;
int stride_w_;
StorageOrder order_;
};
template <typename T, class Context>
class Col2ImOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit Col2ImOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
pad_(this->template GetSingleArgument<int>("pad", 0)),
kernel_h_(this->template GetSingleArgument<int>(
"kernel_h",
this->template GetSingleArgument<int>("kernel", 0))),
kernel_w_(this->template GetSingleArgument<int>(
"kernel_w",
this->template GetSingleArgument<int>("kernel", 0))),
dilation_h_(this->template GetSingleArgument<int>(
"dilation_h",
this->template GetSingleArgument<int>("dilation", 1))),
dilation_w_(this->template GetSingleArgument<int>(
"dilation_w",
this->template GetSingleArgument<int>("dilation", 1))),
stride_h_(this->template GetSingleArgument<int>(
"stride_h",
this->template GetSingleArgument<int>("stride", 1))),
stride_w_(this->template GetSingleArgument<int>(
"stride_w",
this->template GetSingleArgument<int>("stride", 1))),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE(kernel_h_ > 0);
CAFFE_ENFORCE(kernel_w_ > 0);
CAFFE_ENFORCE(dilation_h_ > 0);
CAFFE_ENFORCE(dilation_w_ > 0);
CAFFE_ENFORCE(stride_h_ > 0);
CAFFE_ENFORCE(stride_w_ > 0);
CAFFE_ENFORCE(pad_ >= 0);
}
bool RunOnDevice() override {
auto& X = Input(0);
auto& Z = Input(1);
auto* Y = Output(0, Z.sizes(), at::dtype<T>());
CAFFE_ENFORCE(4 == Y->dim());
int N = 0, C = 0, H = 0, W = 0;
switch (order_) {
case StorageOrder::NCHW:
N = Y->dim32(0);
C = Y->dim32(1);
H = Y->dim32(2);
W = Y->dim32(3);
break;
case StorageOrder::NHWC:
N = Y->dim32(0);
H = Y->dim32(1);
W = Y->dim32(2);
C = Y->dim32(3);
break;
default:
CAFFE_THROW("Unknown storage order: ", order_);
}
const int dkernel_h = dilation_h_ * (kernel_h_ - 1) + 1;
const int dkernel_w = dilation_w_ * (kernel_w_ - 1) + 1;
CAFFE_ENFORCE(H >= dkernel_h);
CAFFE_ENFORCE(W >= dkernel_w);
const int out_h = (H + 2 * pad_ - dkernel_h) / stride_h_ + 1;
const int out_w = (W + 2 * pad_ - dkernel_w) / stride_w_ + 1;
CAFFE_ENFORCE(X.numel() == N * kernel_h_ * kernel_w_ * C * out_h * out_w);
const size_t dx = X.numel() / N;
const size_t dy = Y->numel() / N;
// could template-specialize this, but it's test code...
switch (order_) {
case StorageOrder::NCHW: {
for (const auto n : c10::irange(N)) {
const auto* xdata = X.template data<T>() + (n * dx);
auto* ydata = Y->template mutable_data<T>() + (n * dy);
math::Col2Im<T, Context, StorageOrder::NCHW>(
C,
H,
W,
kernel_h_,
kernel_w_,
dilation_h_,
dilation_w_,
pad_,
pad_,
pad_,
pad_,
stride_h_,
stride_w_,
xdata,
ydata,
&context_);
}
}; break;
case StorageOrder::NHWC: {
for (const auto n : c10::irange(N)) {
const auto* xdata = X.template data<T>() + (n * dx);
auto* ydata = Y->template mutable_data<T>() + (n * dy);
math::Col2Im<T, Context, StorageOrder::NHWC>(
C,
H,
W,
kernel_h_,
kernel_w_,
dilation_h_,
dilation_w_,
pad_,
pad_,
pad_,
pad_,
stride_h_,
stride_w_,
xdata,
ydata,
&context_);
}
}; break;
default:
CAFFE_THROW("Unknown storage order: ", order_);
}
return true;
}
private:
int pad_;
int kernel_h_;
int kernel_w_;
int dilation_h_;
int dilation_w_;
int stride_h_;
int stride_w_;
StorageOrder order_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_IM2COL_OP_H_
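The operator above defers the heavy lifting to math::Im2Col; what it owns is the shape arithmetic: dkernel = dilation * (kernel - 1) + 1 and out = (in + 2 * pad - dkernel) / stride + 1. A minimal standalone sketch of the NCHW expansion for a single image, with zero padding and illustrative names (not the caffe2 kernel itself):

#include <cstdio>
#include <vector>

// Illustrative NCHW im2col for one image: expands C x H x W input into a
// (C*kh*kw) x (out_h*out_w) matrix of patch columns, zero-padding the border.
std::vector<float> im2col_nchw(const std::vector<float>& x, int C, int H, int W,
                               int kh, int kw, int dil, int pad, int stride) {
  const int dk_h = dil * (kh - 1) + 1;
  const int dk_w = dil * (kw - 1) + 1;
  const int out_h = (H + 2 * pad - dk_h) / stride + 1;
  const int out_w = (W + 2 * pad - dk_w) / stride + 1;
  std::vector<float> col((size_t)C * kh * kw * out_h * out_w, 0.f);
  for (int c = 0; c < C; ++c)
    for (int i = 0; i < kh; ++i)
      for (int j = 0; j < kw; ++j)
        for (int oh = 0; oh < out_h; ++oh)
          for (int ow = 0; ow < out_w; ++ow) {
            const int ih = oh * stride - pad + i * dil;
            const int iw = ow * stride - pad + j * dil;
            const size_t row = ((size_t)c * kh + i) * kw + j;
            const size_t idx = (row * out_h + oh) * out_w + ow;
            if (ih >= 0 && ih < H && iw >= 0 && iw < W)
              col[idx] = x[((size_t)c * H + ih) * W + iw];
          }
  return col;
}

int main() {
  std::vector<float> x(1 * 4 * 4);
  for (size_t i = 0; i < x.size(); ++i) x[i] = (float)i;
  auto col = im2col_nchw(x, 1, 4, 4, 3, 3, 1, 1, 1); // 1x4x4 input, 3x3, pad 1
  std::printf("col size = %zu\n", col.size());       // 9 * 16 = 144
}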
pytorch-main/caffe2/operators/index_hash_ops.h
#ifndef CAFFE2_OPERATORS_INDEX_HASH_OPS_H_
#define CAFFE2_OPERATORS_INDEX_HASH_OPS_H_
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include <c10/util/irange.h>
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(IndexHash);
namespace caffe2 {
template <class Context>
class IndexHashOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit IndexHashOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
seed_(this->template GetSingleArgument<int64_t>("seed", 0)),
modulo_(this->template GetSingleArgument<int64_t>("modulo", 0)) {
CAFFE_ENFORCE_GT(modulo_, 0, "MODULO should be > 0");
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename T>
bool DoRunWithType() {
auto& indices = Input(INDICES);
auto* hashed_indices =
Output(HASHED_INDICES, indices.sizes(), at::dtype<T>());
CAFFE_ENFORCE_GE(
static_cast<int64_t>(std::numeric_limits<T>::max()),
modulo_,
"MODULO shouldn't be larger than the numeric limit of the indices");
auto N = indices.numel();
auto* indices_data = indices.template data<T>();
auto* hashed_indices_data = hashed_indices->template mutable_data<T>();
for (const auto i : c10::irange(N)) {
hashed_indices_data[i] = hash(indices_data[i]);
}
return true;
}
protected:
template <typename T>
__ubsan_ignore_signed_int_overflow__
T hash(T id) {
int8_t* bytes = (int8_t*)&id;
T hashed = seed_ * 0xDEADBEEF;
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int i = 0; i < sizeof(T) / sizeof(int8_t); i++) {
hashed = hashed * 65537 + bytes[i];
}
// We want the result of the modulo to be positive. This works under the
// assumption that modulo_ > 0 which is enforced in the constructor.
auto modHashed = hashed % modulo_;
return modHashed >= 0 ? modHashed : modHashed + modulo_;
}
private:
INPUT_TAGS(INDICES);
OUTPUT_TAGS(HASHED_INDICES);
int64_t seed_;
int64_t modulo_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_INDEX_HASH_OPS_H_
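The hash above mixes the id one byte at a time and then forces a non-negative remainder. A standalone sketch of the same scheme for int64_t ids; note the operator deliberately relies on signed wraparound (hence the ubsan annotation), which this sketch reproduces as-is:

#include <cstdint>
#include <cstdio>

// Illustrative re-implementation of IndexHash's per-id hash. A strictly
// conforming version would do the mixing in uint64_t; the operator instead
// suppresses the signed-overflow sanitizer.
int64_t index_hash(int64_t id, int64_t seed, int64_t modulo) {
  const int8_t* bytes = reinterpret_cast<const int8_t*>(&id);
  int64_t hashed = seed * 0xDEADBEEF;
  for (unsigned i = 0; i < sizeof(id); ++i) {
    hashed = hashed * 65537 + bytes[i];
  }
  const int64_t m = hashed % modulo; // may be negative when hashed < 0
  return m >= 0 ? m : m + modulo;    // force into [0, modulo)
}

int main() {
  std::printf("%lld\n", (long long)index_hash(42, 7, 1000)); // in [0, 1000)
}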
pytorch-main/caffe2/operators/index_ops.h
#ifndef CAFFE2_OPERATORS_INDEX_OPS_H_
#define CAFFE2_OPERATORS_INDEX_OPS_H_
#include <limits>
#include <mutex>
#include <sstream>
#include <unordered_map>
#include <vector>
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "c10/util/irange.h"
namespace caffe2 {
namespace {
using IndexKeyTypes = TensorTypes<int32_t, int64_t, std::string>;
using int64_tValue = int64_t;
} // namespace
struct IndexBase {
public:
IndexBase(int64_tValue maxElements, const TypeMeta type)
: maxElements_{maxElements}, meta_(type), frozen_{false} {}
void Freeze() {
frozen_ = true;
}
bool isFrozen() const {
return frozen_;
}
int64_t maxElements() const {
return maxElements_;
}
virtual ~IndexBase() {}
const TypeMeta Type() const {
return meta_;
}
int64_tValue Size() {
std::lock_guard<std::mutex> guard(dictMutex_);
return nextId_;
}
protected:
int64_t maxElements_;
TypeMeta meta_;
int64_tValue nextId_{1}; // guarded by dictMutex_
std::atomic<bool> frozen_{false};
std::mutex dictMutex_;
};
template <typename T>
struct Index : IndexBase {
explicit Index(int64_tValue maxElements)
: IndexBase(maxElements, TypeMeta::Make<T>()) {}
void Get(const T* keys, int64_tValue* values, size_t numKeys) {
if (frozen_) {
FrozenGet(keys, values, numKeys);
return;
}
std::lock_guard<std::mutex> lock(dictMutex_);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto i : c10::irange(numKeys)) {
auto it = dict_.find(keys[i]);
if (it != dict_.end()) {
values[i] = it->second;
} else if (nextId_ < maxElements_) {
auto newValue = nextId_++;
dict_.insert({keys[i], newValue});
values[i] = newValue;
} else {
CAFFE_THROW("Dict max size reached");
}
}
}
bool Load(const T* keys, size_t numKeys) {
CAFFE_ENFORCE(
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
numKeys <= maxElements_,
"Cannot load index: Tensor is larger than max_elements.");
decltype(dict_) dict;
for (const auto i : c10::irange(0U, numKeys)) {
CAFFE_ENFORCE(
dict.insert({keys[i], i + 1}).second,
"Repeated elements found: cannot load into dictionary.");
}
// assume no `get` is inflight while this happens
{
std::lock_guard<std::mutex> lock(dictMutex_);
// let the old dict get destructed outside of the lock
dict_.swap(dict);
nextId_ = numKeys + 1;
}
return true;
}
bool Store(Tensor* out) {
std::lock_guard<std::mutex> lock(dictMutex_);
out->Resize(nextId_ - 1);
auto outData = out->template mutable_data<T>();
for (const auto& entry : dict_) {
outData[entry.second - 1] = entry.first;
}
return true;
}
private:
void FrozenGet(const T* keys, int64_tValue* values, size_t numKeys) {
for (const auto i : c10::irange(0U, numKeys)) {
auto it = dict_.find(keys[i]);
values[i] = it != dict_.end() ? it->second : 0;
}
}
std::unordered_map<T, int64_tValue> dict_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_INDEX_OPS_H_
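Get() hands out ids starting at 1 and grows the dictionary until maxElements_; after Freeze(), misses map to id 0. A single-threaded sketch of that contract with illustrative names (the real class also takes dictMutex_ on every growing call):

#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <unordered_map>

// Sketch of Index<T>::Get semantics: ids start at 1 (0 is the post-freeze
// miss value), and the dictionary grows until max_elements.
struct MiniIndex {
  std::unordered_map<int64_t, int64_t> dict;
  int64_t next_id = 1;
  int64_t max_elements = 0;
  bool frozen = false;

  int64_t get(int64_t key) {
    auto it = dict.find(key);
    if (it != dict.end()) return it->second;
    if (frozen) return 0; // frozen index maps unseen keys to 0
    if (next_id >= max_elements) throw std::runtime_error("Dict max size reached");
    const int64_t v = next_id++;
    dict.emplace(key, v);
    return v;
  }
};

int main() {
  MiniIndex idx;
  idx.max_elements = 10;
  std::printf("%lld %lld %lld\n", (long long)idx.get(7), (long long)idx.get(9),
              (long long)idx.get(7)); // 1 2 1
}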
pytorch-main/caffe2/operators/inference_lstm_op.h
#ifndef LSTM_OP_H_
#define LSTM_OP_H_
#include <algorithm>
#include <sstream>
#include <unordered_map>
#include <vector>
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include <c10/util/irange.h>
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
#include "lstm_utils.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(LSTMOp);
namespace caffe2 {
namespace {
using t_tuple = std::tuple<Tensor, Tensor>;
struct CellParams {
CellParams(
const Tensor& _w_ih,
const Tensor& _w_hh,
const Tensor& _b_ih,
const Tensor& _b_hh,
CPUContext* _context) {
initParams(_w_ih, _w_hh, _b_ih, _b_hh, _context);
}
CellParams(const CellParams& rhs) {
initParams(rhs.w_ih, rhs.w_hh, rhs.b_ih, rhs.b_hh, rhs.context);
}
CellParams& operator=(const CellParams& rhs) {
initParams(rhs.w_ih, rhs.w_hh, rhs.b_ih, rhs.b_hh, rhs.context);
return *this;
}
void initParams(
const Tensor& _w_ih,
const Tensor& _w_hh,
const Tensor& _b_ih,
const Tensor& _b_hh,
CPUContext* _context) {
w_ih = copy_ctor(_w_ih);
w_hh = copy_ctor(_w_hh);
b_ih = copy_ctor(_b_ih);
b_hh = copy_ctor(_b_hh);
context = _context;
}
Tensor w_ih;
Tensor w_hh;
Tensor b_ih; /* optional */
Tensor b_hh; /* optional */
CPUContext* context;
Tensor linear_ih(const Tensor& input) const {
return linear(input, w_ih, b_ih, context);
}
Tensor linear_hh(const Tensor& h) const {
return linear(h, w_hh, b_hh, context);
}
};
struct LSTMCell {
explicit LSTMCell(CPUContext* context) : context_(context) {}
t_tuple operator()(
const Tensor& input,
const t_tuple& hidden,
const CellParams& params) const {
const auto& hx = std::get<0>(hidden);
const auto& cx = std::get<1>(hidden);
auto linear_ih = params.linear_ih(input);
auto linear_hh = params.linear_hh(hx);
auto gates = add(linear_ih, linear_hh, context_);
auto chunked_gates = chunk(gates, 4, 1, context_);
auto ingate = sigmoid(chunked_gates[0]);
auto forgetgate = sigmoid(chunked_gates[1]);
auto cellgate = tanh(chunked_gates[2], context_);
auto outgate = sigmoid(chunked_gates[3]);
auto cy =
add(mul(forgetgate, cx, context_),
mul(ingate, cellgate, context_),
context_);
auto hy = mul(outgate, tanh(cy, context_), context_);
return std::make_tuple(std::move(hy), std::move(cy));
}
CPUContext* context_;
};
template <typename output_type, typename hidden_type>
struct LayerOutput {
output_type outputs;
hidden_type final_hidden;
LayerOutput(const output_type& _outputs, const hidden_type& _hidden) {
outputs = copy_ctor(_outputs);
final_hidden = copy_ctor(_hidden);
}
};
template <typename hidden_type, typename param_type>
struct Layer {
using output_type = LayerOutput<Tensor, hidden_type>;
virtual ~Layer() {}
virtual output_type operator()(
const Tensor& input,
const hidden_type& input_hidden,
const param_type& params) const = 0;
};
struct FullLSTMLayer : Layer<t_tuple, CellParams> {
FullLSTMLayer(LSTMCell& cell, CPUContext* context)
: cell_(cell), context_(context) {}
LayerOutput<std::vector<Tensor>, t_tuple> operator()(
const std::vector<Tensor>& step_inputs,
const std::tuple<Tensor, Tensor>& input_hidden,
const CellParams& params) const {
std::vector<Tensor> step_outputs;
auto hidden = copy_ctor(input_hidden);
for (const auto i : c10::irange(step_inputs.size())) {
hidden = cell_(step_inputs[i], hidden, params);
step_outputs.push_back(copy_ctor(std::get<0>(hidden)));
}
return {step_outputs, hidden};
}
LayerOutput<Tensor, t_tuple> operator()(
const Tensor& inputs,
const std::tuple<Tensor, Tensor>& input_hidden,
const CellParams& params) const override {
auto unstacked_output =
(*this)(unbind(inputs, 0, context_), input_hidden, params);
return {stack(unstacked_output.outputs, 0, context_),
unstacked_output.final_hidden};
}
LSTMCell cell_;
CPUContext* context_;
};
struct FullBidirectionalLSTMLayer
: Layer<std::pair<t_tuple, t_tuple>, std::pair<CellParams, CellParams>> {
using bidir_hidden_type = std::pair<t_tuple, t_tuple>;
using param_type = std::pair<CellParams, CellParams>;
using output_type = LayerOutput<Tensor, bidir_hidden_type>;
FullBidirectionalLSTMLayer(LSTMCell& cell, CPUContext* context)
: layer_(cell, context), context_(context) {}
output_type operator()(
const Tensor& input,
const bidir_hidden_type& input_hidden,
const param_type& params) const override {
std::vector<Tensor> outputs;
auto step_inputs = unbind(input, 0, context_);
auto fw_result = layer_(step_inputs, input_hidden.first, params.first);
auto fw_output = stack(fw_result.outputs, 0, context_);
outputs.push_back(copy_ctor(fw_output));
auto rev_step_inputs = reverse(std::move(step_inputs));
auto rev_result =
layer_(rev_step_inputs, input_hidden.second, params.second);
std::reverse(rev_result.outputs.begin(), rev_result.outputs.end());
auto rev_output = stack(rev_result.outputs, 0, context_);
outputs.push_back(copy_ctor(rev_output));
return {cat(outputs, fw_output.dim() - 1, context_),
std::make_pair(
std::move(fw_result.final_hidden),
std::move(rev_result.final_hidden))};
}
inline std::vector<Tensor> reverse(std::vector<Tensor>&& x) const {
std::reverse(x.begin(), x.end());
return std::move(x);
}
private:
FullLSTMLayer layer_;
CPUContext* context_;
};
template <typename hidden_type, typename weight_type>
LayerOutput<Tensor, std::vector<hidden_type>> apply_layer_stack(
const Layer<hidden_type, weight_type>& layer,
const Tensor& input,
const std::vector<hidden_type>& hiddens,
const std::vector<weight_type>& weights,
int64_t num_layers) {
CAFFE_ENFORCE(
num_layers == hiddens.size(),
"Expected more hidden states in stacked_rnn");
CAFFE_ENFORCE(
num_layers == weights.size(), "Expected more weights in stacked_rnn");
auto layer_input = input.UnsafeSharedInstance();
auto hidden_it = hiddens.begin();
auto weight_it = weights.begin();
std::vector<hidden_type> final_hiddens(num_layers);
for (const auto l : c10::irange(num_layers)) {
auto layer_output = layer(layer_input, *(hidden_it++), *(weight_it++));
final_hiddens.at(l) = std::move(layer_output.final_hidden);
layer_input = std::move(layer_output.outputs);
}
return {layer_input, final_hiddens};
}
std::tuple<Tensor, Tensor, Tensor> _lstm_impl(
const Tensor& input,
const std::vector<CellParams>& params,
const Tensor& hx,
const Tensor& cx,
int64_t num_layers,
bool bidirectional,
CPUContext* context) {
using stack_output = LayerOutput<Tensor, std::vector<t_tuple>>;
auto layer_hx = unbind(hx, 0, context);
auto layer_cx = unbind(cx, 0, context);
int64_t total_layers = layer_hx.size();
std::vector<std::tuple<Tensor, Tensor>> hiddens;
hiddens.reserve(total_layers);
for (const auto i : c10::irange(total_layers)) {
hiddens.emplace_back(std::move(layer_hx[i]), std::move(layer_cx[i]));
}
LSTMCell cell(context);
std::shared_ptr<stack_output> stack_output_ptr;
if (bidirectional) {
auto bidir_result = apply_layer_stack(
FullBidirectionalLSTMLayer{cell, context},
input,
pair_vec(hiddens),
pair_vec(params),
num_layers);
stack_output_ptr.reset(new stack_output(
bidir_result.outputs,
unpair_vec(std::move(bidir_result.final_hidden))));
} else {
auto result = apply_layer_stack(
FullLSTMLayer{cell, context}, input, hiddens, params, num_layers);
stack_output_ptr = std::make_shared<stack_output>(std::move(result));
}
std::vector<Tensor> hy, cy;
hy.reserve(total_layers);
cy.reserve(total_layers);
for (auto& hidden : stack_output_ptr->final_hidden) {
hy.push_back(std::move(std::get<0>(hidden)));
cy.push_back(std::move(std::get<1>(hidden)));
}
return std::make_tuple(
std::move(stack_output_ptr->outputs),
stack(hy, 0, context),
stack(cy, 0, context));
}
// Parses a flat list of parameter tensors into a list of CellParams
std::vector<CellParams> gather_params(
const std::vector<Tensor>& params,
bool has_biases,
CPUContext* context) {
Tensor undefined;
std::vector<CellParams> result;
if (has_biases) {
CAFFE_ENFORCE_EQ(
params.size() % 4, 0, "got an incorrect number of LSTM parameters");
for (size_t i = 0; i < params.size(); i += 4) {
result.emplace_back(
params[i], params[i + 1], params[i + 2], params[i + 3], context);
}
} else {
CAFFE_ENFORCE_EQ(
params.size() % 2, 0, "got an incorrect number of LSTM parameters");
for (size_t i = 0; i < params.size(); i += 2) {
result.emplace_back(
params[i], params[i + 1], undefined, undefined, context);
}
}
return result;
}
class InferenceLSTMOp : public Operator<CPUContext> {
public:
template <class... Args>
explicit InferenceLSTMOp(Args&&... args)
: Operator(std::forward<Args>(args)...),
num_layers_(this->template GetSingleArgument<int64_t>("num_layers", 1)),
bidirectional_(
this->template GetSingleArgument<bool>("bidirectional", false)),
has_biases_(this->template GetSingleArgument<bool>("has_biases", true)),
batch_first_(
this->template GetSingleArgument<bool>("batch_first", false)) {}
bool RunOnDevice() override;
protected:
int64_t num_layers_;
bool bidirectional_;
bool has_biases_;
bool batch_first_;
};
} // namespace
} // namespace caffe2
#endif // LSTM_OP_H_
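LSTMCell splits the summed gate pre-activations W_ih*x + b_ih + W_hh*h + b_hh into four chunks and applies sigmoid, sigmoid, tanh, sigmoid in that order. A scalar sketch of one hidden unit's update, assuming the pre-activations are already computed (the linear/chunk helpers come from lstm_utils.h):

#include <cmath>
#include <cstdio>

// Illustrative scalar LSTM cell step for one hidden unit, mirroring the gate
// order above (input, forget, cell, output). g_* are the four chunks of the
// summed pre-activations.
void lstm_cell_step(float g_in, float g_forget, float g_cell, float g_out,
                    float cx, float* hy, float* cy) {
  auto sigmoid = [](float v) { return 1.f / (1.f + std::exp(-v)); };
  const float i = sigmoid(g_in);
  const float f = sigmoid(g_forget);
  const float g = std::tanh(g_cell);
  const float o = sigmoid(g_out);
  *cy = f * cx + i * g;           // cy = forgetgate*cx + ingate*cellgate
  *hy = o * std::tanh(*cy);       // hy = outgate * tanh(cy)
}

int main() {
  float hy, cy;
  lstm_cell_step(0.5f, 0.5f, 0.1f, 0.5f, 0.f, &hy, &cy);
  std::printf("hy=%f cy=%f\n", hy, cy);
}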
pytorch-main/caffe2/operators/instance_norm_op.h
#ifndef CAFFE2_OPERATORS_INSTANCE_NORM_OP_H_
#define CAFFE2_OPERATORS_INSTANCE_NORM_OP_H_
#include <array>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class InstanceNormOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit InstanceNormOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(float, "epsilon", epsilon_, 1e-5),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE_GE(epsilon_, 0, "Must pass a nonnegative epsilon.");
CAFFE_ENFORCE_NE(
order_,
StorageOrder::UNKNOWN,
"order should be either \"NCHW\" or \"NHWC\".");
}
bool RunOnDevice() override {
const auto& X = Input(INPUT);
const auto& gamma = Input(SCALE);
const auto& beta = Input(BIAS);
const int ndim = X.dim();
const int64_t N = X.dim(0);
const int64_t C = order_ == StorageOrder::NCHW ? X.dim(1) : X.dim(ndim - 1);
const int64_t HxW = X.numel() / (N * C);
CAFFE_ENFORCE_EQ(gamma.numel(), C);
CAFFE_ENFORCE_EQ(beta.numel(), C);
auto* Y = Output(OUTPUT, X.sizes(), at::dtype<T>());
const T* X_data = X.template data<T>();
const T* gamma_data = gamma.template data<T>();
const T* beta_data = beta.template data<T>();
T* Y_data = Y->template mutable_data<T>();
T* mean_data = nullptr;
T* rstd_data = nullptr;
if (OutputSize() >= 2) {
auto* mean = Output(MEAN, {N, C}, at::dtype<T>());
mean_data = mean->template mutable_data<T>();
} else {
ReinitializeTensor(
&mean_, {N, C}, at::dtype<T>().device(Context::GetDeviceType()));
mean_data = mean_.template mutable_data<T>();
}
if (OutputSize() >= 3) {
auto* rstd = Output(RSTD, {N, C}, at::dtype<T>());
rstd_data = rstd->template mutable_data<T>();
} else {
ReinitializeTensor(
&rstd_, {N, C}, at::dtype<T>().device(Context::GetDeviceType()));
rstd_data = rstd_.template mutable_data<T>();
}
switch (order_) {
case StorageOrder::NCHW: {
return RunOnDeviceWithOrderNCHW(
N,
C,
HxW,
X_data,
gamma_data,
beta_data,
Y_data,
mean_data,
rstd_data);
}
case StorageOrder::NHWC: {
return RunOnDeviceWithOrderNHWC(
N,
C,
HxW,
X_data,
gamma_data,
beta_data,
Y_data,
mean_data,
rstd_data);
}
default: {
CAFFE_THROW("Unknown storage order: ", order_);
}
}
}
private:
bool RunOnDeviceWithOrderNCHW(
int64_t N,
int64_t C,
int64_t HxW,
const T* X,
const T* gamma,
const T* beta,
T* Y,
T* mean,
T* rstd);
bool RunOnDeviceWithOrderNHWC(
int64_t N,
int64_t C,
int64_t HxW,
const T* X,
const T* gamma,
const T* beta,
T* Y,
T* mean,
T* rstd);
const float epsilon_;
const StorageOrder order_;
Tensor mean_;
Tensor rstd_;
Tensor scale_;
Tensor bias_;
INPUT_TAGS(INPUT, SCALE, BIAS);
OUTPUT_TAGS(OUTPUT, MEAN, RSTD);
};
template <typename T, class Context>
class InstanceNormGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit InstanceNormGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(float, "epsilon", epsilon_, 1e-5),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE_GE(epsilon_, 0, "Must pass a nonnegative epsilon.");
CAFFE_ENFORCE_NE(
order_,
StorageOrder::UNKNOWN,
"order should be either \"NCHW\" or \"NHWC\".");
}
bool RunOnDevice() override {
const auto& X = Input(INPUT);
const auto& gamma = Input(SCALE);
const auto& dY = Input(OUTPUT_GRAD);
const int ndim = X.dim();
const int64_t N = X.dim(0);
const int64_t C = order_ == StorageOrder::NCHW ? X.dim(1) : X.dim(ndim - 1);
const int64_t HxW = X.numel() / (N * C);
CAFFE_ENFORCE_EQ(gamma.numel(), C);
const T* dY_data = dY.template data<T>();
const T* X_data = X.template data<T>();
const T* gamma_data = gamma.template data<T>();
const T* mean_data = nullptr;
const T* rstd_data = nullptr;
CAFFE_ENFORCE_GE(InputSize(), 4);
CAFFE_ENFORCE_LE(InputSize(), 6);
if (InputSize() == 6) {
const auto& mean = Input(MEAN);
const auto& rstd = Input(RSTD);
mean_data = mean.template data<T>();
rstd_data = rstd.template data<T>();
} else {
ReinitializeTensor(
&mean_, {N, C}, at::dtype<T>().device(Context::GetDeviceType()));
ReinitializeTensor(
&rstd_, {N, C}, at::dtype<T>().device(Context::GetDeviceType()));
ComputeMoments(
N,
C,
HxW,
X_data,
mean_.template mutable_data<T>(),
rstd_.template mutable_data<T>());
mean_data = mean_.template data<T>();
rstd_data = rstd_.template data<T>();
}
auto* dX = Output(INPUT_GRAD, X.sizes(), at::dtype<T>());
auto* dgamma = Output(SCALE_GRAD, gamma.sizes(), at::dtype<T>());
auto* dbeta = Output(BIAS_GRAD, gamma.sizes(), at::dtype<T>());
T* dX_data = dX->template mutable_data<T>();
T* dgamma_data = dgamma->template mutable_data<T>();
T* dbeta_data = dbeta->template mutable_data<T>();
switch (order_) {
case StorageOrder::NCHW: {
return RunOnDeviceWithOrderNCHW(
N,
C,
HxW,
dY_data,
X_data,
mean_data,
rstd_data,
gamma_data,
dX_data,
dgamma_data,
dbeta_data);
}
case StorageOrder::NHWC: {
return RunOnDeviceWithOrderNHWC(
N,
C,
HxW,
dY_data,
X_data,
mean_data,
rstd_data,
gamma_data,
dX_data,
dgamma_data,
dbeta_data);
}
default: {
CAFFE_THROW("Unknown storage order: ", order_);
}
}
}
private:
void ComputeMoments(
int64_t N,
int64_t C,
int64_t HxW,
const T* X,
T* mean,
T* rstd);
bool RunOnDeviceWithOrderNCHW(
int64_t N,
int64_t C,
int64_t HxW,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
T* dX,
T* dgamma,
T* dbeta);
bool RunOnDeviceWithOrderNHWC(
int64_t N,
int64_t C,
int64_t HxW,
const T* dY,
const T* X,
const T* mean,
const T* rstd,
const T* gamma,
T* dX,
T* dgamma,
T* dbeta);
const float epsilon_;
const StorageOrder order_;
Tensor mean_;
Tensor rstd_;
Tensor ds_;
Tensor db_;
Tensor c1_;
Tensor c2_;
Tensor c3_;
Tensor ones_;
INPUT_TAGS(INPUT, SCALE, BIAS, OUTPUT_GRAD, MEAN, RSTD);
OUTPUT_TAGS(INPUT_GRAD, SCALE_GRAD, BIAS_GRAD);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_INSTANCE_NORM_OP_H_
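RunOnDevice above only stages the pointers; the per-order kernels are defined out of line. For reference, instance norm on NCHW data normalizes each (n, c) plane of HxW values by its own mean and rstd = 1 / sqrt(var + epsilon), then applies the per-channel affine. A standalone sketch (illustrative names, no vectorization):

#include <cmath>
#include <cstdio>
#include <vector>

// Illustrative NCHW instance-norm forward: each (n, c) plane is normalized by
// its own moments, then scaled by per-channel gamma/beta.
void instance_norm_nchw(const float* X, const float* gamma, const float* beta,
                        int N, int C, int HxW, float eps, float* Y) {
  for (int n = 0; n < N; ++n) {
    for (int c = 0; c < C; ++c) {
      const float* x = X + (n * C + c) * HxW;
      float* y = Y + (n * C + c) * HxW;
      float mean = 0.f, sq = 0.f;
      for (int i = 0; i < HxW; ++i) { mean += x[i]; sq += x[i] * x[i]; }
      mean /= HxW;
      const float var = sq / HxW - mean * mean;
      const float rstd = 1.f / std::sqrt(var + eps);
      for (int i = 0; i < HxW; ++i)
        y[i] = (x[i] - mean) * rstd * gamma[c] + beta[c];
    }
  }
}

int main() {
  std::vector<float> X = {1, 2, 3, 4}; // N=1, C=1, HxW=4
  std::vector<float> Y(4);
  float gamma = 1.f, beta = 0.f;
  instance_norm_nchw(X.data(), &gamma, &beta, 1, 1, 4, 1e-5f, Y.data());
  std::printf("%f %f\n", Y[0], Y[3]); // roughly -1.34 and +1.34
}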
pytorch-main/caffe2/operators/integral_image_op.h
#ifndef INTEGRAL_IMAGE_OP_H_
#define INTEGRAL_IMAGE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class IntegralImageOp final : public Operator<Context> {
public:
template <class... Args>
explicit IntegralImageOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
template <typename T, class Context>
class IntegralImageGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit IntegralImageGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
Tensor row_pass_buffer_;
};
} // namespace caffe2
#endif // INTEGRAL_IMAGE_OP_H_
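The header only declares the ops, but the underlying summed-area table follows the standard recurrence S(y, x) = I(y, x) + S(y-1, x) + S(y, x-1) - S(y-1, x-1). A sketch for one H x W channel; the operator's exact output convention (e.g. any zero border row/column) lives in the .cc/.cu implementation, not here:

#include <cstdio>
#include <vector>

// Illustrative summed-area table: S[y][x] = sum of in[0..y][0..x], built in a
// single pass with the standard inclusion-exclusion recurrence.
std::vector<float> integral_image(const std::vector<float>& in, int H, int W) {
  std::vector<float> S(H * W, 0.f);
  for (int y = 0; y < H; ++y)
    for (int x = 0; x < W; ++x) {
      float v = in[y * W + x];
      if (x > 0) v += S[y * W + x - 1];
      if (y > 0) v += S[(y - 1) * W + x];
      if (y > 0 && x > 0) v -= S[(y - 1) * W + x - 1];
      S[y * W + x] = v;
    }
  return S;
}

int main() {
  std::vector<float> in(4 * 4, 1.f);
  auto S = integral_image(in, 4, 4);
  std::printf("%f\n", S[15]); // 16: sum of all ones
}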
pytorch-main/caffe2/operators/jsd_op.h
#ifndef CAFFE2_OPERATORS_JSD_OP_H_
#define CAFFE2_OPERATORS_JSD_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class BernoulliJSDOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(BernoulliJSDOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
template <typename T, class Context>
class BernoulliJSDGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(BernoulliJSDGradientOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_JSD_OP_H_
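BernoulliJSDOp's kernels are likewise only declared here. For reference, the Jensen-Shannon divergence between Bernoulli(p) and Bernoulli(q) is JSD = (KL(p||m) + KL(q||m)) / 2 with m = (p + q) / 2. A sketch assuming probabilities strictly inside (0, 1); the operator's epsilon and log-base handling may differ:

#include <cmath>
#include <cstdio>

// Illustrative Bernoulli KL term with natural logs; inputs must be in (0, 1)
// or the logs blow up.
double bernoulli_kl(double p, double q) {
  return p * std::log(p / q) + (1 - p) * std::log((1 - p) / (1 - q));
}

// Symmetrized JSD between Bernoulli(p) and Bernoulli(q).
double bernoulli_jsd(double p, double q) {
  const double m = 0.5 * (p + q);
  return 0.5 * (bernoulli_kl(p, m) + bernoulli_kl(q, m));
}

int main() {
  std::printf("%f\n", bernoulli_jsd(0.9, 0.1));
}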
pytorch-main/caffe2/operators/key_split_ops.h
#pragma once
#include <c10/util/irange.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include <vector>
namespace caffe2 {
template <typename T, class Context>
class KeySplitOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit KeySplitOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
categorical_limit_(
this->template GetSingleArgument<int>("categorical_limit", 0)) {
CAFFE_ENFORCE_GT(categorical_limit_, 0);
}
bool RunOnDevice() override {
auto& keys = Input(0);
const auto N = keys.numel();
const T *const keys_data = keys.template data<T>();
std::vector<int> counts(categorical_limit_);
std::vector<int*> eids(categorical_limit_);
for (const auto k : c10::irange(categorical_limit_)) {
counts[k] = 0;
}
for (const auto i : c10::irange(N)) {
const auto k = keys_data[i];
CAFFE_ENFORCE_GT(categorical_limit_, k);
CAFFE_ENFORCE_GE(k, 0);
counts[k]++;
}
for (const auto k : c10::irange(categorical_limit_)) {
auto *const eid = Output(k, {counts[k]}, at::dtype<int>());
eids[k] = eid->template mutable_data<int>();
counts[k] = 0;
}
for (const auto i : c10::irange(N)) {
const auto k = keys_data[i];
eids[k][counts[k]++] = i;
}
return true;
}
private:
int categorical_limit_;
};
} // namespace caffe2
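KeySplitOp is a two-pass counting scatter: count occurrences per key, size one output per key, then write each example index into its key's bucket. The same idea with std::vector standing in for the operator outputs:

#include <cstdio>
#include <vector>

// Illustrative two-pass bucketing, mirroring KeySplitOp: pass 1 counts
// per-key sizes, pass 2 scatters each position i into its key's bucket.
std::vector<std::vector<int>> key_split(const std::vector<int>& keys, int limit) {
  std::vector<std::vector<int>> buckets(limit);
  std::vector<int> counts(limit, 0);
  for (int k : keys) counts[k]++;
  for (int k = 0; k < limit; ++k) buckets[k].reserve(counts[k]);
  for (int i = 0; i < (int)keys.size(); ++i)
    buckets[keys[i]].push_back(i);
  return buckets;
}

int main() {
  auto b = key_split({2, 0, 2, 1, 0}, 3);
  std::printf("bucket 2 holds ids %d and %d\n", b[2][0], b[2][1]); // 0 and 2
}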
pytorch-main/caffe2/operators/layer_norm_op.h
#ifndef CAFFE2_OPERATORS_LAYER_NORM_OP_H_
#define CAFFE2_OPERATORS_LAYER_NORM_OP_H_
#include <array>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(LayerNorm)
namespace caffe2 {
template <class Context>
class LayerNormOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LayerNormOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, 1),
OP_SINGLE_ARG(float, "epsilon", epsilon_, 1e-5f),
OP_SINGLE_ARG(bool, "elementwise_affine", elementwise_affine_, false) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& X = Input(0);
auto* Y = Output(0);
CAFFE_ENFORCE_GE(X.dim(), 2, "LayerNorm requires input dim >= 2.");
const int canonical_axis = X.canonical_axis_index(axis_);
std::vector<int64_t> moments_dims(
X.sizes().cbegin(), X.sizes().cbegin() + canonical_axis);
moments_dims.push_back(1);
auto* mean = Output(1, moments_dims, at::dtype<T>());
auto* sigma = Output(2, moments_dims, at::dtype<T>());
const int M = X.size_to_dim(canonical_axis);
const int N = X.size_from_dim(canonical_axis);
Y->ResizeLike(X);
scale_.Resize(M);
bias_.Resize(M);
const T* X_data = X.template data<T>();
T* Y_data = Y->template mutable_data<T>();
T* mean_data = mean->template mutable_data<T>();
T* sigma_data = sigma->template mutable_data<T>();
T* scale_data = scale_.template mutable_data<T>();
T* bias_data = bias_.template mutable_data<T>();
if (M == 0) {
return true;
}
const std::array<int, 2> X_dims = {M, N};
const std::array<int, 2> Y_dims = {M, 1};
math::Moments<T, Context>(
2,
X_dims.data(),
Y_dims.data(),
X_data,
mean_data,
sigma_data,
&context_);
ComputeSigmaAndFusedParams<T>(
M, epsilon_, mean_data, sigma_data, sigma_data, scale_data, bias_data);
const T* gamma_data = nullptr;
const T* beta_data = nullptr;
if (elementwise_affine_) {
CAFFE_ENFORCE_EQ(InputSize(), 3);
const auto& gamma = Input(1);
const auto& beta = Input(2);
CAFFE_ENFORCE_EQ(gamma.numel(), N);
CAFFE_ENFORCE_EQ(beta.numel(), N);
gamma_data = gamma.template data<T>();
beta_data = beta.template data<T>();
}
LayerNormForward<T>(
M, N, X_data, scale_data, bias_data, gamma_data, beta_data, Y_data);
return true;
}
private:
template <typename T>
void ComputeSigmaAndFusedParams(
const int N,
const float eps,
const T* mean,
const T* var,
T* stddev,
T* scale,
T* bias);
template <typename T>
void LayerNormForward(
const int M,
const int N,
const T* X,
const T* scale,
const T* bias,
const T* gamma,
const T* beta,
T* Y);
const int axis_;
const float epsilon_;
const bool elementwise_affine_;
Tensor scale_{Context::GetDeviceType()};
Tensor bias_{Context::GetDeviceType()};
};
template <class Context>
class LayerNormGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LayerNormGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, 1),
OP_SINGLE_ARG(bool, "elementwise_affine", elementwise_affine_, false) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& dY = Input(0);
const auto& mean = Input(2);
const auto& sigma = Input(3);
const auto& X = Input(4);
const int canonical_axis = X.canonical_axis_index(axis_);
const int M = X.size_to_dim(canonical_axis);
const int N = X.size_from_dim(canonical_axis);
auto* dX = Output(0, X.sizes(), at::dtype<T>());
ReinitializeTensor(
&ds_, {M}, at::dtype<T>().device(Context::GetDeviceType()));
ReinitializeTensor(
&db_, {M}, at::dtype<T>().device(Context::GetDeviceType()));
ReinitializeTensor(
&rstd_, {M}, at::dtype<T>().device(Context::GetDeviceType()));
ReinitializeTensor(
&X_scale_, {M}, at::dtype<T>().device(Context::GetDeviceType()));
ReinitializeTensor(
&bias_, {M}, at::dtype<T>().device(Context::GetDeviceType()));
const T* dY_data = dY.template data<T>();
const T* X_data = X.template data<T>();
const T* mean_data = mean.template data<T>();
const T* sigma_data = sigma.template data<T>();
T* dX_data = dX->template mutable_data<T>();
T* ds_data = ds_.template mutable_data<T>();
T* db_data = db_.template mutable_data<T>();
T* rstd_data = rstd_.template mutable_data<T>();
T* X_scale_data = X_scale_.template mutable_data<T>();
T* bias_data = bias_.template mutable_data<T>();
const T* gamma_data = nullptr;
T* dgamma_data = nullptr;
T* dbeta_data = nullptr;
T* g_scale_data = nullptr;
if (elementwise_affine_) {
const auto& gamma = Input(5);
auto* dgamma = Output(1, gamma.sizes(), at::dtype<T>());
auto* dbeta = Output(2, gamma.sizes(), at::dtype<T>());
ReinitializeTensor(
&g_scale_, {M}, at::dtype<T>().device(Context::GetDeviceType()));
gamma_data = gamma.template data<T>();
dgamma_data = dgamma->template mutable_data<T>();
dbeta_data = dbeta->template mutable_data<T>();
g_scale_data = g_scale_.template mutable_data<T>();
}
if (M == 0) {
if (N > 0 && dgamma_data != nullptr) {
math::Set<T, Context>(N, T(0), dgamma_data, &context_);
}
if (N > 0 && dbeta_data != nullptr) {
math::Set<T, Context>(N, T(0), dbeta_data, &context_);
}
return true;
}
ComputeInternalGradients<T>(
M, N, dY_data, X_data, gamma_data, dX_data, ds_data, db_data);
ComputeFusedParams<T>(
M,
N,
mean_data,
sigma_data,
ds_data,
db_data,
rstd_data,
X_scale_data,
bias_data,
g_scale_data);
if (elementwise_affine_) {
GammaBetaBackward<T>(
M,
N,
dX_data,
dY_data,
rstd_data,
g_scale_data,
dgamma_data,
dbeta_data);
}
LayerNormBackward<T>(
M,
N,
dY_data,
X_data,
gamma_data,
rstd_data,
X_scale_data,
bias_data,
dX_data);
return true;
}
private:
template <typename T>
void ComputeInternalGradients(
const int M,
const int N,
const T* dY,
const T* X,
const T* gamma,
T* dYxX,
T* ds,
T* db);
template <typename T>
void ComputeFusedParams(
const int M,
const int N,
const T* mean,
const T* sigma,
const T* ds,
const T* db,
T* rstd,
T* X_scale,
T* bias,
T* g_scale);
template <typename T>
void LayerNormBackward(
const int M,
const int N,
const T* dY,
const T* X,
const T* gamma,
const T* dY_scale,
const T* X_scale,
const T* bias,
T* dX);
template <typename T>
void GammaBetaBackward(
const int M,
const int N,
const T* dYxX,
const T* dY,
const T* rstd,
const T* g_scale,
T* dgamma,
T* dbeta);
const int axis_;
const bool elementwise_affine_;
Tensor ds_;
Tensor db_;
Tensor rstd_;
Tensor X_scale_;
Tensor bias_;
Tensor g_scale_;
Tensor ones_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LAYER_NORM_OP_H_
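The forward pass reduces each of the M rows of length N to a mean and variance, folds them into a per-row scale = rstd and bias = -rstd * mean, and optionally applies gamma/beta. A standalone sketch of that data flow (the real op computes moments via math::Moments and fuses epsilon in ComputeSigmaAndFusedParams):

#include <cmath>
#include <cstdio>

// Illustrative layer-norm forward over M rows of length N, folding the
// normalization into a per-row scale/bias as the operator does.
void layer_norm(const float* X, const float* gamma, const float* beta,
                int M, int N, float eps, float* Y) {
  for (int m = 0; m < M; ++m) {
    const float* x = X + m * N;
    float* y = Y + m * N;
    float mean = 0.f, sq = 0.f;
    for (int i = 0; i < N; ++i) { mean += x[i]; sq += x[i] * x[i]; }
    mean /= N;
    const float var = sq / N - mean * mean;
    const float scale = 1.f / std::sqrt(var + eps); // per-row rstd
    const float bias = -scale * mean;               // fused -rstd * mean
    for (int i = 0; i < N; ++i) {
      const float v = x[i] * scale + bias;
      y[i] = gamma ? v * gamma[i] + beta[i] : v;    // optional affine
    }
  }
}

int main() {
  const float X[4] = {1, 2, 3, 4};
  float Y[4];
  layer_norm(X, nullptr, nullptr, 1, 4, 1e-5f, Y);
  std::printf("%f %f\n", Y[0], Y[3]);
}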
pytorch-main/caffe2/operators/leaky_relu_op.h
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T, class Context>
class LeakyReluOp : public Operator<Context> {
public:
template <class... Args>
explicit LeakyReluOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...), alpha_(0.01) {
if (HasArgument("alpha")) {
alpha_ = static_cast<T>(
this->template GetSingleArgument<float>("alpha", 0.01));
}
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
T alpha_;
};
template <typename T, class Context>
class LeakyReluGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit LeakyReluGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...), alpha_(0.01) {
if (HasArgument("alpha")) {
alpha_ = static_cast<T>(
this->template GetSingleArgument<float>("alpha", 0.01));
}
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
T alpha_;
};
} // namespace caffe2
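The kernels are defined out of line; for reference, the forward map is y = x if x > 0, else alpha * x, and the gradient is gated the same way. A minimal sketch:

#include <cstdio>

// Illustrative LeakyRelu forward and gradient. For alpha > 0 the output has
// the same sign as the input, so either can gate the gradient.
float leaky_relu(float x, float alpha) { return x > 0 ? x : alpha * x; }
float leaky_relu_grad(float x, float dy, float alpha) {
  return x > 0 ? dy : alpha * dy;
}

int main() {
  std::printf("%f %f\n", leaky_relu(-2.f, 0.01f),
              leaky_relu_grad(-2.f, 1.f, 0.01f)); // -0.02 and 0.01
}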
pytorch-main/caffe2/operators/length_split_op.h
#ifndef CAFFE2_OPERATORS_LENGTH_SPLIT_OP_H_
#define CAFFE2_OPERATORS_LENGTH_SPLIT_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class LengthsSplitOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LengthsSplitOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
n_split_(OperatorBase::GetSingleArgument<int32_t>("n_split", 0)) {
if (InputSize() == 1) {
// If not specified, then must have this argument
CAFFE_ENFORCE(
OperatorBase::HasArgument("n_split"),
"Argument `n_split` is missing and was not specified as input.");
CAFFE_ENFORCE(
n_split_ > 0,
"`n_split` must contain a positive value for defined behavior.");
}
}
~LengthsSplitOp() override {}
bool RunOnDevice() override {
const auto& L = Input(0);
CAFFE_ENFORCE_EQ(L.dim(), 1, "Input `LENGTHS` should be a 1D vector.");
if (InputSize() > 1) {
// We potentially have n_split specified as inputs as well
CAFFE_ENFORCE(
Input(1).dim() == 1 && Input(1).numel() == 1,
"Input `n_split` should be a vector of size 1.");
const auto& input1 = Input(1);
context_.template CopyItems<Context, CPUContext>(
input1.dtype(), 1, input1.raw_data(), &n_split_);
}
CAFFE_ENFORCE(
n_split_ > 0,
"`n_split` must contain a positive value for defined behavior.");
const auto M = L.numel();
auto* Y = Output(0, {M * n_split_}, at::dtype<int32_t>());
const int32_t* Ldata = L.template data<int32_t>();
int32_t* Ydata = Y->template mutable_data<int32_t>();
for (const auto i : c10::irange(M)) {
int32_t mod = Ldata[i] % n_split_;
int32_t res =
mod != 0 ? math::DivUp(Ldata[i], n_split_) : Ldata[i] / n_split_ + 1;
for (const auto j : c10::irange(n_split_)) {
Ydata[(i * n_split_) + j] = mod-- > 0 ? res : res - 1;
}
}
return true;
}
private:
int32_t n_split_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LENGTH_SPLIT_OP_H_
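The mod/res arithmetic above distributes each length L over n_split nearly equal pieces: the first L mod n_split pieces receive ceil(L / n_split) and the rest receive floor(L / n_split). The same arithmetic in isolation:

#include <cstdio>
#include <vector>

// Illustrative near-equal split of one length into n pieces, matching the
// operator: the first (L % n) pieces get the ceiling, the rest the floor.
std::vector<int> split_length(int L, int n) {
  std::vector<int> out(n);
  int mod = L % n;
  int res = (mod != 0) ? (L + n - 1) / n : L / n + 1; // as in the op
  for (int j = 0; j < n; ++j) out[j] = (mod-- > 0) ? res : res - 1;
  return out;
}

int main() {
  for (int v : split_length(10, 4)) std::printf("%d ", v); // 3 3 2 2
  std::printf("\n");
}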
pytorch-main/caffe2/operators/lengths_pad_op.h
#ifndef CAFFE2_OPERATORS_LENGTHS_PAD_OP_H_
#define CAFFE2_OPERATORS_LENGTHS_PAD_OP_H_
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class LengthsPadOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LengthsPadOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(double, "padding_value", padding_value_, -1),
OP_SINGLE_ARG(int, "target_length", target_length_, -1) {
CAFFE_ENFORCE_GE(target_length_, 1, "target_length argument must be >= 1");
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double, int32_t, int64_t>>::call(
this, Input(DATA));
}
template <typename T>
bool DoRunWithType() {
auto& data = Input(DATA);
auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE_EQ(lengths.dim(), 1, "LENGTHS must be 1-D");
CAFFE_ENFORCE_GE(data.dim(), 1, "DATA should be at least 1-D");
// Context::CopyFrom and math::Sum need the same context to avoid race
// conditions
// why? CPUContext is not used in Sum
lengths_host_.CopyFrom(lengths);
auto lengths_size = lengths_host_.numel();
auto* lengths_data = lengths_host_.template data<int32_t>();
int32_t total_length = 0;
CPUContext cpuContext;
math::Sum<int32_t, CPUContext>(
lengths_size, lengths_data, &total_length, &cpuContext);
CAFFE_ENFORCE_EQ(total_length, data.size(0));
auto shape = data.sizes().vec();
shape[0] = lengths_size * target_length_;
auto* output = Output(0, shape, at::dtype<T>());
auto block_size = data.size_from_dim(1);
auto src_data = data.template data<T>();
auto out_data = output->template mutable_data<T>();
math::Set(
output->numel(), static_cast<T>(padding_value_), out_data, &context_);
for (const auto i : c10::irange(lengths_size)) {
auto length = lengths_data[i];
CAFFE_ENFORCE_GE(length, 0);
CAFFE_ENFORCE_GE(
target_length_,
length,
"Length at index = ",
i,
" is larger than target length");
context_.template CopySameDevice<T>(
block_size * length, src_data, out_data);
out_data += block_size * target_length_;
src_data += block_size * length;
}
return true;
}
INPUT_TAGS(DATA, LENGTHS);
private:
double padding_value_;
int target_length_;
Tensor lengths_host_{CPU};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LENGTHS_PAD_OP_H_
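Each segment of `length` rows is copied into a fixed slot of target_length rows whose tail stays at padding_value. A sketch for block_size == 1 (scalar rows), with illustrative names:

#include <cstdio>
#include <vector>

// Illustrative LengthsPad on 1-D data: every segment is copied into a
// fixed-size slot pre-filled with padding_value.
std::vector<float> lengths_pad(const std::vector<float>& data,
                               const std::vector<int>& lengths,
                               int target_length, float padding_value) {
  std::vector<float> out(lengths.size() * target_length, padding_value);
  size_t src = 0;
  for (size_t i = 0; i < lengths.size(); ++i) {
    for (int j = 0; j < lengths[i]; ++j)
      out[i * target_length + j] = data[src++];
  }
  return out;
}

int main() {
  auto y = lengths_pad({1, 2, 3}, {2, 1}, 3, -1.f);
  for (float v : y) std::printf("%g ", v); // 1 2 -1 3 -1 -1
  std::printf("\n");
}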
pytorch-main/caffe2/operators/lengths_reducer_fused_8bit_rowwise_ops.h
#ifndef CAFFE2_OPERATORS_LENGTHS_REDUCER_FUSED_8BIT_ROWWISE_OPS_H_
#define CAFFE2_OPERATORS_LENGTHS_REDUCER_FUSED_8BIT_ROWWISE_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/fused_rowwise_8bit_conversion_ops.h"
#include "caffe2/operators/reducer_functors.h"
#include "caffe2/perfkernels/fused_8bit_rowwise_embedding_lookup.h"
#include "caffe2/utils/math.h"
#ifdef USE_FBGEMM
#include "fbgemm/Fbgemm.h"
#endif
namespace caffe2 {
template <class Context, bool with_weights = false, bool is_mean = false>
class SparseLengthsFused8BitRowwiseOp : public Operator<Context> {
public:
static_assert(
!(with_weights && is_mean),
"Cannot have with_weights and is_mean a the same time");
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(SparseLengthsFused8BitRowwiseOp)
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
const auto& data = Input(DATA);
const auto& indices = Input(INDICES);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES must be a vector");
CAFFE_ENFORCE_EQ(lengths.dim(), 1, "LENGTHS must be a vector");
const float* weights = nullptr;
if (with_weights) {
const auto& weights_input = Input(WEIGHTS);
CAFFE_ENFORCE_EQ(weights_input.dim(), 1, "WEIGHTS must be a vector");
CAFFE_ENFORCE_EQ(
weights_input.numel(),
indices.numel(),
"WEIGHTS should have the same length as INDICES.");
weights = weights_input.template data<float>();
}
CAFFE_ENFORCE_GT(data.size(1), 8, "DATA must have more than 8 columns");
// Subtract 8 from the #columns of data for the 4 bytes for scale and 4
// bytes for bias that we use in the fused representation (per row).
const std::vector<int64_t> shape = {lengths.size(0), data.size(1) - 8};
auto* output = Output(0, shape, at::dtype<float>());
std::int64_t block_size = output->size(1);
auto output_size = output->size(0);
auto index_size = indices.numel();
auto data_size = data.size(0);
const std::uint8_t* input_data = data.template data<std::uint8_t>();
const int* lengths_data = lengths.template data<int>();
float* output_data = output->template mutable_data<float>();
#ifdef USE_FBGEMM
    // Calling the JITed kernel from FBGEMM; the C2/perfkernels/ path in the
    // #else branch below will eventually be removed.
// If this is the first call or block size has changed (should never happen
// actually), generate a kernel.
if (block_size != last_block_size) {
last_block_size = block_size;
if (std::is_same<IndexType, std::int32_t>::value) {
kernel32_ = fbgemm::GenerateEmbeddingSpMDM<std::uint8_t, std::int32_t>(
block_size,
with_weights,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
} else {
CAFFE_ENFORCE((std::is_same<IndexType, std::int64_t>::value));
kernel64_ = fbgemm::GenerateEmbeddingSpMDM<std::uint8_t, std::int64_t>(
block_size,
with_weights,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
}
}
bool success;
if (std::is_same<IndexType, std::int32_t>::value) {
success = kernel32_(
output_size,
index_size,
data_size,
input_data,
indices.template data<std::int32_t>(),
lengths_data,
weights,
output_data);
} else {
success = kernel64_(
output_size,
index_size,
data_size,
input_data,
indices.template data<std::int64_t>(),
lengths_data,
weights,
output_data);
}
if (success) {
return true;
}
auto indices_data = indices.template data<IndexType>();
int64_t current = 0;
for (const auto m : c10::irange(output_size)) {
for (int i = 0; i < lengths_data[m]; ++i) {
CAFFE_ENFORCE_LT(current, index_size);
IndexType idx = indices_data[current];
CAFFE_ENFORCE(
0 <= idx && idx < data_size,
"Index ",
current,
" is out of bounds: ",
idx,
", range 0 to ",
data_size);
++current;
}
}
CAFFE_ENFORCE_EQ(
current,
index_size,
"Your input seems to be incorrect: the sum of lengths values should be "
"the size of the indices tensor, but it appears not.");
return false;
#else
Fused8BitRowwiseEmbeddingLookup(
block_size,
output_size,
index_size,
data_size,
input_data,
indices.template data<IndexType>(),
lengths_data,
weights,
is_mean,
output_data);
return true;
#endif
}
enum {
DATA = 0,
WEIGHTS = 1,
INDICES = 1 + with_weights,
LENGTHS = 2 + with_weights,
};
#ifdef USE_FBGEMM
private:
std::int64_t last_block_size{-1};
fbgemm::EmbeddingSpMDMKernelSignature<std::uint8_t, std::int32_t>::Type
kernel32_;
fbgemm::EmbeddingSpMDMKernelSignature<std::uint8_t, std::int64_t>::Type
kernel64_;
#endif
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LENGTHS_REDUCER_FUSED_8BIT_ROWWISE_OPS_H_
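The fused 8-bit rowwise layout appends a float scale and a float bias (8 bytes total) to each row of quantized bytes, which is why the output width above is data.size(1) - 8. A sketch of dequantizing one row under that assumed layout:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Illustrative dequantization of one fused 8-bit row: the last 8 bytes hold a
// float scale followed by a float bias; the rest are quantized values.
std::vector<float> dequantize_fused_row(const uint8_t* row, int fused_cols) {
  const int block_size = fused_cols - 8;
  float scale, bias;
  std::memcpy(&scale, row + block_size, sizeof(float));
  std::memcpy(&bias, row + block_size + sizeof(float), sizeof(float));
  std::vector<float> out(block_size);
  for (int j = 0; j < block_size; ++j)
    out[j] = scale * row[j] + bias;
  return out;
}

int main() {
  uint8_t row[4 + 8] = {0, 1, 2, 3};
  const float scale = 0.5f, bias = 1.f;
  std::memcpy(row + 4, &scale, sizeof(float));
  std::memcpy(row + 8, &bias, sizeof(float));
  for (float v : dequantize_fused_row(row, 12)) std::printf("%g ", v); // 1 1.5 2 2.5
  std::printf("\n");
}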
pytorch-main/caffe2/operators/lengths_reducer_fused_nbit_rowwise_ops.h
#ifndef CAFFE2_OPERATORS_LENGTHS_REDUCER_FUSED_NBIT_ROWWISE_OPS_H_
#define CAFFE2_OPERATORS_LENGTHS_REDUCER_FUSED_NBIT_ROWWISE_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/fused_rowwise_nbit_conversion_ops.h"
#include "caffe2/operators/reducer_functors.h"
#include "caffe2/utils/math.h"
#ifdef USE_FBGEMM
#include "fbgemm/FbgemmEmbedding.h"
#endif
namespace caffe2 {
template <
int BIT_RATE,
class Context,
bool with_weights = false,
bool is_mean = false>
class SparseLengthsFusedNBitRowwiseOp final : public Operator<Context> {
public:
static_assert(
!(with_weights && is_mean),
"Cannot have with_weights and is_mean a the same time");
USE_OPERATOR_CONTEXT_FUNCTIONS;
SparseLengthsFusedNBitRowwiseOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws) {}
~SparseLengthsFusedNBitRowwiseOp() override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
const auto& data = Input(DATA);
const auto& indices = Input(INDICES);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES must be a vector");
CAFFE_ENFORCE_EQ(lengths.dim(), 1, "LENGTHS must be a vector");
const float* weights = nullptr;
if (with_weights) {
const auto& weights_input = Input(WEIGHTS);
CAFFE_ENFORCE_EQ(weights_input.dim(), 1, "WEIGHTS must be a vector");
CAFFE_ENFORCE_EQ(
weights_input.numel(),
indices.numel(),
"WEIGHTS should have the same length as INDICES.");
weights = weights_input.template data<float>();
}
CAFFE_ENFORCE_GT(
data.size(1),
sizeof(at::Half) + sizeof(at::Half),
"DATA must have more than 4 columns");
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
    // Subtract 4 from the #columns of data for the 2 bytes for the fp16 scale
    // and 2 bytes for the fp16 bias that we use in the fused representation
    // (per row).
const std::vector<int64_t> shape = {
lengths.size(0),
static_cast<int64_t>(data.size(1) - 2 * sizeof(at::Half)) *
NUM_ELEM_PER_BYTE};
auto* output = Output(0, shape, at::dtype<float>());
int output_size = output->size(0);
int block_size = output->size(1);
CAFFE_ENFORCE_EQ(
block_size % NUM_ELEM_PER_BYTE,
0,
"block size must be divisible by " + std::to_string(NUM_ELEM_PER_BYTE));
int index_size = indices.numel();
auto data_size = data.size(0);
const uint8_t* input_data = data.template data<uint8_t>();
const IndexType* indices_data = indices.template data<IndexType>();
const int* lengths_data = lengths.template data<int>();
float* output_data = output->template mutable_data<float>();
#ifdef USE_FBGEMM
// If this is the first call or block size has changed (should never happen
// actually), generate a kernel.
if (block_size != last_block_size) {
last_block_size = block_size;
if (std::is_same<IndexType, std::int32_t>::value) {
kernel32_ = fbgemm::GenerateEmbeddingSpMDMNBit<std::int32_t>(
BIT_RATE,
block_size,
weights != nullptr,
is_mean,
/*prefetch distance*/ 8,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
} else {
CAFFE_ENFORCE((std::is_same<IndexType, std::int64_t>::value));
kernel64_ = fbgemm::GenerateEmbeddingSpMDMNBit<std::int64_t>(
BIT_RATE,
block_size,
weights != nullptr,
is_mean,
/*prefetch distance*/ 8,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
}
}
bool success;
if (std::is_same<IndexType, std::int32_t>::value) {
success = kernel32_(
output_size,
index_size,
data_size,
input_data,
reinterpret_cast<const std::int32_t*>(indices_data),
lengths_data,
weights,
output_data);
} else {
success = kernel64_(
output_size,
index_size,
data_size,
input_data,
reinterpret_cast<const std::int64_t*>(indices_data),
lengths_data,
weights,
output_data);
}
if (success) {
return true;
}
// Error handling
int64_t current = 0;
for (const auto m : c10::irange(output_size)) {
for (int i = 0; i < lengths_data[m]; ++i) {
CAFFE_ENFORCE_LT(current, index_size);
IndexType idx = indices_data[current];
CAFFE_ENFORCE(
0 <= idx && idx < data_size,
"Index ",
current,
" is out of bounds: ",
idx,
", range 0 to ",
data_size);
++current;
}
}
CAFFE_ENFORCE_EQ(
current,
index_size,
"Your input seems to be incorrect: the sum of lengths values should be "
"the size of the indices tensor, but it appears not.");
return false;
#else
C10_LOG_EVERY_N(WARNING, 10)
<< "Running slow path because FBGEMM is not available";
int64_t current = 0;
for (const auto m : c10::irange(output_size)) {
memset(output_data, 0, block_size * sizeof(float));
if (current + lengths_data[m] > index_size) {
return false;
}
for (int i = 0; i < lengths_data[m]; ++i, ++current) {
IndexType idx = indices_data[current];
if (idx < 0 || idx >= data_size) {
return false;
}
const at::Half* scale_bias = reinterpret_cast<const at::Half*>(
input_data + (idx + 1) * data.size(1) - 2 * sizeof(at::Half));
float weight = 1.0f;
if (with_weights) {
weight = weights[current];
}
const float scale = weight * scale_bias[0];
const float bias = weight * scale_bias[1];
for (const auto j : c10::irange(block_size)) {
uint8_t quantized =
input_data[idx * data.size(1) + j / NUM_ELEM_PER_BYTE];
quantized >>= (j % NUM_ELEM_PER_BYTE) * BIT_RATE;
quantized &= (1 << BIT_RATE) - 1;
output_data[j] = std::fma(scale, quantized, output_data[j] + bias);
}
} // for each i
if (is_mean && lengths_data[m]) {
float scale = 1.0f / lengths_data[m];
for (const auto j : c10::irange(block_size)) {
output_data[j] *= scale;
}
}
output_data += block_size;
} // for each m
return current == index_size;
#endif // USE_FBGEMM
}
enum {
DATA = 0,
WEIGHTS = 1,
INDICES = 1 + with_weights,
LENGTHS = 2 + with_weights,
};
#ifdef USE_FBGEMM
private:
std::int64_t last_block_size{-1};
fbgemm::EmbeddingSpMDMKernelSignature<std::uint8_t, std::int32_t>::Type
kernel32_;
fbgemm::EmbeddingSpMDMKernelSignature<std::uint8_t, std::int64_t>::Type
kernel64_;
#endif
}; // class SparseLengthsFusedNBitRowwiseOp
class SparseLengthsSumSparseLookupOp final : public Operator<CPUContext> {
public:
SparseLengthsSumSparseLookupOp(const OperatorDef& def, Workspace* ws)
: Operator<CPUContext>(def, ws) {}
~SparseLengthsSumSparseLookupOp() override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
const auto& indices = Input(INDICES);
const auto& lengths = Input(LENGTHS);
const auto& compressed_indices_mapping = Input(COMPRESSED_INDICES_MAPPING);
thread_local static std::vector<float> dummy_weight;
CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES must be a vector");
CAFFE_ENFORCE_EQ(lengths.dim(), 1, "LENGTHS must be a vector");
CAFFE_ENFORCE_EQ(
        compressed_indices_mapping.dim(),
        1,
        "COMPRESSED_INDICES_MAPPING must be a vector");
const int32_t* lengths_data = lengths.template data<int32_t>();
const IndexType* indices_data = indices.template data<IndexType>();
const int32_t* compressed_indices_mapping_data =
compressed_indices_mapping.template data<std::int32_t>();
dummy_weight.resize(indices.size(0));
const float* weights = dummy_weight.data();
bool has_weights = (InputSize() > 3);
if (has_weights) {
const auto& weights_input = Input(WEIGHTS);
CAFFE_ENFORCE_EQ(weights_input.dim(), 1, "WEIGHTS must be a vector");
CAFFE_ENFORCE_EQ(
weights_input.numel(),
indices.numel(),
"WEIGHTS should have the same length as INDICES.");
weights = weights_input.template data<float>();
}
// Allocate for the max possible size for now and later we may shrink the
// indices size.
auto* output_indices =
Output(INDICES, indices.sizes(), at::dtype<IndexType>());
auto* output_lengths =
Output(LENGTHS, lengths.sizes(), at::dtype<int32_t>());
Tensor* output_weights = nullptr;
float* output_weights_data = dummy_weight.data();
if (has_weights) {
output_weights = Output(2, indices.sizes(), at::dtype<float>());
output_weights_data = output_weights->template mutable_data<float>();
}
int32_t* output_lengths_data =
output_lengths->template mutable_data<int32_t>();
IndexType* output_indices_data =
output_indices->template mutable_data<IndexType>();
const int32_t output_size = lengths.size(0);
const IndexType index_size = indices.size(0);
const IndexType compressed_data_size = compressed_indices_mapping.size(0);
IndexType current = 0;
IndexType current_output = 0;
for (const auto m : c10::irange(output_size)) {
const auto current_length = lengths_data[m];
if (current + current_length > index_size) {
return false;
}
int32_t skipped = 0;
for (const auto i : c10::irange(current_length)) {
(void)i; // Suppress unused variable warning
IndexType compressed_idx = indices_data[current];
if (compressed_idx < 0 || compressed_idx >= compressed_data_size) {
return false;
}
IndexType idx = compressed_indices_mapping_data[compressed_idx];
if (idx == -1) {
++skipped;
} else {
output_weights_data[current_output] = weights[current];
output_indices_data[current_output++] = idx;
}
++current;
}
output_lengths_data[m] = current_length - skipped;
}
if (current_output < index_size) {
output_indices->ShrinkTo(current_output);
if (output_weights) {
output_weights->ShrinkTo(current_output);
}
}
return true;
}
enum {
INDICES = 0,
LENGTHS = 1,
COMPRESSED_INDICES_MAPPING = 2,
WEIGHTS = 3
};
};
template <int BIT_RATE, bool with_weights = false, bool is_mean = false>
class SparseLengthsNBitRowwiseSparseOp final : public Operator<CPUContext> {
public:
static_assert(
!(with_weights && is_mean),
"Cannot have with_weights and is_mean a the same time");
template<class... Args>
explicit SparseLengthsNBitRowwiseSparseOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {}
~SparseLengthsNBitRowwiseSparseOp() override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
const auto& data = Input(DATA);
const auto& indices = Input(INDICES);
const auto& lengths = Input(LENGTHS);
const auto& compressed_indices_mapping = Input(COMPRESSED_INDICES_MAPPING);
CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES must be a vector");
CAFFE_ENFORCE_EQ(lengths.dim(), 1, "LENGTHS must be a vector");
const float* weights = nullptr;
if (with_weights) {
const auto& weights_input = Input(WEIGHTS);
CAFFE_ENFORCE_EQ(weights_input.dim(), 1, "WEIGHTS must be a vector");
CAFFE_ENFORCE_EQ(
weights_input.numel(),
indices.numel(),
"WEIGHTS should have the same length as INDICES.");
weights = weights_input.template data<float>();
}
CAFFE_ENFORCE_GT(
data.size(1),
sizeof(at::Half) + sizeof(at::Half),
"DATA must have more than 4 columns");
static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8");
constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE;
// Subtract 4 (or 8 for BIT_RATE == 8) from the #columns of data for the
// fp16 (or fp32 for BIT_RATE == 8) scale and bias that we use in the fused
// representation (per row).
const std::vector<int64_t> shape = {
lengths.size(0),
static_cast<int64_t>(
data.size(1) -
2 * (BIT_RATE == 8 ? sizeof(float) : sizeof(at::Half))) *
NUM_ELEM_PER_BYTE};
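    // Example (illustrative): with BIT_RATE == 4 (NUM_ELEM_PER_BYTE == 2) and
    // 36 input columns, each row holds 32 bytes of packed values plus a
    // 2-byte fp16 scale and a 2-byte fp16 bias, so the output block size is
    // (36 - 4) * 2 == 64 floats per row.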
auto* output = Output(0, shape, at::dtype<float>());
int output_size = output->size(0);
int block_size = output->size(1);
CAFFE_ENFORCE_EQ(
block_size % NUM_ELEM_PER_BYTE,
0,
"block size must be divisible by " + std::to_string(NUM_ELEM_PER_BYTE));
auto data_size = data.size(0);
int index_size = indices.numel();
const uint8_t* input_data = data.template data<uint8_t>();
const IndexType* indices_data = indices.template data<IndexType>();
const int* lengths_data = lengths.template data<int>();
float* output_data = output->template mutable_data<float>();
const std::int32_t* compressed_indices_mapping_data =
compressed_indices_mapping.template data<std::int32_t>();
    // If compressed_indices_mapping is [0], it is an indicator that we
    // should fall back to normal SLS, which is also a valid fallback if the
    // LUT is pruned.
const bool fallback_to_no_sparse =
(compressed_indices_mapping.numel() == 1 &&
compressed_indices_mapping_data[0] == 0);
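    // Example (illustrative): passing compressed_indices_mapping = {0} makes
    // kernel generation and the kernel calls below take the no-sparse path,
    // treating INDICES as direct row indices into DATA.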
#ifdef USE_FBGEMM
    // If this is the first call or the block size has changed (which should
    // never actually happen), generate a kernel.
if (block_size != last_block_size) {
if (!fallback_to_no_sparse) {
last_block_size = block_size;
if (std::is_same<IndexType, std::int32_t>::value) {
if (BIT_RATE == 8) {
kernel32_ = fbgemm::
GenerateEmbeddingSpMDMRowWiseSparse<std::uint8_t, std::int32_t>(
block_size,
weights != nullptr,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
} else {
kernel32_ =
fbgemm::GenerateEmbeddingSpMDMNBitRowWiseSparse<std::int32_t>(
BIT_RATE,
block_size,
weights != nullptr,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
}
} else {
CAFFE_ENFORCE((std::is_same<IndexType, std::int64_t>::value));
if (BIT_RATE == 8) {
kernel64_ = fbgemm::
GenerateEmbeddingSpMDMRowWiseSparse<std::uint8_t, std::int64_t>(
block_size,
weights != nullptr,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
} else {
kernel64_ =
fbgemm::GenerateEmbeddingSpMDMNBitRowWiseSparse<std::int64_t>(
BIT_RATE,
block_size,
weights != nullptr,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
}
}
} else { // fallback_to_no_sparse == true
last_block_size = block_size;
if (std::is_same<IndexType, std::int32_t>::value) {
if (BIT_RATE == 8) {
kernel32_no_sparse_ =
fbgemm::GenerateEmbeddingSpMDM<std::uint8_t, std::int32_t>(
block_size,
with_weights,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
} else {
kernel32_no_sparse_ =
fbgemm::GenerateEmbeddingSpMDMNBit<std::int32_t>(
BIT_RATE,
block_size,
weights != nullptr,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
}
} else {
CAFFE_ENFORCE((std::is_same<IndexType, std::int64_t>::value));
if (BIT_RATE == 8) {
kernel64_no_sparse_ =
fbgemm::GenerateEmbeddingSpMDM<std::uint8_t, std::int64_t>(
block_size,
with_weights,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
} else {
kernel64_no_sparse_ =
fbgemm::GenerateEmbeddingSpMDMNBit<std::int64_t>(
BIT_RATE,
block_size,
weights != nullptr,
is_mean,
/*prefetch distance*/ 16,
/*is_weight_positional*/ false,
/*use_offsets*/ false);
}
}
}
} // end if (block_size != last_block_size)
bool success;
if (!fallback_to_no_sparse) {
if (std::is_same<IndexType, std::int32_t>::value) {
success = kernel32_(
output_size,
index_size,
compressed_indices_mapping.size(),
input_data,
reinterpret_cast<const std::int32_t*>(indices_data),
lengths_data,
weights,
output_data,
compressed_indices_mapping_data);
} else {
success = kernel64_(
output_size,
index_size,
compressed_indices_mapping.size(),
input_data,
reinterpret_cast<const std::int64_t*>(indices_data),
lengths_data,
weights,
output_data,
compressed_indices_mapping_data);
}
} else { // fallback_to_no_sparse == true
if (std::is_same<IndexType, std::int32_t>::value) {
success = kernel32_no_sparse_(
output_size,
index_size,
data_size,
input_data,
reinterpret_cast<const std::int32_t*>(indices_data),
lengths_data,
weights,
output_data);
} else {
success = kernel64_no_sparse_(
output_size,
index_size,
data_size,
input_data,
reinterpret_cast<const std::int64_t*>(indices_data),
lengths_data,
weights,
output_data);
}
}
if (success) {
return true;
}
// Error handling
int64_t current = 0;
for (const auto m : c10::irange(output_size)) {
for (int i = 0; i < lengths_data[m]; ++i) {
CAFFE_ENFORCE_LT(current, index_size);
IndexType idx = indices_data[current];
if (!fallback_to_no_sparse) {
CAFFE_ENFORCE(
0 <= idx && idx < compressed_indices_mapping.size(),
"Index ",
current,
" is out of bounds: ",
idx,
", range 0 to ",
compressed_indices_mapping.size());
} else {
CAFFE_ENFORCE(
0 <= idx && idx < data_size,
"Index ",
current,
" is out of bounds: ",
idx,
", range 0 to ",
data_size);
}
++current;
}
}
CAFFE_ENFORCE_EQ(
current,
index_size,
"Your input seems to be incorrect: the sum of lengths values should be "
"the size of the indices tensor, but it appears not.");
return false;
#else
C10_LOG_EVERY_N(WARNING, 10)
<< "Running slow path because FBGEMM is not available";
int64_t current = 0;
for (const auto m : c10::irange(output_size)) {
memset(output_data, 0, block_size * sizeof(float));
if (current + lengths_data[m] > index_size) {
return false;
}
for (int i = 0; i < lengths_data[m]; ++i, ++current) {
IndexType idx;
if (fallback_to_no_sparse) {
idx = indices_data[current];
if (idx < 0 || idx >= data_size) {
return false;
}
} else {
IndexType uncompressed_idx = indices_data[current];
if (uncompressed_idx < 0 ||
uncompressed_idx >= compressed_indices_mapping.size()) {
return false;
}
idx = compressed_indices_mapping_data[uncompressed_idx];
if (idx == -1) {
continue;
}
}
const uint8_t* scale_bias = input_data + (idx + 1) * data.size(1) -
2 * (BIT_RATE == 8 ? sizeof(float) : sizeof(at::Half));
float weight = 1.0f;
if (with_weights) {
weight = weights[current];
}
float scale, bias;
if (BIT_RATE == 8) {
scale = weight * reinterpret_cast<const float*>(scale_bias)[0];
bias = weight * reinterpret_cast<const float*>(scale_bias)[1];
} else {
scale = weight * reinterpret_cast<const at::Half*>(scale_bias)[0];
bias = weight * reinterpret_cast<const at::Half*>(scale_bias)[1];
}
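      // Bit-unpacking sketch (illustrative): with BIT_RATE == 2, four values
      // share one byte; element j sits at bit offset (j % 4) * 2 and is
      // isolated by the shift and the 0b11 mask below.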
for (const auto j : c10::irange(block_size)) {
uint8_t quantized =
input_data[idx * data.size(1) + j / NUM_ELEM_PER_BYTE];
quantized >>= (j % NUM_ELEM_PER_BYTE) * BIT_RATE;
quantized &= (1 << BIT_RATE) - 1;
output_data[j] = std::fma(scale, quantized, output_data[j] + bias);
}
} // for each i
if (is_mean && lengths_data[m]) {
float scale = 1.0f / lengths_data[m];
for (const auto j : c10::irange(block_size)) {
output_data[j] *= scale;
}
}
output_data += block_size;
} // for each m
return current == index_size;
#endif // USE_FBGEMM
}
enum {
DATA = 0,
WEIGHTS = 1,
INDICES = 1 + with_weights,
LENGTHS = 2 + with_weights,
COMPRESSED_INDICES_MAPPING = 3 + with_weights,
};
#ifdef USE_FBGEMM
private:
std::int64_t last_block_size{-1};
fbgemm::EmbeddingSpMDMRowWiseSparseKernelSignature<
std::uint8_t,
std::int32_t>::Type kernel32_;
fbgemm::EmbeddingSpMDMRowWiseSparseKernelSignature<
std::uint8_t,
std::int64_t>::Type kernel64_;
fbgemm::EmbeddingSpMDMKernelSignature<std::uint8_t, std::int32_t>::Type
kernel32_no_sparse_;
fbgemm::EmbeddingSpMDMKernelSignature<std::uint8_t, std::int64_t>::Type
kernel64_no_sparse_;
#endif
}; // class SparseLengthsNBitRowwiseSparseOp
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LENGTHS_REDUCER_FUSED_8BIT_ROWWISE_OPS_H_
| 23,598
| 33.451095
| 80
|
h
|
null |
pytorch-main/caffe2/operators/lengths_reducer_ops.h
|
#pragma once
#include <c10/util/irange.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/perfkernels/embedding_lookup.h"
#ifdef USE_FBGEMM
#include "fbgemm/Fbgemm.h"
#endif
#include <algorithm>
#include <functional>
namespace caffe2 {
// A templated class that implements SparseLengths[Sum,WeightedSum,Mean].
template <
typename T, // output type
class InputTypes, // supported input types, such as TensorTypes<float>
bool USE_WEIGHT = false, // Whether it is SparseLengthsWeightedSum
bool USE_MEAN = false, // Whether this is SparseLengthsMean
bool USE_POSITIONAL_WEIGHT = false
// USE_WEIGHT = true and USE_POSITIONAL_WEIGHT = true
// -> SparseLengthsPositionalWeightedSum
>
class CPUSparseLengthsReductionOp : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
template <class... Args>
explicit CPUSparseLengthsReductionOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {
static_assert(
!(USE_WEIGHT & USE_MEAN), "Cannot both specify weight and mean.");
}
~CPUSparseLengthsReductionOp() override {}
  // Currently, we support float and at::Half for the input data type, and
  // int32_t and int64_t for the index type.
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(DATA));
}
template <typename InputType>
bool DoRunWithType() {
return DispatchHelper<TensorTypes2<int32_t, int64_t>, InputType>::call(
this, Input(INDICES));
}
template <typename InputType, typename IndexType>
bool DoRunWithType2() {
auto& dataInput = Input(DATA);
auto& indicesInput = Input(INDICES);
auto& lengthsInput = Input(LENGTHS);
const int64_t M = lengthsInput.size(0);
const int64_t indices_size = indicesInput.numel();
auto shape = dataInput.sizes().vec();
shape[0] = M;
auto* output = Output(0, shape, at::dtype<T>());
T* out_data = output->template mutable_data<T>();
if (indices_size == 0) {
if (M > 0) {
memset(out_data, 0, output->numel() * sizeof(T));
}
return true;
}
CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector");
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
const int64_t N = dataInput.size(0);
const int D = dataInput.size_from_dim(1);
const InputType* in_data = dataInput.template data<InputType>();
const IndexType* indices = indicesInput.template data<IndexType>();
const int* lengths = lengthsInput.template data<int>();
const T* in_weight = nullptr;
if (USE_WEIGHT) {
// static if
auto& weightInput = Input(WEIGHT);
CAFFE_ENFORCE_EQ(1, weightInput.dim(), "WEIGHT must be a vector");
if (!USE_POSITIONAL_WEIGHT) {
CAFFE_ENFORCE_EQ(
weightInput.numel(),
indices_size,
"Weight should have the same length as indices.");
}
in_weight = weightInput.template data<T>();
}
#ifdef USE_FBGEMM
    // If this is the first call or the block size has changed (which should
    // never actually happen), generate a kernel.
if (D != last_block_size) {
last_block_size = D;
if (std::is_same<InputType, float>::value) {
if (std::is_same<IndexType, std::int32_t>::value) {
kernel_fp32_i32_ =
fbgemm::GenerateEmbeddingSpMDM<float, std::int32_t>(
D,
USE_WEIGHT,
USE_MEAN,
/*prefetch distance*/ 16,
USE_POSITIONAL_WEIGHT,
/*use_offsets*/ false);
} else {
CAFFE_ENFORCE((std::is_same<IndexType, std::int64_t>::value));
kernel_fp32_i64_ =
fbgemm::GenerateEmbeddingSpMDM<float, std::int64_t>(
D,
USE_WEIGHT,
USE_MEAN,
/*prefetch distance*/ 16,
USE_POSITIONAL_WEIGHT,
/*use_offsets*/ false);
}
} else {
CAFFE_ENFORCE((std::is_same<InputType, at::Half>::value));
if (std::is_same<IndexType, std::int32_t>::value) {
kernel_fp16_i32_ =
fbgemm::GenerateEmbeddingSpMDM<fbgemm::float16, std::int32_t>(
D,
USE_WEIGHT,
USE_MEAN,
/*prefetch distance*/ 16,
USE_POSITIONAL_WEIGHT,
/*use_offsets*/ false);
} else {
CAFFE_ENFORCE((std::is_same<IndexType, std::int64_t>::value));
kernel_fp16_i64_ =
fbgemm::GenerateEmbeddingSpMDM<fbgemm::float16, std::int64_t>(
D,
USE_WEIGHT,
USE_MEAN,
/*prefetch distance*/ 16,
USE_POSITIONAL_WEIGHT,
/*use_offsets*/ false);
}
}
}
bool success;
if (std::is_same<InputType, float>::value) {
if (std::is_same<IndexType, std::int32_t>::value) {
success = kernel_fp32_i32_(
M,
indices_size,
N,
reinterpret_cast<const float*>(in_data),
indicesInput.template data<std::int32_t>(),
lengths,
in_weight,
out_data);
} else {
success = kernel_fp32_i64_(
M,
indices_size,
N,
reinterpret_cast<const float*>(in_data),
indicesInput.template data<std::int64_t>(),
lengths,
in_weight,
out_data);
}
} else {
if (std::is_same<IndexType, std::int32_t>::value) {
success = kernel_fp16_i32_(
M,
indices_size,
N,
reinterpret_cast<const fbgemm::float16*>(in_data),
indicesInput.template data<std::int32_t>(),
lengths,
in_weight,
out_data);
} else {
success = kernel_fp16_i64_(
M,
indices_size,
N,
reinterpret_cast<const fbgemm::float16*>(in_data),
indicesInput.template data<std::int64_t>(),
lengths,
in_weight,
out_data);
}
}
if (success) {
return true;
}
int64_t current = 0;
for (const auto m : c10::irange(M)) {
for (int i = 0; i < lengths[m]; ++i) {
CAFFE_ENFORCE_LT(
current,
indices_size,
"Your input seems to be incorrect: the sum of lengths values "
"should be the size of the indices tensor, but it appears not.");
IndexType idx = indices[current];
CAFFE_ENFORCE(
0 <= idx && idx < N,
"Index ",
current,
" is out of bounds: ",
idx,
", range 0 to ",
N,
", actual batch length is ",
M);
++current;
}
}
CAFFE_ENFORCE_EQ(
current,
indices_size,
"Your input seems to be incorrect: the sum of lengths values should be "
"the size of the indices tensor, but it appears not.");
return false;
#endif
// delegate work to perfkernel that branches based on architecture
EmbeddingLookup<IndexType, InputType, T, USE_POSITIONAL_WEIGHT>(
D,
M,
indices_size,
N,
in_data,
indices,
lengths,
in_weight,
nullptr, // scale_bias field is only used in SparseLengths8BitsRowwiseOp
USE_MEAN,
out_data);
return true;
}
enum {
DATA = 0, // Data input.
WEIGHT = 1, // Weight input used in SparseLengthsWeightedSum
INDICES = 1 + USE_WEIGHT, // 1 in SparseLengths[Sum,Mean] and
// 2 in SparseLengthsWeightedSum
LENGTHS = 2 + USE_WEIGHT, // 2 in SparseLengths[Sum, Mean],
// 3 in SparseLengthsWeightedSum
};
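  // Semantics sketch (illustrative): with DATA of shape (N, D), INDICES =
  // {2, 0, 1} and LENGTHS = {2, 1}, the output has shape (2, D) with
  // out[0] = DATA[2] + DATA[0] and out[1] = DATA[1] (weighted or averaged
  // for the WeightedSum/Mean variants).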
#ifdef USE_FBGEMM
private:
std::int64_t last_block_size{-1};
fbgemm::EmbeddingSpMDMKernelSignature<float, std::int32_t>::Type
kernel_fp32_i32_;
fbgemm::EmbeddingSpMDMKernelSignature<float, std::int64_t>::Type
kernel_fp32_i64_;
fbgemm::EmbeddingSpMDMKernelSignature<fbgemm::float16, std::int32_t>::Type
kernel_fp16_i32_;
fbgemm::EmbeddingSpMDMKernelSignature<fbgemm::float16, std::int64_t>::Type
kernel_fp16_i64_;
#endif
};
template <typename T, class Context, class Engine = DefaultEngine>
class TTSparseLengthsSumOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit TTSparseLengthsSumOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
factor_i(this->template GetRepeatedArgument<int>(
"factor_i",
vector<int>{1, 1, 1})),
factor_j(this->template GetRepeatedArgument<int>(
"factor_j",
vector<int>{1, 1, 1})),
ranks(this->template GetRepeatedArgument<int>(
"ranks",
vector<int>{1, 1, 1, 1})),
emb_size(this->template GetSingleArgument<int>("emb_size", 64)) {
// cumprod of i, used for index slice
l_cumprod.push_back(1);
for (const auto i : c10::irange(1, factor_i.size())) {
l_cumprod.push_back(l_cumprod[i - 1] * factor_i[i - 1]);
}
}
~TTSparseLengthsSumOp() override {}
void Ind2Sub(int64_t* out_factor_index, const int64_t* indices, int len) {
// TODO: vectorization
auto N = factor_i.size();
for (const auto j : c10::irange(len)) {
auto idx = indices[j];
for (int i = N; i > 0; i--) {
out_factor_index[j * N + i - 1] = idx / l_cumprod[i - 1];
idx = idx % l_cumprod[i - 1];
}
}
}
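  // Worked example (illustrative): with factor_i = {4, 5, 6}, l_cumprod is
  // {1, 4, 20}; index 53 factorizes as 53 = 1 * 1 + 3 * 4 + 2 * 20, so
  // Ind2Sub writes {1, 3, 2}, the per-core row indices.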
bool GetSlice(
std::vector<std::vector<T>>& tgt_slice,
const T* core,
const vector<int64_t>& ind_slice,
int bs,
int idx) {
// implement the functionality index_select(core, 1, ind_slice)
auto num_of_elements = ranks[idx] * factor_j[idx] * ranks[idx + 1];
for (const auto i : c10::irange(bs)) {
memcpy(
tgt_slice[i].data(),
core + ind_slice[i] * num_of_elements,
num_of_elements * sizeof(T));
}
return true;
}
  // ind: stores the factorized index into each tensor core
  // bs: the number of indices
  // GatherAllRows computes the length-sum in two steps: 1) it uses the
  // tensor-train decomposition to compute the embedding for each index, and
  // 2) it sums the embeddings within each bag.
  // In step 1), it batches all the indices together. For every index, it uses
  // the pre-computed ind of each tensor core to extract the corresponding
  // slice of that core, then runs GEMM operations sequentially on the slices
  // to produce the embedding for that index.
  // In step 2), it takes the embeddings computed in step 1) and applies the
  // sum operation within each bag.
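  // Shape sketch (illustrative): with factor_j = {4, 4, 4} and ranks =
  // {1, r, r, 1}, the slice of core i has shape (ranks[i], factor_j[i] *
  // ranks[i + 1]); chaining the three GEMMs grows the row count 4 -> 16 -> 64,
  // yielding one (1 x emb_size) embedding per index when emb_size == 64.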
bool GatherAllRows(
int64_t* ind,
int bs,
int x_len,
vector<const T*> cores,
int segments,
const int* lengths,
T* out_data) {
// compute the largest memory consumption of intermediate result
// TODO: dynamic allocation size: cur_rows*factor_j[i]*ranks[i+1]
// and also explore the contiguous memory storage for res and int_res
int max_rank = *max_element(ranks.begin(), ranks.end());
std::vector<std::vector<T>> res(bs, std::vector<T>(emb_size * max_rank, 0));
std::vector<std::vector<T>> int_res(
bs, std::vector<T>(emb_size * max_rank, 0));
// Store the matrix A
vector<T*> Y_ptr(bs);
// Store the intermediate result in each layer
vector<T*> Z_ptr(bs);
for (const auto b : c10::irange(bs)) {
Y_ptr[b] = res[b].data();
Z_ptr[b] = int_res[b].data();
}
vector<int64_t> ind_slice(bs);
int rows = 0;
for (const auto i : c10::irange(x_len)) {
// slice cur
for (const auto j : c10::irange(bs)) {
ind_slice[j] = ind[x_len * j + i];
}
if (i == 0) {
GetSlice(res, cores[i], ind_slice, bs, i);
rows = factor_j[0];
} else {
std::vector<std::vector<T>> slice(
bs, std::vector<T>(ranks[i] * factor_j[i] * ranks[i + 1], 0));
vector<const T*> X_ptr(bs);
for (const auto b : c10::irange(bs)) {
X_ptr[b] = slice[b].data();
}
GetSlice(slice, cores[i], ind_slice, bs, i);
math::GemmBatched<T, CPUContext>(
CblasNoTrans,
CblasNoTrans,
bs,
rows,
factor_j[i] * ranks[i + 1],
ranks[i],
1.0f,
const_cast<const T**>(Y_ptr.data()),
X_ptr.data(),
0.0f,
Z_ptr.data(),
&context_);
for (const auto b : c10::irange(bs)) {
std::memcpy(Y_ptr[b], Z_ptr[b], (emb_size * max_rank) * sizeof(T));
}
rows *= factor_j[i];
}
// save the intermediate output for backward path
// shape for the core
auto shape = vector<int64_t>({bs, rows, ranks[i + 1]});
if (i < 2) {
auto* core_data = Output(i + 1, shape, at::dtype<T>());
T* out_core = core_data->template mutable_data<T>();
for (const auto b : c10::irange(bs)) {
std::memcpy(
out_core + b * rows * ranks[i + 1],
Y_ptr[b],
rows * ranks[i + 1] * sizeof(T));
}
}
}
// reduction and store back to output
vector<int64_t> cum_lengths(segments);
for (const auto seg : c10::irange(segments)) {
cum_lengths[seg] =
seg == 0 ? lengths[0] : lengths[seg] + cum_lengths[seg - 1];
}
int length_idx = 0;
vector<T> tmp_sum(emb_size, 0.0f);
for (int i = 0; i <= bs; i++) {
while ((length_idx < segments) && (i == cum_lengths[length_idx])) {
// store the tmp_sum into output
memcpy(
&out_data[length_idx * emb_size],
tmp_sum.data(),
emb_size * sizeof(T));
length_idx++;
fill(tmp_sum.begin(), tmp_sum.end(), 0.0f);
}
if (i == bs) {
break;
}
transform(
res[i].begin(),
res[i].begin() + emb_size,
tmp_sum.begin(),
tmp_sum.begin(),
std::plus<T>());
}
return true;
}
bool RunOnDevice() override {
const auto& dataInput0 = Input(0);
const auto& dataInput1 = Input(1);
const auto& dataInput2 = Input(2);
const auto& indicesInput = Input(3);
const auto& lengthsInput = Input(4);
CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector");
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
int N = factor_i.size();
const int64_t M = lengthsInput.size(0);
auto shape = vector<int64_t>({M, emb_size});
auto* output = Output(0, shape, at::dtype<T>());
T* out_data = output->template mutable_data<T>();
const T* core0 = dataInput0.template data<T>();
const T* core1 = dataInput1.template data<T>();
const T* core2 = dataInput2.template data<T>();
const int* lengths = lengthsInput.template data<int>();
vector<const T*> cores = {core0, core1, core2};
const int64_t* indices = indicesInput.template data<int64_t>();
// Store the factor index for backward path
auto index_shape = vector<int64_t>({indicesInput.size(), N});
auto* index_data = Output(3, index_shape, at::dtype<int64_t>());
int64_t* out_factor_index = index_data->template mutable_data<int64_t>();
// Store the factorized index for each core
Ind2Sub(out_factor_index, indices, indicesInput.size());
return GatherAllRows(
out_factor_index, indicesInput.size(), N, cores, M, lengths, out_data);
}
protected:
vector<int> factor_i;
vector<int> factor_j;
vector<int> ranks;
vector<int> l_cumprod;
int emb_size;
};
template <typename T, class Context>
class TTSparseLengthsSumGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit TTSparseLengthsSumGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override;
~TTSparseLengthsSumGradientOp() override {}
};
// implement the gradient op for TTLengthSumGradient op
template <typename T, class Context>
bool TTSparseLengthsSumGradientOp<T, Context>::RunOnDevice() {
const auto& core0 = Input(0);
const auto& core1 = Input(1);
const auto& core2 = Input(2);
const auto& lengths = Input(3);
const auto& core0_out = Input(4);
const auto& core1_out = Input(5);
const auto& index_out = Input(6);
const auto& dY = Input(7);
const int* lengths_data = lengths.template data<int>();
const T* dY_data = dY.template data<T>();
// restore the arguments from shape
const int64_t bs = index_out.size(0);
const int64_t emb_size = dY.size(1);
const int64_t num_segments = lengths.size(0);
auto core0_shape = core0.sizes().vec();
auto core1_shape = core1.sizes().vec();
auto core2_shape = core2.sizes().vec();
auto core0_out_shape = core0_out.sizes().vec();
auto core1_out_shape = core1_out.sizes().vec();
auto* dCore0 = Output(0, core0_shape, at::dtype<T>());
auto* dCore1 = Output(1, core1_shape, at::dtype<T>());
auto* dCore2 = Output(2, core2_shape, at::dtype<T>());
T* dCore0_data = dCore0->template mutable_data<T>();
T* dCore1_data = dCore1->template mutable_data<T>();
T* dCore2_data = dCore2->template mutable_data<T>();
  memset(
      dCore0_data,
      0,
      sizeof(T) *
          accumulate(
              core0_shape.begin(),
              core0_shape.end(),
              int64_t(1),
              std::multiplies<int64_t>()));
  memset(
      dCore1_data,
      0,
      sizeof(T) *
          accumulate(
              core1_shape.begin(),
              core1_shape.end(),
              int64_t(1),
              std::multiplies<int64_t>()));
  memset(
      dCore2_data,
      0,
      sizeof(T) *
          accumulate(
              core2_shape.begin(),
              core2_shape.end(),
              int64_t(1),
              std::multiplies<int64_t>()));
int64_t* index_out_data = index_out.template mutable_data<int64_t>();
vector<vector<int64_t>> index_slice(bs, vector<int64_t>(3, 0));
for (const auto b : c10::irange(bs)) {
memcpy(index_slice[b].data(), index_out_data + b * 3, 3 * sizeof(int64_t));
}
vector<const T*> A_ptr(bs);
vector<T*> B_ptr(bs);
vector<T*> C_ptr(bs);
// size of each batch
int64_t num_of_elements = 0;
// construct the ranks
// expand the gradient into all indices
vector<vector<T>> core2_out_grad(bs, vector<T>(emb_size, 0));
int64_t data_index = 0;
for (const auto range_index : c10::irange(num_segments)) {
for (int64_t start = data_index;
data_index < start + lengths_data[range_index];
++data_index) {
memcpy(
core2_out_grad[data_index].data(),
dY_data + range_index * emb_size,
emb_size * sizeof(T));
}
}
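  // Example (illustrative): with lengths = {2, 1}, rows 0 and 1 of
  // core2_out_grad both receive dY[0], and row 2 receives dY[1].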
// =======================================================
// Calculate dCore2_data:
  // 1) Transpose core1_out and multiply with core2_out_grad
  // 2) Add the result to dCore2_data
vector<vector<T>> dCore2_data_slice_grad(
bs, vector<T>(core2_shape[1] * core2_shape[2] * core2_shape[3], 0));
const T* core1_out_data = core1_out.template data<T>();
for (const auto b : c10::irange(bs)) {
A_ptr[b] = core1_out_data + b * core1_out.size(1) * core1_out.size(2);
B_ptr[b] = core2_out_grad[b].data();
C_ptr[b] = dCore2_data_slice_grad[b].data();
}
math::GemmBatched<T, CPUContext>(
CblasTrans,
CblasNoTrans,
bs,
core2.size(1), // M
core2.size(2) * core2.size(3), // N
core1_out.size(1), // K
1.0f,
const_cast<const T**>(A_ptr.data()),
const_cast<const T**>(B_ptr.data()),
0.0f,
C_ptr.data(),
&context_);
// update the corresponding slice
num_of_elements = core2_shape[1] * core2_shape[2] * core2_shape[3];
T* core2_data = core2.template mutable_data<T>();
vector<vector<T>> core2_slice(
bs, vector<T>(core2_shape[1] * core2_shape[2] * core2_shape[3], 0));
for (const auto b : c10::irange(bs)) {
for (const auto i : c10::irange(num_of_elements)) {
dCore2_data[index_slice[b][2] * num_of_elements + i] += C_ptr[b][i];
}
memcpy(
core2_slice[b].data(),
core2_data + index_slice[b][2] * num_of_elements,
sizeof(T) * num_of_elements);
}
// Calculate core1_out_grad
vector<vector<T>> core1_out_grad(
bs, vector<T>(core1_out_shape[1] * core1_out_shape[2], 0));
for (const auto b : c10::irange(bs)) {
A_ptr[b] = core2_out_grad[b].data();
B_ptr[b] = core2_slice[b].data();
C_ptr[b] = core1_out_grad[b].data();
}
math::GemmBatched<T, CPUContext>(
CblasNoTrans,
CblasTrans,
bs,
core1_out.size(1), // M
core2_shape[1], // N
core2_shape[2] * core2_shape[3], // K
1.0f,
const_cast<const T**>(A_ptr.data()),
const_cast<const T**>(B_ptr.data()),
0.0f,
C_ptr.data(),
&context_);
// =======================================================
// Calculate dCore1_data:
// 1) Transpose core1_out_grad and multiply with core0_out
// 2) Transpose the result and then add to dCore1_data
vector<vector<T>> dCore1_data_slice_grad(
bs, vector<T>(core1_shape[1] * core1_shape[2] * core1_shape[3], 0));
const T* core0_out_data = core0_out.template data<T>();
for (const auto b : c10::irange(bs)) {
A_ptr[b] = core0_out_data + b * core0_out.size(1) * core0_out.size(2);
B_ptr[b] = core1_out_grad[b].data();
C_ptr[b] = dCore1_data_slice_grad[b].data();
}
math::GemmBatched<T, CPUContext>(
CblasTrans,
CblasNoTrans,
bs,
core1.size(1), // M
core1.size(2) * core1.size(3), // N
core0_out.size(1), // K
1.0f,
const_cast<const T**>(A_ptr.data()),
const_cast<const T**>(B_ptr.data()),
0.0f,
C_ptr.data(),
&context_);
// update the corresponding slice
num_of_elements = core1_shape[1] * core1_shape[2] * core1_shape[3];
T* core1_data = core1.template mutable_data<T>();
vector<vector<T>> core1_slice(
bs, vector<T>(core1_shape[1] * core1_shape[2] * core1_shape[3], 0));
for (const auto b : c10::irange(bs)) {
for (const auto i : c10::irange(num_of_elements)) {
dCore1_data[index_slice[b][1] * num_of_elements + i] += C_ptr[b][i];
}
memcpy(
core1_slice[b].data(),
core1_data + index_slice[b][1] * num_of_elements,
sizeof(T) * num_of_elements);
}
// Calculate core0_out_grad
vector<vector<T>> core0_out_grad(
bs, vector<T>(core0_out_shape[1] * core0_out_shape[2], 0));
for (const auto b : c10::irange(bs)) {
A_ptr[b] = core1_out_grad[b].data();
B_ptr[b] = core1_slice[b].data();
C_ptr[b] = core0_out_grad[b].data();
}
math::GemmBatched<T, CPUContext>(
CblasNoTrans,
CblasTrans,
bs,
core0_out.size(1), // M
core1_shape[1], // N
core1_shape[2] * core1_shape[3], // K
1.0f,
const_cast<const T**>(A_ptr.data()),
const_cast<const T**>(B_ptr.data()),
0.0f,
C_ptr.data(),
&context_);
num_of_elements = core0_shape[1] * core0_shape[2] * core0_shape[3];
for (const auto b : c10::irange(bs)) {
for (const auto i : c10::irange(num_of_elements)) {
dCore0_data[index_slice[b][0] * num_of_elements + i] += C_ptr[b][i];
}
}
return true;
}
} // namespace caffe2
| 23,499
| 31.369146
| 109
|
h
|
null |
pytorch-main/caffe2/operators/lengths_reducer_rowwise_8bit_ops.h
|
#ifndef CAFFE2_OPERATORS_LENGTHS_REDUCER_ROWWISE_8bits_OP_H_
#define CAFFE2_OPERATORS_LENGTHS_REDUCER_ROWWISE_8bits_OP_H_
// SparseLengthsSum8bits
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/reducer_functors.h"
#include "caffe2/perfkernels/embedding_lookup.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
const float kEqualityThreshold = 1e-10f;
}
template <
class Context,
bool USE_WEIGHTS = 0,
bool USE_MEAN = 0,
class OutDataT = float>
class SparseLengths8BitsRowwiseOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(SparseLengths8BitsRowwiseOp);
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename IndexType>
bool DoRunWithType() {
auto& dataInput = Input(DATA);
auto& lengthsInput = Input(LENGTHS);
auto* scale_bias = Input(SCALE_BIAS).template data<float>();
CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector");
const int64_t outputSize = lengthsInput.size(0);
auto& indicesInput = Input(INDICES);
    CAFFE_ENFORCE_EQ(
        2, Input(SCALE_BIAS).dim(), "scale_bias has to be a matrix");
CAFFE_ENFORCE_EQ(
dataInput.size(0),
Input(SCALE_BIAS).size(0),
"scale_bias must have the same first dim as data");
CAFFE_ENFORCE_EQ(
2,
Input(SCALE_BIAS).size(1),
"the second dim of scale_bias has to be equal to 2");
CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector");
const IndexType* indices = indicesInput.template data<IndexType>();
const int* lengths = lengthsInput.template data<int>();
vector<int64_t> shape = dataInput.sizes().vec();
shape[0] = outputSize;
auto* output = Output(0, shape, at::dtype<OutDataT>());
const float* w = nullptr;
if (USE_WEIGHTS) {
w = Input(WEIGHTS).template data<float>();
}
int64_t in_block_size = dataInput.size_from_dim(1);
OutDataT* out = output->template mutable_data<OutDataT>();
const uint8_t* input_data = dataInput.template data<uint8_t>();
// delegate work to perfkernel that branches based on architecture
const int64_t indices_size = indicesInput.numel();
const int64_t N = dataInput.size(0);
EmbeddingLookup(
in_block_size,
outputSize,
indices_size,
N, // embedding table length
input_data,
indices,
lengths,
w,
scale_bias,
USE_MEAN,
out);
return true;
}
enum {
DATA = 0,
WEIGHTS = 1,
INDICES = 1 + USE_WEIGHTS,
LENGTHS = 2 + USE_WEIGHTS,
SCALE_BIAS = 3 + USE_WEIGHTS
};
};
template <class Context>
class FloatToRowwiseQuantized8BitsOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(FloatToRowwiseQuantized8BitsOp);
bool RunOnDevice() override {
auto& input = Input(DATA_FLOAT);
auto* input_data = input.template data<float>();
auto* output = Output(DATA_UINT8, input.sizes(), at::dtype<uint8_t>());
vector<int64_t> scale_bias_dims = {input.size(0), 2};
auto* scale_bias = Output(SCALE_BIAS, scale_bias_dims, at::dtype<float>());
auto* output_data = output->template mutable_data<uint8_t>();
float* scale_bias_data = scale_bias->template mutable_data<float>();
size_t n_blocks = input.size(0);
size_t block_size = input.size_from_dim(1);
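    // Quantization sketch (illustrative): each row spanning [min, max] maps x
    // to round((x - min) / scale) with scale = (max - min) / 255; e.g. a row
    // covering [0.0, 2.55] gives scale = 0.01, bias = 0.0, and x = 1.27
    // quantizes to 127.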
for (const auto i : c10::irange(n_blocks)) {
ConstEigenVectorArrayMap<float> input_row(
input_data + i * block_size, block_size);
EigenVectorArrayMap<uint8_t> output_row(
output_data + i * block_size, block_size);
auto min_element = input_row.minCoeff();
auto max_element = input_row.maxCoeff();
if (max_element - min_element < kEqualityThreshold) {
scale_bias_data[2 * i] = 1.0f;
scale_bias_data[2 * i + 1] = min_element;
memset(output_data + i * block_size, 0, block_size);
} else {
scale_bias_data[2 * i] = (max_element - min_element) / 255.0f;
scale_bias_data[2 * i + 1] = min_element;
const float inv_scale = 1.0f / scale_bias_data[2 * i];
output_row = ((input_row - scale_bias_data[2 * i + 1]) * inv_scale)
.round()
.template cast<uint8_t>();
}
}
return true;
}
private:
INPUT_TAGS(DATA_FLOAT);
OUTPUT_TAGS(DATA_UINT8, SCALE_BIAS);
};
template <class Context>
class Rowwise8BitQuantizedToFloatOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(Rowwise8BitQuantizedToFloatOp);
bool RunOnDevice() override {
auto& input = Input(DATA_UINT8);
auto& scale_bias = Input(SCALE_BIAS);
    CAFFE_ENFORCE_EQ(2, scale_bias.dim(), "scale_bias has to be a matrix");
CAFFE_ENFORCE_EQ(
input.size(0),
scale_bias.size(0),
"scale_bias must have the same first dim as data");
CAFFE_ENFORCE_EQ(
2,
scale_bias.size(1),
"the second dim of scale_bias has to be equal to 2");
auto* output = Output(DATA_FLOAT, input.sizes(), at::dtype<float>());
auto* input_data = input.template data<uint8_t>();
auto* scale_bias_data = scale_bias.template data<float>();
auto* output_data = output->template mutable_data<float>();
size_t block_size = input.size_from_dim(1);
size_t n_blocks = input.size(0);
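    // Dequantization sketch (illustrative): x = q * scale + bias, the inverse
    // of the rowwise quantization above; q = 127 with scale = 0.01 and
    // bias = 0.0 recovers 1.27.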
for (const auto i : c10::irange(n_blocks)) {
ConstEigenVectorArrayMap<uint8_t> input_row(
input_data + i * block_size, block_size);
EigenVectorArrayMap<float> output_row(
output_data + i * block_size, block_size);
output_row = input_row.template cast<float>() * scale_bias_data[2 * i] +
scale_bias_data[2 * i + 1];
}
return true;
}
private:
INPUT_TAGS(DATA_UINT8, SCALE_BIAS);
OUTPUT_TAGS(DATA_FLOAT);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LENGTHS_REDUCER_ROWWISE_8bits_OP_H_
| 6,137
| 32.358696
| 80
|
h
|
null |
pytorch-main/caffe2/operators/lengths_top_k_op.h
|
#ifndef CAFFE2_OPERATORS_LENGTHS_TOP_K_OP_H_
#define CAFFE2_OPERATORS_LENGTHS_TOP_K_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class LengthsTopKOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LengthsTopKOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "k", k_, -1) {
CAFFE_ENFORCE_GE(k_, 1, "k argument must be >= 1");
}
bool RunOnDevice() override;
protected:
int k_;
INPUT_TAGS(X_IN, Y_IN);
OUTPUT_TAGS(TOPK_VALUES_OUT, TOPK_INDICES_OUT);
};
template <typename T, class Context>
class LengthsTopKGradientOp : public Operator<Context> {
public:
template <class... Args>
explicit LengthsTopKGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "k", k_, -1) {
CAFFE_ENFORCE_GE(k_, 1, "k argument must be >= 1");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
int k_;
INPUT_TAGS(LENGTH_IN, INDICES_IN, DER_TOPK_IN);
OUTPUT_TAGS(DER_X_OUT);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LENGTHS_TOP_K_OP_H_
| 1,358
| 24.166667
| 56
|
h
|
null |
pytorch-main/caffe2/operators/listwise_l2r_op.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class LambdaRankNdcgOp final : public Operator<Context> {
public:
template <class... Args>
explicit LambdaRankNdcgOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
use_ndcg_as_loss_(
this->template GetSingleArgument<bool>("use_ndcg_as_loss", false)),
use_idcg_normalization_(this->template GetSingleArgument<bool>(
"use_idcg_normalization",
true)),
use_exp_gain_(
this->template GetSingleArgument<bool>("use_exp_gain", true)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
private:
INPUT_TAGS(PRED, REL, SESSION_LENS);
OUTPUT_TAGS(LOSS, DPRED);
void ResizeInvLogITensor(int);
void ComputeDiscounts(int*, int);
float LambdaRankNdcgSession(
int start_index,
int end_index,
const Tensor& y,
const Tensor& r,
Tensor** dy);
bool use_ndcg_as_loss_;
bool use_idcg_normalization_;
bool use_exp_gain_;
Tensor gain_;
Tensor discount_;
Tensor rank_idx_;
Tensor ideal_idx_;
Tensor lambda_;
Tensor inv_log_i_;
};
template <typename T, class Context>
class LambdaRankNdcgGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(LambdaRankNdcgGradientOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
private:
INPUT_TAGS(Y, SESSION_LENS, DY_CACHE, DLOSS);
OUTPUT_TAGS(DY);
};
} // namespace caffe2
| 1,677
| 25.21875
| 79
|
h
|
null |
pytorch-main/caffe2/operators/load_save_op.h
|
#ifndef CAFFE2_OPERATORS_LOAD_SAVE_OP_H_
#define CAFFE2_OPERATORS_LOAD_SAVE_OP_H_
#include <cstdio>
#include <map>
#include <unordered_set>
#include <c10/util/irange.h>
#include <c10/util/string_view.h>
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/context.h"
#include "caffe2/core/db.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/load_save_op_util.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/proto_utils.h"
namespace caffe2 {
using db::Cursor;
using db::DB;
using db::Transaction;
template <class Context>
class DBExistsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit DBExistsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
ws_(ws),
absolute_path_(
this->template GetSingleArgument<int>("absolute_path", false)),
db_name_(this->template GetSingleArgument<string>("db_name", "")),
db_type_(this->template GetSingleArgument<string>("db_type", "")) {}
bool RunOnDevice() override {
string full_db_name =
absolute_path_ ? db_name_ : (ws_->RootFolder() + "/" + db_name_);
auto* output = Output(0);
output->Resize();
bool* exists = output->template mutable_data<bool>();
*exists = caffe2::db::DBExists(db_type_, full_db_name);
return true;
}
private:
Workspace* ws_;
bool absolute_path_;
std::string db_name_;
std::string db_type_;
};
template <class Context>
class LoadOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit LoadOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
ws_(ws),
absolute_path_(
this->template GetSingleArgument<int>("absolute_path", false)),
add_prefix_(this->template GetSingleArgument<string>("add_prefix", "")),
strip_prefix_(
this->template GetSingleArgument<string>("strip_prefix", "")),
db_name_(this->template GetSingleArgument<string>("db", "")),
db_names_(this->template GetRepeatedArgument<string>("dbs")),
db_type_(this->template GetSingleArgument<string>("db_type", "")),
db_options_(this->template GetSingleArgument<string>("db_options", "")),
keep_device_(this->template GetSingleArgument<int>("keep_device", 0)),
load_all_(this->template GetSingleArgument<int>("load_all", 0)),
allow_incomplete_(
this->template GetSingleArgument<bool>("allow_incomplete", false)),
blob_names_(
this->template GetRepeatedArgument<string>("source_blob_names")),
shape_(this->template GetRepeatedArgument<int64_t>("shape")) {
if (InputSize() == 0) {
CAFFE_ENFORCE_GT(db_type_.size(), 0, "Must specify a db type.");
if (db_names_.empty()) {
CAFFE_ENFORCE_GT(db_name_.size(), 0, "Must specify a db name.");
db_names_.push_back(db_name_);
db_name_ = "";
} else {
std::set<std::string> db_name_set;
for (const string& db_name : db_names_) {
CAFFE_ENFORCE_GT(db_name.size(), 0, "Db name should not be empty.");
CAFFE_ENFORCE(
db_name_set.insert(db_name).second,
"Duplicated db name: ",
db_name);
}
db_name_ = "";
}
}
CAFFE_ENFORCE(
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
blob_names_.empty() || blob_names_.size() == OutputSize(),
"Number of output blobs and source_blob_names mismatch.");
CAFFE_ENFORCE(
blob_names_.empty() || strip_prefix_.empty(),
"strip_prefix and source_blob_names are mutually exclusive.");
CAFFE_ENFORCE(
blob_names_.empty() || !load_all_,
"cannot load_all_ while using source_blob_names.");
if (!load_all_) {
      // blob_names_ will be filled with the "source blob names" found in the
      // file/db. If the source_blob_names argument is not given, blob_names_
      // is inferred from the operator outputs.
if (blob_names_.empty()) {
for (const string& name : operator_def.output()) {
blob_names_.push_back(name);
}
}
int idx = 0;
std::set<std::string> name_set;
for (const string& name : blob_names_) {
CAFFE_ENFORCE(
name_set.insert(name).second,
"Duplicated source blob name: ",
name);
output_indices_[name] = idx++;
}
}
}
void SetCurrentDevice(BlobProto* proto);
bool RunOnDevice() override {
int total_loaded_blobs = 0;
std::unordered_map<string, load_save_op_util::BlobState> blob_states;
if (InputSize() > 0) {
for (const auto i : c10::irange(InputSize())) {
const db::DBReader& reader = this->template Input<db::DBReader>(i);
extract(i, reader.cursor(), &blob_states, &total_loaded_blobs);
}
} else {
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto i : c10::irange(db_names_.size())) {
string full_db_name = absolute_path_
? db_names_[i]
: (ws_->RootFolder() + "/" + db_names_[i]);
std::unique_ptr<DB> in_db(
caffe2::db::CreateDB(db_type_, full_db_name, caffe2::db::READ));
if (!db_options_.empty()) {
in_db->SetOptions(db_options_);
}
CAFFE_ENFORCE(
in_db.get(),
"Cannot find db implementation of type ",
db_type_,
" (while trying to open ",
full_db_name,
")");
std::unique_ptr<Cursor> cursor(in_db->NewCursor());
extract(i, cursor.get(), &blob_states, &total_loaded_blobs);
}
}
load_save_op_util::validateBlobStates(blob_states);
// Loaded all the needed blobs.
if (!load_all_ && total_loaded_blobs == OutputSize()) {
VLOG(1) << "Loaded " << total_loaded_blobs << " blobs fully from db(s)";
return true;
}
if (load_all_) {
for (const string& name : this->debug_def().output()) {
CAFFE_ENFORCE(
blob_states.count(name),
"Output blob name ",
name,
" does not exist in the db(s).");
}
return true;
}
// Only loaded a subset of the blobs.
if (allow_incomplete_) {
VLOG(1) << "Loaded " << total_loaded_blobs << " blobs out of "
<< OutputSize() << " blobs from db(s).";
for (const auto& output_index : output_indices_) {
if (!blob_states.count(output_index.first)) {
const auto& blobName = output_index.first;
const auto* blob = ws_->GetBlob(output_index.first);
          if (blob == nullptr || blob->GetRaw() == nullptr) {
// If blob was not loaded in this op and
// it did not exist in the workspace before,
// remove it.
ws_->RemoveBlob(blobName);
}
}
}
} else {
for (const string& output_name : this->debug_def().output()) {
if (blob_states.count(output_name) == 0) {
LOG(ERROR) << "Failed to load blob: " << output_name;
}
}
CAFFE_THROW(
"Expected to load ",
OutputSize(),
" blobs, got ",
total_loaded_blobs,
" only.\n");
}
return true;
}
private:
void extract(
int db_id,
Cursor* cursor,
std::unordered_map<string, load_save_op_util::BlobState>* blob_states,
int* total_loaded_blobs) {
if (load_all_) {
extractAll(db_id, cursor, blob_states, total_loaded_blobs);
} else {
extractFrom(
db_id,
cursor,
OperatorBase::Outputs(),
blob_states,
total_loaded_blobs);
}
}
void extractAll(
int db_id,
Cursor* cursor,
std::unordered_map<string, load_save_op_util::BlobState>* blob_states,
int* total_loaded_blobs) {
CAFFE_ENFORCE(cursor, "cursor is not valid");
int loaded_blobs = 0;
for (; cursor->Valid(); cursor->Next()) {
const auto key = load_save_op_util::buildBlobNameFromDbKey(
cursor->key(), strip_prefix_, add_prefix_);
if (key_to_dbid_.count(key) && key_to_dbid_[key] != db_id) {
CAFFE_THROW("Duplicate Key ", key, " is found!\n");
} else {
key_to_dbid_[key] = db_id;
}
BlobProto proto;
CAFFE_ENFORCE(
proto.ParseFromString(cursor->value()), "Couldn't parse Proto");
if (!keep_device_) {
// If we are not keeping the device as the one specified in the
// proto, we will set the current device.
SetCurrentDevice(&proto);
}
Blob* blob = ws_->CreateBlob(key);
load_save_op_util::ProcessBlob(
blob, proto, blob_states, key, &loaded_blobs);
}
*total_loaded_blobs += loaded_blobs;
}
void extractFrom(
int db_id,
Cursor* cursor,
const vector<Blob*>& outputs,
std::unordered_map<string, load_save_op_util::BlobState>* blob_states,
int* total_loaded_blobs) {
CAFFE_ENFORCE(cursor);
int loaded_blobs = 0;
for (; cursor->Valid(); cursor->Next()) {
const auto key = load_save_op_util::buildBlobNameFromDbKey(
cursor->key(), strip_prefix_, add_prefix_);
if (!output_indices_.count(key)) {
VLOG(1) << "Key " << key << " not used. Skipping.";
} else {
if (key_to_dbid_.count(key) && key_to_dbid_[key] != db_id) {
CAFFE_THROW("Duplicate Key ", key, " is found!\n");
} else {
key_to_dbid_[key] = db_id;
}
VLOG(2) << "Deserializing blob " << key;
BlobProto proto;
CAFFE_ENFORCE(proto.ParseFromString(cursor->value()));
if (!keep_device_) {
// If we are not keeping the device as the one specified in the
// proto, we will set the current device.
SetCurrentDevice(&proto);
}
auto blobIndex = output_indices_[key];
Blob* blob = outputs.at(blobIndex);
load_save_op_util::ProcessBlob(
blob, proto, blob_states, key, &loaded_blobs);
if (*total_loaded_blobs + loaded_blobs == OutputSize()) {
break;
}
}
}
*total_loaded_blobs += loaded_blobs;
}
private:
Workspace* ws_;
bool absolute_path_;
string add_prefix_;
string strip_prefix_;
string db_name_;
std::vector<std::string> db_names_;
string db_type_;
std::string db_options_;
bool keep_device_;
bool load_all_;
bool allow_incomplete_;
std::map<string, int> output_indices_;
std::map<string, int> key_to_dbid_;
std::vector<std::string> blob_names_;
std::vector<int64_t> shape_;
};
namespace internal {
class TORCH_API SaveOpImpl {
public:
SaveOpImpl(OperatorBase* op, const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice();
private:
OperatorBase* operator_;
std::string strip_prefix_;
std::string full_db_name_;
std::string db_type_;
std::string db_options_;
std::vector<std::string> blob_names_;
SerializationOptions options_;
};
} // namespace internal
template <class Context>
class SaveOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
explicit SaveOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws), impl_(this, operator_def, ws) {}
bool RunOnDevice() override {
return impl_.RunOnDevice();
}
private:
internal::SaveOpImpl impl_;
};
template <typename... Ts>
std::string FormatString(const std::string& pattern, Ts... values) {
// Start with an initial buffer size that is probably enough most of the time.
std::string buffer(256, '\0');
auto bytes_written =
snprintf(&buffer[0], buffer.size(), pattern.c_str(), values...);
if (bytes_written < 0) {
throw std::runtime_error("FormatString failed");
}
  // NOLINTNEXTLINE(clang-diagnostic-sign-compare)
  if (bytes_written >= buffer.size()) {
    // Our initial buffer size wasn't enough (equality means the terminating
    // nul displaced the last character); resize and run again.
buffer.resize(bytes_written + 1);
bytes_written =
snprintf(&buffer[0], buffer.size(), pattern.c_str(), values...);
if (bytes_written < 0) {
throw std::runtime_error("FormatString failed");
}
}
// Truncate the string to the correct size to trim off the nul terminator.
buffer.resize(bytes_written);
return buffer;
}
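// Usage sketch (illustrative path, not from any config):
//   std::string path = FormatString("/tmp/checkpoint_at_%d.pb", 100);
//   // path == "/tmp/checkpoint_at_100.pb"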
// CheckpointOp is a wrapper over a SaveOp that allows flexible naming
// over iterations.
// The file pattern in db_name should be a format string that can be passed into
// sprintf with an int argument specifying the current iteration. An example:
// "/path/to/my/checkpoint/checkpoint_at_%d.pb"
template <class Context>
class CheckpointOp final : public Operator<Context> {
public:
explicit CheckpointOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
db_pattern_(this->template GetSingleArgument<string>("db", "")),
every_(this->template GetSingleArgument<int>("every", 1)),
ws_(ws),
save_op_def_(operator_def) {
CAFFE_ENFORCE_GT(
db_pattern_.size(), 0, "Must specify a checkpoint file pattern.");
CAFFE_ENFORCE_GT(every_, 0, "Checkpoint interval should be positive.");
if (every_ == 1) {
// Just issue a warning, but it's totally legal so we don't do anything.
LOG(WARNING) << "It seems that we are checkpointing every iteration. "
<< "Is that intended?";
}
save_op_def_.set_type("Save");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
int64_t iter =
this->template Input<Tensor>(0, CPU).template data<int64_t>()[0];
if (iter % every_ == 0) {
GetMutableArgument("db", true, &save_op_def_)
->set_s(FormatString(db_pattern_, iter));
SaveOp<Context> sub_op(save_op_def_, ws_);
return sub_op.Run();
} else {
return true;
}
}
private:
string db_pattern_;
int every_;
Workspace* ws_;
OperatorDef save_op_def_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LOAD_SAVE_OP_H_
| 14,136
| 32.030374
| 80
|
h
|
null |
pytorch-main/caffe2/operators/load_save_op_util.h
|
#ifndef CAFFE2_OPERATORS_LOAD_SAVE_OP_UTIL_H_
#define CAFFE2_OPERATORS_LOAD_SAVE_OP_UTIL_H_
#include <set>
#include <string>
#include <unordered_map>
#include "caffe2/core/blob.h"
#include "caffe2/core/blob_serialization.h"
namespace caffe2 {
namespace load_save_op_util {
struct BlobState {
int64_t total_size;
int64_t current_size;
bool is_tensor;
std::set<int32_t> seen_chunks_ids;
explicit BlobState(
int64_t total_size = 0,
int64_t current_size = 0,
bool is_tensor = false)
: total_size(total_size),
current_size(current_size),
is_tensor(is_tensor) {}
};
TORCH_API std::string buildBlobNameFromDbKey(
const std::string& dbKey,
const std::string& strip_prefix = "",
const std::string& add_prefix = "");
// We track the sizes of already-read tensor parts while reading data
// chunks. This way we can make sure that all chunks were loaded in the end.
TORCH_API void ProcessBlob(
Blob* blob,
const BlobProto& proto,
std::unordered_map<std::string, BlobState>* blob_states_ptr,
const std::string& key,
int* loaded_blobs);
TORCH_API void prepareBlob(
Blob* blob,
std::unordered_map<std::string, BlobState>* blob_states_ptr,
const std::string& key);
TORCH_API void updateBlobStates(
const BlobProto& proto,
std::unordered_map<std::string, BlobState>* blob_states_ptr,
const std::string& key,
int* loaded_blobs);
TORCH_API void validateBlobStates(
const std::unordered_map<std::string, BlobState>& blob_states);
} // namespace load_save_op_util
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LOAD_SAVE_OP_UTIL_H_
| 1,642
| 25.934426
| 76
|
h
|
null |
pytorch-main/caffe2/operators/local_response_normalization_op.h
|
#ifndef CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_
#define CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class LRNOpBase : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LRNOpBase(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
size_(this->template GetSingleArgument<int>("size", 0)),
alpha_(this->template GetSingleArgument<float>("alpha", 0)),
beta_(this->template GetSingleArgument<float>("beta", 0)),
bias_(this->template GetSingleArgument<float>("bias", 1)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))),
pre_pad_((size_ - 1) / 2) {
TORCH_DCHECK_GT(size_, 0);
TORCH_DCHECK_EQ(size_ % 2, 1);
TORCH_DCHECK_GT(alpha_, 0);
TORCH_DCHECK_GT(beta_, 0);
}
bool RunOnDevice() override {
switch (order_) {
case StorageOrder::NHWC:
return RunOnDeviceWithOrderNHWC();
case StorageOrder::NCHW:
return RunOnDeviceWithOrderNCHW();
default:
LOG(FATAL) << "Unknown storage order: " << order_;
}
// To suppress old compiler warnings
return true;
}
virtual bool RunOnDeviceWithOrderNCHW() = 0;
virtual bool RunOnDeviceWithOrderNHWC() = 0;
protected:
const int size_;
const float alpha_;
const float beta_;
const float bias_;
const StorageOrder order_;
const int pre_pad_;
// Input: X; Output: Y, scale.
};
template <typename T, class Context>
class LRNOp final : public LRNOpBase<T, Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LRNOp(Args&&... args)
: LRNOpBase<T, Context>(std::forward<Args>(args)...) {}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
protected:
// Input: X; Output: Y, scale.
OUTPUT_TAGS(OUTPUT, SCALE);
Tensor* scale_ = nullptr;
Tensor local_scale_tensor_{Context::GetDeviceType()};
};
template <typename T, class Context>
class LRNGradientOp final : public LRNOpBase<T, Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LRNGradientOp(Args&&... args)
: LRNOpBase<T, Context>(std::forward<Args>(args)...) {}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
protected:
// Input: X, Y, scale, dY; Output: dX
INPUT_TAGS(INPUT, OUTPUT, SCALE, OUTPUT_GRAD);
Tensor* scale_ = nullptr;
Tensor local_scale_tensor_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LOCAL_RESPONSE_NORMALIZATION_OP_H_
| 2,828
| 28.46875
| 72
|
h
|
null |
pytorch-main/caffe2/operators/locally_connected_op.h
|
#ifndef CAFFE2_OPERATORS_LOCALLY_CONNECTED_OP_H_
#define CAFFE2_OPERATORS_LOCALLY_CONNECTED_OP_H_
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_op_shared.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/operators/locally_connected_op_util.h"
namespace caffe2 {
template <typename T, class Context>
class LocallyConnectedOp final : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
template <class... Args>
explicit LocallyConnectedOp(Args&&... args)
: ConvPoolOpBase<Context>(std::forward<Args>(args)...) {
// Since this is the default locally connected implementation, we will
// use CAFFE_ENFORCE instead of OPERATOR_NEEDS_FEATURE.
CAFFE_ENFORCE(
group_ == 1 || order_ == StorageOrder::NCHW,
"Group locally connected only supports NCHW order right now.");
}
~LocallyConnectedOp() override = default;
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
void RunOnDeviceWithOrderNCHWImpl(
const lc_op_util::ShapeParams& shape,
const T* X_data,
const T* filter_data,
const T* bias_data,
T* Y_data,
Tensor* column_buffer,
Tensor* column_transposed_buffer,
Tensor* output_buffer);
void RunOnDeviceWithOrderNHWCImpl(
const lc_op_util::ShapeParams& shape,
const T* X_data,
const T* filter_data,
const T* bias_data,
T* Y_data,
Tensor* column_buffer,
Tensor* column_transposed_buffer,
Tensor* Y_transposed_buffer);
Tensor bias_multiplier_{Context::GetDeviceType()};
// Buffer.
Tensor column_buffer_{Context::GetDeviceType()};
Tensor column_transposed_buffer_{Context::GetDeviceType()};
Tensor Y_transposed_buffer_{Context::GetDeviceType()};
// Input: X, W, b
// Output: Y
INPUT_TAGS(INPUT, FILTER, BIAS);
};
template <typename T, class Context>
class LocallyConnectedGradientOp final : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
template <class... Args>
explicit LocallyConnectedGradientOp(Args&&... args)
: ConvPoolOpBase<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "no_bias", no_bias_, false) {
CAFFE_ENFORCE(
!(no_bias_ && OutputSize() == 3),
"If bias is not present, you should not have 3 grad output.");
CAFFE_ENFORCE(
group_ == 1 || order_ == StorageOrder::NCHW,
"Group locally connected only supports NCHW order right now.");
}
~LocallyConnectedGradientOp() override = default;
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
void RunOnDeviceWithOrderNCHWImpl(
const lc_op_util::ShapeParams& shape,
const T* X_data,
const T* filter_data,
const T* dY_data,
T* dfilter_data,
T* dX_data,
T* dbias_data,
Tensor* column_buffer,
Tensor* column_transposed_buffer,
Tensor* dY_transposed_buffer);
void RunOnDeviceWithOrderNHWCImpl(
const lc_op_util::ShapeParams& shape,
const T* X_data,
const T* filter_data,
const T* dY_data,
T* dfilter_data,
T* dX_data,
T* dbias_data,
Tensor* column_buffer,
Tensor* column_transposed_buffer,
Tensor* dY_transposed_buffer);
const bool no_bias_;
Tensor bias_multiplier_{Context::GetDeviceType()};
// Buffer.
Tensor column_buffer_{Context::GetDeviceType()};
Tensor column_transposed_buffer_{Context::GetDeviceType()};
Tensor dY_transposed_buffer_{Context::GetDeviceType()};
// input: X, W, dY
// output: dW, db, and optionally dX
INPUT_TAGS(INPUT, FILTER, OUTPUT_GRAD);
OUTPUT_TAGS(FILTER_GRAD, BIAS_OR_INPUT_GRAD, INPUT_GRAD);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LOCALLY_CONNECTED_OP_H_
| 3,890
| 28.477273
| 74
|
h
|
null |
pytorch-main/caffe2/operators/locally_connected_op_util.h
|
#ifndef CAFFE2_OPERATORS_LOCALLY_CONNECTED_OP_UTIL_H_
#define CAFFE2_OPERATORS_LOCALLY_CONNECTED_OP_UTIL_H_
#include <vector>
#include "caffe2/core/types.h"
namespace caffe2 {
namespace lc_op_util {
struct ShapeParams {
int N;
int C;
int M;
int input_image_size;
int output_image_size;
int kernel_size;
std::vector<int> X_dims;
std::vector<int> column_slice_dims;
std::vector<int> column_dims;
std::vector<int> column_transposed_dims;
std::vector<int> column_axes;
std::vector<int> Y_dims;
std::vector<int> Y_transposed_dims;
std::vector<int> Y_axes;
};
struct CUDAConvNetShapeParams {
int N;
int C;
int M;
int X_H;
int X_W;
int Y_H;
int Y_W;
};
TORCH_API void SetColumnBufferShape(
int N,
int kernel_dim,
int output_image_size,
const std::vector<int>& output_image_dims,
StorageOrder order,
std::vector<int>* column_slice_dims,
std::vector<int>* column_dims,
std::vector<int>* column_transposed_dims,
std::vector<int>* column_axes);
TORCH_API void SetYBufferShape(
int N,
int M,
int output_image_size,
StorageOrder order,
std::vector<int>* Y_dims,
std::vector<int>* Y_transposed_dims,
std::vector<int>* Y_axes);
} // namespace lc_op_util
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LOCALLY_CONNECTED_OP_UTIL_H_
| 1,332
| 20.5
| 55
|
h
|
null |
pytorch-main/caffe2/operators/logit_op.h
|
#ifndef CAFFE2_OPERATORS_LOGIT_OP_H_
#define CAFFE2_OPERATORS_LOGIT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/elementwise_ops.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(Logit)
namespace caffe2 {
template <class Context>
struct LogitFunctor {
explicit LogitFunctor(OperatorBase& op)
: eps_(op.GetSingleArgument<float>("eps", 1e-6f)) {
CAFFE_ENFORCE_GT(eps_, 0.0);
CAFFE_ENFORCE_LT(eps_, 0.5);
}
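  // Applies Y = log(X' / (1 - X')) elementwise, where X' is X clamped to
  // [eps, 1 - eps]; the checks above keep that interval non-empty.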
template <typename T>
bool operator()(const int size, const T* X, T* Y, Context* context) const;
const float eps_;
};
template <typename T, class Context>
class LogitGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LogitGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
eps_(this->template GetSingleArgument<float>("eps", 1e-6f)) {}
~LogitGradientOp() override {}
bool RunOnDevice() override;
protected:
float eps_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LOGIT_OP_H_
| 1,138
| 23.76087
| 76
|
h
|
null |
pytorch-main/caffe2/operators/loss_op.h
|
#ifndef CAFFE2_OPERATORS_LOSS_OP_H_
#define CAFFE2_OPERATORS_LOSS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/reduction_ops.h"
#include "caffe2/operators/utility_ops.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// AveragedLoss takes in the input and produces the output loss value as
// the average of the input.
template <typename T, class Context>
class AveragedLoss final : public SumElementsOp<T, Context> {
public:
template <class... Args>
explicit AveragedLoss(Args&&... args)
: SumElementsOp<T, Context>(std::forward<Args>(args)..., true) {}
~AveragedLoss() {}
};
template <typename T, class Context>
class AveragedLossGradient final : public SumElementsGradientOp<T, Context> {
public:
template <class... Args>
explicit AveragedLossGradient(Args&&... args)
: SumElementsGradientOp<T, Context>(std::forward<Args>(args)..., true) {}
~AveragedLossGradient() {}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LOSS_OP_H_
| 1,058
| 28.416667
| 79
|
h
|
null |
pytorch-main/caffe2/operators/lpnorm_op.h
|
#ifndef CAFFE2_OPERATORS_LPNORM_OP_H_
#define CAFFE2_OPERATORS_LPNORM_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class LpNormOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LpNormOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "p", p_, 2),
OP_SINGLE_ARG(bool, "average", average_, false) {
CAFFE_ENFORCE(p_ == 1 || p_ == 2, "p should be either 1 or 2.");
}
bool RunOnDevice() override;
protected:
const int p_;
const bool average_;
};
template <typename T, class Context>
class LpNormGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit LpNormGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "p", p_, 2),
OP_SINGLE_ARG(bool, "average", average_, false) {
CAFFE_ENFORCE(p_ == 1 || p_ == 2, "p should be either 1 or 2.");
}
bool RunOnDevice() override;
protected:
const int p_;
const bool average_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LPNORM_OP_H_
| 1,279
| 24.098039
| 68
|
h
|
null |
pytorch-main/caffe2/operators/lstm_unit_op.h
|
#ifndef CAFFE2_OPERATORS_LSTM_UNIT_OP_H_
#define CAFFE2_OPERATORS_LSTM_UNIT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/perfkernels/lstm_unit_cpu.h"
#include "caffe2/utils/conversions.h"
namespace caffe2 {
namespace detail {
template <typename T, typename Context>
inline void LSTMUnit(
const int N,
const int D,
const int t,
const T* H_prev,
const T* C_prev,
const T* X,
const int32_t* seqLengths,
const bool drop_states,
T* C,
T* H,
const float forget_bias,
Context* /*context*/) {
LstmUnitCpu<T>(
N, D, t, H_prev, C_prev, X, seqLengths, drop_states, C, H, forget_bias);
}
template <typename T, typename Context>
inline void LSTMUnitGradient(
int N,
int D,
int t,
const T* C_prev,
const T* X,
const int32_t* seqLengths,
const T* C,
const T* H,
const T* C_diff,
const T* H_diff,
bool drop_states,
T* H_prev_diff,
T* C_prev_diff,
T* X_diff,
const float forget_bias,
Context* /*context*/) {
LstmUnitGradientCpu<T>(
N,
D,
t,
C_prev,
X,
seqLengths,
C,
H,
C_diff,
H_diff,
drop_states,
H_prev_diff,
C_prev_diff,
X_diff,
forget_bias);
}
} // namespace detail
template <typename Context>
class LSTMUnitOp : public Operator<Context> {
public:
explicit LSTMUnitOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
forget_bias_(static_cast<float>(
this->template GetSingleArgument<float>("forget_bias", 0.0))),
sequence_lengths_(
this->template GetSingleArgument<bool>("sequence_lengths", true)),
drop_states_(
this->template GetSingleArgument<bool>("drop_states", false)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
using Operator<Context>::Operator;
template <typename T>
bool DoRunWithType() {
// handle potentially-missing sequence lengths input
const size_t TIMESTEP = SEQ_LENGTHS + (sequence_lengths_ ? 1 : 0);
// Extract N
const auto N = Input(CELL_T_M_1).size(1);
// Gates: 1xNxG
const auto G = Input(GATES).size(2);
const auto D = Input(CELL_T_M_1).size(2);
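    // The gate blob packs four gates (input, forget, output, cell candidate)
    // per hidden unit, so its innermost dimension G must equal 4 * D.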
CAFFE_ENFORCE_EQ(4 * D, G);
const auto* H_prev = Input(HIDDEN_T_M_1).template data<T>();
const auto* C_prev = Input(CELL_T_M_1).template data<T>();
const auto* X = Input(GATES).template data<T>();
const int32_t* seqLengths = nullptr;
if (sequence_lengths_) {
CAFFE_ENFORCE_EQ(Input(SEQ_LENGTHS).numel(), N);
seqLengths = Input(SEQ_LENGTHS).template data<int32_t>();
}
const auto t = static_cast<OperatorBase*>(this)
->Input<Tensor>(TIMESTEP, CPU)
.template data<int32_t>()[0];
Output(CELL_T)->ResizeLike(Input(CELL_T_M_1));
auto* C = Output(CELL_T)->template mutable_data<T>();
Output(HIDDEN_T)->ResizeLike(Input(CELL_T_M_1));
auto* H = Output(HIDDEN_T)->template mutable_data<T>();
detail::LSTMUnit<T, Context>(
N,
D,
t,
H_prev,
C_prev,
X,
seqLengths,
drop_states_,
C,
H,
forget_bias_,
&context_);
return true;
}
bool RunOnDevice() override {
return DoRunWithType<float>();
}
protected:
INPUT_TAGS(HIDDEN_T_M_1, CELL_T_M_1, GATES, SEQ_LENGTHS);
// additional input tags are determined dynamically based on whether
// sequence_lengths is present.
OUTPUT_TAGS(HIDDEN_T, CELL_T);
float forget_bias_;
bool sequence_lengths_;
private:
bool drop_states_;
};
template <typename Context>
class LSTMUnitGradientOp : public Operator<Context> {
public:
template <class... Args>
explicit LSTMUnitGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
forget_bias_(static_cast<float>(
this->template GetSingleArgument<float>("forget_bias", 0.0))),
sequence_lengths_(
this->template GetSingleArgument<bool>("sequence_lengths", true)),
drop_states_(
this->template GetSingleArgument<bool>("drop_states", false)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <typename T>
bool DoRunWithType() {
// handle potentially-missing sequence lengths input
const size_t inputOffset = SEQ_LENGTHS + (sequence_lengths_ ? 1 : 0);
const size_t TIMESTEP = inputOffset;
const size_t HIDDEN_T = inputOffset + 1;
const size_t CELL_T = inputOffset + 2;
const size_t HIDDEN_T_GRAD = inputOffset + 3;
const size_t CELL_T_GRAD = inputOffset + 4;
// Extract N
const auto N = Input(CELL_T_M_1).size(1);
// Gates: 1xNxG
const auto G = Input(GATES).size(2);
const auto D = Input(CELL_T_M_1).size(2);
CAFFE_ENFORCE_EQ(4 * D, G);
const auto* C_prev = Input(CELL_T_M_1).template data<T>();
const auto* X = Input(GATES).template data<T>();
const auto t = static_cast<OperatorBase*>(this)
->Input<Tensor>(TIMESTEP, CPU)
.template data<int32_t>()[0];
const auto* C = Input(CELL_T).template data<T>();
const auto* H = Input(HIDDEN_T).template data<T>();
const auto* C_diff = Input(CELL_T_GRAD).template data<T>();
const auto* H_diff = Input(HIDDEN_T_GRAD).template data<T>();
const int32_t* seqLengths = nullptr;
if (sequence_lengths_) {
CAFFE_ENFORCE_EQ(Input(SEQ_LENGTHS).numel(), N);
seqLengths = Input(SEQ_LENGTHS).template data<int32_t>();
}
Output(HIDDEN_T_M_1_GRAD)->ResizeLike(Input(HIDDEN_T_M_1));
auto* H_prev_diff = Output(HIDDEN_T_M_1_GRAD)->template mutable_data<T>();
Output(CELL_T_M_1_GRAD)->ResizeLike(Input(CELL_T_M_1));
auto* C_prev_diff = Output(CELL_T_M_1_GRAD)->template mutable_data<T>();
Output(GATES_GRAD)->ResizeLike(Input(GATES));
auto* X_diff = Output(GATES_GRAD)->template mutable_data<T>();
detail::LSTMUnitGradient<T, Context>(
N,
D,
t,
C_prev,
X,
seqLengths,
C,
H,
C_diff,
H_diff,
drop_states_,
H_prev_diff,
C_prev_diff,
X_diff,
forget_bias_,
&context_);
return true;
}
bool RunOnDevice() override {
return DoRunWithType<float>();
}
protected:
INPUT_TAGS(HIDDEN_T_M_1, CELL_T_M_1, GATES, SEQ_LENGTHS);
// additional input tags are determined dynamically based on whether
// sequence_lengths is present.
OUTPUT_TAGS(HIDDEN_T_M_1_GRAD, CELL_T_M_1_GRAD, GATES_GRAD);
float forget_bias_;
bool sequence_lengths_;
private:
bool drop_states_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LSTM_UNIT_OP_H_
| 6,733
| 27.294118
| 78
|
h
|
null |
pytorch-main/caffe2/operators/lstm_utils.h
|
#pragma once
#include <algorithm>
#include <vector>
#include "caffe2/core/tensor.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
using t_tuple = std::tuple<Tensor, Tensor>;
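// caffe2::Tensor cannot be copied via its copy constructor; copy_ctor makes a
// shallow copy that shares storage (UnsafeSharedInstance), and the overloads
// below lift that behavior to tuples, pairs, and vectors of tensors.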
template <typename T>
T copy_ctor(const T& x) {
return x;
}
template <>
Tensor copy_ctor(const Tensor& X) {
return X.UnsafeSharedInstance();
}
template <>
t_tuple copy_ctor(const t_tuple& X) {
return std::make_tuple(copy_ctor(std::get<0>(X)), copy_ctor(std::get<1>(X)));
}
template <>
std::pair<t_tuple, t_tuple> copy_ctor(const std::pair<t_tuple, t_tuple>& X) {
return std::make_pair(copy_ctor(X.first), copy_ctor(X.second));
}
template <>
std::vector<Tensor> copy_ctor(const std::vector<Tensor>& X) {
std::vector<Tensor> Y(X.size());
std::transform(X.begin(), X.end(), Y.begin(), [](const Tensor& x) {
return copy_ctor(x);
});
return Y;
}
template <>
std::vector<t_tuple> copy_ctor(const std::vector<t_tuple>& X) {
std::vector<t_tuple> Y(X.size());
std::transform(X.begin(), X.end(), Y.begin(), [](const t_tuple& x) {
return copy_ctor(x);
});
return Y;
}
template <>
std::vector<std::pair<t_tuple, t_tuple>> copy_ctor(
const std::vector<std::pair<t_tuple, t_tuple>>& X) {
std::vector<std::pair<t_tuple, t_tuple>> Y(X.size());
std::transform(
X.begin(), X.end(), Y.begin(), [](const std::pair<t_tuple, t_tuple>& x) {
return copy_ctor(x);
});
return Y;
}
// Gathers every two elements of a vector into a vector of pairs
template <typename T>
static std::vector<std::pair<T, T>> pair_vec(const std::vector<T>& vals) {
CAFFE_ENFORCE_EQ(
vals.size() % 2,
0,
"Odd number of params or hiddens given to a bidirectional RNN");
std::vector<std::pair<T, T>> result;
result.reserve(vals.size() / 2);
for (int64_t i = 0; i < vals.size(); i += 2) {
result.emplace_back(copy_ctor(vals[i]), copy_ctor(vals[i + 1]));
}
return result;
}
// Flattens a vector of pairs
template <typename T>
static std::vector<T> unpair_vec(std::vector<std::pair<T, T>>&& vals) {
std::vector<T> result;
result.reserve(vals.size() * 2);
for (const auto i : c10::irange(vals.size())) {
result.push_back(std::move(vals[i].first));
result.push_back(std::move(vals[i].second));
}
return result;
}
Tensor matmul(const Tensor& X, const Tensor& W, CPUContext* context) {
const auto canonical_axis = X.canonical_axis_index(1);
const auto M = X.size_to_dim(canonical_axis);
const auto K = X.size_from_dim(canonical_axis);
const auto canonical_axis_w = W.canonical_axis_index(1);
const int N = W.size_to_dim(canonical_axis_w);
auto output_size = X.sizes().vec();
output_size.resize(canonical_axis + 1);
output_size[canonical_axis] = N;
Tensor C(output_size, CPU);
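  // W is multiplied transposed (CblasTrans), so this computes C = X * W^T,
  // i.e. a fully connected layer with the weight stored as an [N, K] matrix.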
math::Gemm<float, CPUContext>(
CblasNoTrans,
CblasTrans,
M,
N,
K,
1,
X.template data<float>(),
W.template data<float>(),
0,
C.template mutable_data<float>(),
context);
return C;
}
Tensor
linear(const Tensor& X, const Tensor& W, const Tensor& B, CPUContext* context) {
auto output = matmul(X, W, context);
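  // Broadcast-add the bias with a rank-1 GEMM: a column of M ones times
  // B (1 x N) is accumulated into the M x N output.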
if (B) {
const auto canonical_axis = X.canonical_axis_index(1);
const auto M = X.size_to_dim(canonical_axis);
const auto canonical_axis_w = W.canonical_axis_index(1);
const int N = W.size_to_dim(canonical_axis_w);
auto bias_multiplier_ = caffe2::empty({M}, CPU);
math::Set<float, CPUContext>(
M, 1, bias_multiplier_.template mutable_data<float>(), context);
math::Gemm<float, CPUContext>(
CblasNoTrans,
CblasNoTrans,
M,
N,
1,
1,
bias_multiplier_.template data<float>(),
B.template data<float>(),
1,
output.template mutable_data<float>(),
context);
}
return output;
}
std::vector<Tensor>
chunk(const Tensor& input, int chunks, int axis, CPUContext* context) {
int canonical_axis = input.canonical_axis_index(axis);
CAFFE_ENFORCE_LT(
canonical_axis, input.dim(), "Axis not in input ndim range.");
const int input_channels = input.dim32(canonical_axis);
CAFFE_ENFORCE_EQ(
input_channels % chunks,
0,
"input channels should be divisible by the number of chunks.");
auto split_size = input_channels / chunks;
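  // Worked example: chunking a [2, 6, 3] tensor into 3 chunks along axis 1
  // yields three [2, 2, 3] tensors; here before = 2, after = 3, split_size = 2.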
vector<int64_t> output_dims(input.sizes().vec());
int before = 1, after = 1;
for (const auto i : c10::irange(canonical_axis)) {
before *= input.dim32(i);
}
for (int i = canonical_axis + 1; i < input.dim(); ++i) {
after *= input.dim32(i);
}
size_t input_offset = 0;
std::vector<Tensor> outputs;
for (const auto i : c10::irange(chunks)) {
(void)i; // Suppress unused variable warning
auto axis_dim = split_size;
output_dims[canonical_axis] = split_size;
Tensor output(output_dims, CPU);
math::CopyMatrix<CPUContext>(
input.itemsize(),
before,
axis_dim * after,
static_cast<const char*>(input.raw_data()) + input_offset,
input.dim32(canonical_axis) * after,
output.raw_mutable_data(input.dtype()),
axis_dim * after,
context,
input.dtype().copy());
input_offset += axis_dim * after * input.itemsize();
outputs.push_back(std::move(output));
}
return outputs;
}
std::vector<Tensor> unbind(const Tensor& input, int axis, CPUContext* context) {
// 1 - Chunk the input tensor along the given axis into N chunks where
// N is the dim(axis)
auto chunks = chunk(input, input.sizes()[axis], axis, context);
// 2 - Compute new dimensions
std::vector<int64_t> newDims = input.sizes().vec();
newDims.erase(newDims.begin() + axis);
// 3 - Reshape chunks to drop the extra dimension
for (const auto i : c10::irange(chunks.size())) {
CAFFE_ENFORCE_EQ(
chunks[i].sizes()[axis], 1, "Got an unexpected chunk size");
chunks[i].Reshape(newDims);
}
return chunks;
}
Tensor
cat(const std::vector<Tensor>& tensorList, int axis, CPUContext* context) {
  // Adapted from C2's concat operator
auto input_zero = copy_ctor(tensorList.at(0));
vector<int64_t> outputDims(input_zero.sizes().vec());
CAFFE_ENFORCE(outputDims.size() > 0);
for (const auto i : c10::irange(1, tensorList.size())) {
CAFFE_ENFORCE(input_zero.dtype() == tensorList.at(i).dtype());
outputDims[axis] += tensorList.at(i).sizes()[axis];
}
auto output_channels = outputDims[axis];
Tensor output(outputDims, CPU);
int before = 1, after = 1;
for (const auto i : c10::irange(tensorList.at(0).dim())) {
if (i == axis) {
continue;
}
int dim = input_zero.dim32(i);
if (i < axis) {
before *= dim;
} else {
after *= dim;
}
}
size_t output_offset = 0;
for (const auto& input : tensorList) {
auto axis_dim = input.dim32(axis);
math::CopyMatrix<CPUContext>(
input.itemsize(),
before,
axis_dim * after,
input.raw_data(),
axis_dim * after,
static_cast<char*>(output.raw_mutable_data(input_zero.dtype())) +
output_offset,
output_channels * after,
context,
input_zero.dtype().copy());
output_offset += axis_dim * after * input.itemsize();
}
return output;
}
Tensor
stack(const std::vector<Tensor>& tensorList, int axis, CPUContext* context) {
// 1 - Compute new dimensions
std::vector<int64_t> newDims(tensorList[0].sizes().vec());
std::vector<Tensor> expandedTensorList;
newDims.insert(newDims.begin() + axis, 1);
for (const auto i : c10::irange(tensorList.size())) {
expandedTensorList.emplace_back(tensorList[i].Clone());
expandedTensorList.at(i).Reshape(newDims);
}
return cat(expandedTensorList, axis, context);
}
Tensor sigmoid(const Tensor& X) {
Tensor Y(X.sizes(), CPU);
auto N = X.numel();
EigenVectorArrayMap<float>(Y.template mutable_data<float>(), N) = 1.0 /
(1.0 +
(-ConstEigenVectorArrayMap<float>(X.template data<float>(), N)).exp());
return Y;
}
Tensor tanh(const Tensor& X, CPUContext* context) {
Tensor Y(X.sizes(), CPU);
math::Tanh<float, CPUContext>(
X.numel(),
X.template data<float>(),
Y.template mutable_data<float>(),
context);
return Y;
}
Tensor add(const Tensor& X, const Tensor& Y, CPUContext* context) {
Tensor Z(X.sizes().vec(), CPU);
math::Add<float, CPUContext>(
X.numel(),
X.template data<float>(),
Y.template data<float>(),
Z.template mutable_data<float>(),
context);
return Z;
}
Tensor mul(const Tensor& X, const Tensor& Y, CPUContext* context) {
Tensor Z(X.sizes().vec(), CPU);
math::Mul<float, CPUContext>(
X.numel(),
X.template data<float>(),
Y.template data<float>(),
Z.template mutable_data<float>(),
context);
return Z;
}
Tensor transpose(const Tensor& X, int dim0, int dim1, CPUContext* context) {
int ndim = X.dim();
CAFFE_ENFORCE(ndim > dim0 && ndim > dim1, "Invalid transpose dimensions");
std::vector<int> axes(ndim);
std::iota(axes.begin(), axes.end(), 0);
std::swap(axes[dim0], axes[dim1]);
const std::vector<std::int64_t> X_dims = X.sizes().vec();
std::vector<std::int64_t> Y_dims(ndim);
for (const auto i : c10::irange(ndim)) {
Y_dims[i] = X_dims[axes[i]];
}
Tensor Y(Y_dims, CPU);
math::Transpose<std::int64_t, float, CPUContext>(
ndim,
X_dims.data(),
axes.data(),
X.template data<float>(),
Y.template mutable_data<float>(),
context);
return Y;
}
} // namespace
} // namespace caffe2
| 9,536
| 28.803125
| 80
|
h
|
null |
pytorch-main/caffe2/operators/map_ops.h
|
#ifndef CAFFE2_OPERATORS_MAP_OPS_H_
#define CAFFE2_OPERATORS_MAP_OPS_H_
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include <c10/util/irange.h>
#include <algorithm>
#include <iterator>
#include <string>
#include <typeinfo>
#include <unordered_map>
#include <utility>
#include <vector>
namespace caffe2 {
template <typename T>
struct TypeNameTraits {
static constexpr const char* name = "unknown";
};
template <>
struct TypeNameTraits<int64_t> {
static constexpr const char* name = "int64_t";
};
template <>
struct TypeNameTraits<int32_t> {
static constexpr const char* name = "int32_t";
};
template <typename KEY_T, typename VALUE_T>
struct MapTypeTraits {
using MapType = std::unordered_map<KEY_T, VALUE_T>;
static string MapTypeName() {
return string("(std::unordered_map<") + TypeNameTraits<KEY_T>::name + ", " +
TypeNameTraits<VALUE_T>::name + ">)";
}
};
using MapType64To64 = MapTypeTraits<int64_t, int64_t>::MapType;
using MapType64To32 = MapTypeTraits<int64_t, int32_t>::MapType;
using MapType32To32 = MapTypeTraits<int32_t, int32_t>::MapType;
using MapType32To64 = MapTypeTraits<int32_t, int64_t>::MapType;
template <class Context>
class CreateMapOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit CreateMapOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
~CreateMapOp() override {}
bool RunOnDevice() override {
TensorProto::DataType key_dtype = static_cast<TensorProto::DataType>(
this->template GetSingleArgument<int>(
"key_dtype", TensorProto_DataType_INT32));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, DataTypeToTypeMeta(key_dtype));
}
template <typename KEY_T>
bool DoRunWithType() {
TensorProto::DataType value_dtype = static_cast<TensorProto::DataType>(
this->template GetSingleArgument<int>(
"value_dtype", TensorProto_DataType_INT32));
return DispatchHelper<
TensorTypes2<int32_t, int64_t, GenericTensorImplementation>,
KEY_T>::call(this, DataTypeToTypeMeta(value_dtype));
}
template <typename KEY_T, typename VALUE_T>
bool DoRunWithType2() {
// clear to make sure the map is empty
this->template Output<typename MapTypeTraits<KEY_T, VALUE_T>::MapType>(MAP)
->clear();
return true;
}
template <typename KEY_T>
bool DoRunWithOtherType2() {
TensorProto::DataType value_dtype = static_cast<TensorProto::DataType>(
this->template GetSingleArgument<int>(
"value_dtype", TensorProto_DataType_INT32));
CAFFE_THROW(
"CreateMap is not implemented on value tensor of type ",
DataTypeToTypeMeta(value_dtype).name(),
"consider adding it as a type in the DispatchHelper list");
}
OUTPUT_TAGS(MAP);
};
template <class Context>
class KeyValueToMapOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit KeyValueToMapOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
~KeyValueToMapOp() override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(KEYS));
}
template <typename KEY_T>
bool DoRunWithType() {
return DispatchHelper<
TensorTypes2<int32_t, int64_t, GenericTensorImplementation>,
KEY_T>::call(this, Input(VALUES));
}
template <typename KEY_T, typename VALUE_T>
bool DoRunWithType2() {
using MapType = typename MapTypeTraits<KEY_T, VALUE_T>::MapType;
const auto& key_input = Input(KEYS);
const auto& value_input = Input(VALUES);
CAFFE_ENFORCE_EQ(key_input.numel(), value_input.numel());
auto* key_data = key_input.template data<KEY_T>();
auto* value_data = value_input.template data<VALUE_T>();
auto* map_data = this->template Output<MapType>(MAP);
for (const auto i : c10::irange(key_input.numel())) {
map_data->emplace(key_data[i], value_data[i]);
}
return true;
}
template <typename KEY_T>
bool DoRunWithOtherType2() {
CAFFE_THROW(
"KeyValueToMap is not implemented on value tensor of type ",
Input(VALUES).dtype().name(),
"consider adding it as a type in the DispatchHelper list");
}
INPUT_TAGS(KEYS, VALUES);
OUTPUT_TAGS(MAP);
};
template <class Context>
class MapToKeyValueOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit MapToKeyValueOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
~MapToKeyValueOp() override {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<
MapType64To64,
MapType64To32,
MapType32To32,
MapType32To64>>::call(this, OperatorBase::InputBlob(MAP));
}
template <typename MAP_T>
bool DoRunWithType() {
using key_type = typename MAP_T::key_type;
using mapped_type = typename MAP_T::mapped_type;
auto& map_data = this->template Input<MAP_T>(MAP);
auto* key_output = Output(
KEYS, {static_cast<int64_t>(map_data.size())}, at::dtype<key_type>());
auto* value_output = Output(
VALUES,
{static_cast<int64_t>(map_data.size())},
at::dtype<mapped_type>());
auto* key_data = key_output->template mutable_data<key_type>();
auto* value_data = value_output->template mutable_data<mapped_type>();
for (const auto& it : map_data) {
*key_data = it.first;
*value_data = it.second;
key_data++;
value_data++;
}
return true;
}
INPUT_TAGS(MAP);
OUTPUT_TAGS(KEYS, VALUES);
};
template <typename KEY_T, typename VALUE_T>
class MapSerializer : public BlobSerializerBase {
public:
using MapType = typename MapTypeTraits<KEY_T, VALUE_T>::MapType;
void Serialize(
const void* pointer,
TypeMeta typeMeta,
const string& name,
BlobSerializerBase::SerializationAcceptor acceptor) override {
CAFFE_ENFORCE(typeMeta.Match<MapType>());
const MapType& map_data = *static_cast<const MapType*>(pointer);
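    // The map is serialized as two parallel tensors (keys and values) packed
    // into a single TensorProtos message stored in the blob's content field.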
int64_t sz = map_data.size();
Tensor key_tensor(CPU);
key_tensor.Resize(sz);
Tensor value_tensor(CPU);
value_tensor.Resize(sz);
auto* key_data = key_tensor.mutable_data<KEY_T>();
auto* value_data = value_tensor.mutable_data<VALUE_T>();
for (const auto& it : map_data) {
*key_data = it.first;
*value_data = it.second;
key_data++;
value_data++;
}
TensorProtos tensor_protos;
TensorSerializer ser;
ser.Serialize(
key_tensor, name, tensor_protos.add_protos(), 0, key_tensor.numel());
ser.Serialize(
value_tensor,
name,
tensor_protos.add_protos(),
0,
value_tensor.numel());
BlobProto blob_proto;
blob_proto.set_name(name);
blob_proto.set_type(MapTypeTraits<KEY_T, VALUE_T>::MapTypeName());
blob_proto.set_content(SerializeAsString_EnforceCheck(tensor_protos));
acceptor(name, SerializeBlobProtoAsString_EnforceCheck(blob_proto));
}
};
template <typename KEY_T, typename VALUE_T>
class MapDeserializer : public BlobDeserializerBase {
public:
using MapType = typename MapTypeTraits<KEY_T, VALUE_T>::MapType;
void Deserialize(const BlobProto& proto, Blob* blob) override {
TensorProtos tensor_protos;
CAFFE_ENFORCE(
tensor_protos.ParseFromString(proto.content()),
"Fail to parse TensorProtos");
TensorDeserializer deser;
Tensor key_tensor = deser.Deserialize(tensor_protos.protos(0));
Tensor value_tensor = deser.Deserialize(tensor_protos.protos(1));
auto* key_data = key_tensor.data<KEY_T>();
auto* value_data = value_tensor.data<VALUE_T>();
auto* map_ptr = blob->template GetMutable<MapType>();
for (const auto i : c10::irange(key_tensor.numel())) {
map_ptr->emplace(key_data[i], value_data[i]);
}
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MAP_OPS_H_
| 8,083
| 28.940741
| 80
|
h
|
null |
pytorch-main/caffe2/operators/margin_loss_l2r_op.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SessionMarginLossOp final : public Operator<Context> {
public:
template <class... Args>
explicit SessionMarginLossOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
margin_(this->template GetSingleArgument<float>("margin", 1.0)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
private:
INPUT_TAGS(PRED, LABEL, SESSION_LENS);
OUTPUT_TAGS(LOSS, DPRED);
void ResizeInvLogITensor(int);
void ComputeDiscounts(int*, int);
float SessionMarginLoss(
int start_index,
int end_index,
const Tensor& pred,
const Tensor& label,
Tensor** dpred);
float margin_;
Tensor label_relation_sign_;
Tensor margin_diff_;
};
template <typename T, class Context>
class SessionMarginLossGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(SessionMarginLossGradientOp)
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
private:
INPUT_TAGS(PRED, SESSION_LENS, PRECOMPUTED_DPRED, DLOSS);
OUTPUT_TAGS(DPRED);
};
} // namespace caffe2
| 1,324
| 24.480769
| 74
|
h
|
null |
pytorch-main/caffe2/operators/margin_ranking_criterion_op.h
|
#ifndef CAFFE2_OPERATORS_MARGIN_RANKING_CRITERION_OP_H_
#define CAFFE2_OPERATORS_MARGIN_RANKING_CRITERION_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class MarginRankingCriterionOp final : public Operator<Context> {
public:
template <class... Args>
explicit MarginRankingCriterionOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(float, "margin", margin_, 1.0) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
float margin_;
};
template <class Context>
class MarginRankingCriterionGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit MarginRankingCriterionGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(float, "margin", margin_, 1.0) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
float margin_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MARGIN_RANKING_CRITERION_OP_H_
| 1,113
| 24.906977
| 73
|
h
|
null |
pytorch-main/caffe2/operators/matmul_op.h
|
#ifndef CAFFE2_OPERATORS_MATMUL_OP_H_
#define CAFFE2_OPERATORS_MATMUL_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context, class Engine = DefaultEngine>
class MatMulOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit MatMulOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axis_a_(this->template GetSingleArgument<int>("axis_a", 1)),
axis_b_(this->template GetSingleArgument<int>("axis_b", 1)),
trans_a_(this->template GetSingleArgument<int>("trans_a", 0)),
trans_b_(this->template GetSingleArgument<int>("trans_b", 0)) {}
~MatMulOp() override {}
bool RunOnDevice() override {
const auto& A = Input(0);
const auto& B = Input(1);
const auto canonical_axis_a = A.canonical_axis_index(axis_a_);
const auto canonical_axis_b = B.canonical_axis_index(axis_b_);
int A_dim0 = A.size_to_dim(canonical_axis_a);
int A_dim1 = A.size_from_dim(canonical_axis_a);
int B_dim0 = B.size_to_dim(canonical_axis_b);
int B_dim1 = B.size_from_dim(canonical_axis_b);
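    // axis_a/axis_b flatten each input into a 2-D matrix; e.g. an A of shape
    // [2, 3, 4] with axis_a = 1 is treated as a 2 x 12 matrix.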
int a_dim0, a_dim1, b_dim0, b_dim1;
if (trans_a_) {
a_dim0 = A_dim1;
a_dim1 = A_dim0;
} else {
a_dim0 = A_dim0;
a_dim1 = A_dim1;
}
if (trans_b_) {
b_dim0 = B_dim1;
b_dim1 = B_dim0;
} else {
b_dim0 = B_dim0;
b_dim1 = B_dim1;
}
auto dimErrorString = [&]() {
return c10::str(
"Dimension mismatch: ",
trans_a_ ? "trans(A): " : "A: ",
a_dim0,
" ",
a_dim1,
trans_b_ ? ", trans(B): " : ", B: ",
b_dim0,
" ",
b_dim1);
};
// Error checking
CAFFE_ENFORCE(a_dim1 == b_dim0, dimErrorString());
Y_shape_cache_[0] = a_dim0;
Y_shape_cache_[1] = b_dim1;
auto* Y = Output(0, Y_shape_cache_, at::dtype<T>());
CAFFE_ENFORCE(a_dim0 * b_dim1 == Y->numel(), dimErrorString());
// Y = A * B
math::Gemm<T, Context, Engine>(
trans_a_ ? CblasTrans : CblasNoTrans,
trans_b_ ? CblasTrans : CblasNoTrans,
a_dim0,
b_dim1,
a_dim1,
1,
A.template data<T>(),
B.template data<T>(),
0,
Y->template mutable_data<T>(),
&context_);
if (InputSize() == 3) {
// In gradient op, resize to input
Y->ResizeLike(Input(2));
}
return true;
}
protected:
  // A local vector to cache the output shape so we don't need to recreate
  // a vector object every time Run() is called.
vector<int64_t> Y_shape_cache_{0, 0};
int axis_a_{1};
int axis_b_{1};
bool trans_a_;
bool trans_b_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MATMUL_OP_H_
| 2,852
| 26.171429
| 74
|
h
|
null |
pytorch-main/caffe2/operators/max_pool_with_index_gpu.h
|
#pragma once
#include <cfloat>
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/operators/pool_op.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
class MaxPoolWithIndexOp final : public ConvPoolOpBase<CUDAContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
MaxPoolWithIndexOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws) {}
~MaxPoolWithIndexOp() {}
template <typename T>
bool DoRunWithType();
bool RunOnDevice() override;
// Input: X
// Output: Y, mask
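  // The mask records, for every output element, the index of the input
  // element that produced the max, so the gradient op can route dY back to dX.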
};
class MaxPoolWithIndexGradientOp final : public ConvPoolOpBase<CUDAContext> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CUDAContext);
MaxPoolWithIndexGradientOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CUDAContext>(operator_def, ws) {}
~MaxPoolWithIndexGradientOp() {}
template <typename T>
bool DoRunWithType();
bool RunOnDevice() override;
// Input: X, dY, mask
// Output: dX
};
} // namespace caffe2
| 1,155
| 23.595745
| 77
|
h
|
null |
pytorch-main/caffe2/operators/mean_op.h
|
#ifndef CAFFE2_OPERATORS_MEAN_OPS_H_
#define CAFFE2_OPERATORS_MEAN_OPS_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/proto_utils.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class MeanOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(MeanOp)
template <typename T>
bool DoRunWithType() {
auto& input0 = Input(0);
auto* output = Output(0, input0.sizes(), at::dtype<T>());
output->CopyFrom(input0, true /*async*/);
if (InputSize() == 1) {
return true;
}
// Dimension checking
for (const auto i : c10::irange(1, InputSize())) {
if (output->sizes() != Input(i).sizes()) {
CAFFE_THROW(
"Check failed: output->sizes() == Input(i).sizes().",
"Description: Input #",
i,
", input dimension:",
Input(i).sizes(),
" should match output dimension: ",
output->sizes());
}
}
T* output_data = output->template mutable_data<T>();
for (const auto i : c10::irange(1, InputSize())) {
math::Add(
output->numel(),
output_data,
Input(i).template data<T>(),
output_data,
&context_);
}
math::Scale(
output->numel(),
1.0f / InputSize(),
output_data,
output_data,
&context_);
return true;
}
bool RunOnDevice() override {
if (Input(0).template IsType<float>()) {
return DoRunWithType<float>();
} else if (Input(0).template IsType<double>()) {
return DoRunWithType<double>();
} else {
CAFFE_THROW(
"Mean operator only supports 32-bit float or 64-bit double, but",
" input was of type ",
Input(0).dtype().name());
}
}
};
template <class Context>
class MeanGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit MeanGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
template <typename T>
bool DoRunWithType() {
auto& dY = Input(0);
const auto* dY_data = dY.template data<T>();
int size = dY.numel();
int num_inputs = OutputSize();
float scale = 1.0f / num_inputs;
// dX0 = scale * dY
auto* dX0 = Output(0, dY.sizes(), at::dtype<T>());
math::Scale(
size, scale, dY_data, dX0->template mutable_data<T>(), &context_);
    // Copy into the remaining dX outputs
for (const auto i : c10::irange(1, num_inputs)) {
auto* cur_dX = Output(i);
cur_dX->ResizeLike(dY);
cur_dX->CopyFrom(*dX0, true /*async*/);
}
return true;
}
bool RunOnDevice() override {
if (Input(0).template IsType<float>()) {
return DoRunWithType<float>();
} else if (Input(0).template IsType<double>()) {
return DoRunWithType<double>();
} else {
CAFFE_THROW(
"Mean operator only supports 32-bit float or 64-bit double, but",
" input was of type ",
Input(0).dtype().name());
}
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MEAN_OPS_H_
| 3,314
| 24.305344
| 75
|
h
|
null |
pytorch-main/caffe2/operators/merge_id_lists_op.h
|
#ifndef CAFFE2_OPERATORS_MERGE_ID_LISTS_OP_H_
#define CAFFE2_OPERATORS_MERGE_ID_LISTS_OP_H_
#include <set>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include <c10/util/irange.h>
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(MergeIdLists);
namespace caffe2 {
template <class Context>
class MergeIdListsOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(MergeIdListsOp);
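  // Merges multiple (LENGTHS, VALUES) id-list features into one, sorting and
  // deduplicating ids within each example. For instance, merging lengths [2],
  // values [4, 2] with lengths [1], values [4] yields lengths [2] and
  // values [2, 4].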
template <typename T>
bool DoRunWithType() {
auto& first_lengths = Input(0);
CAFFE_ENFORCE_EQ(first_lengths.dim(), 1, "LENGTHS should be 1-D");
const auto batch_size = first_lengths.numel();
auto* out_lengths = Output(0, first_lengths.sizes(), at::dtype<int32_t>());
auto* out_lengths_data = out_lengths->template mutable_data<int32_t>();
/**
* Loop to figure out how much space to reserve for output
* and perform checks.
*/
auto M = 0;
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (size_t i = 0; i < InputSize(); i += 2) {
auto& lengths = Input(i);
CAFFE_ENFORCE_EQ(lengths.dim(), 1, "LENGTHS should be 1-D");
CAFFE_ENFORCE_EQ(lengths.numel(), batch_size, "LENGTHS should be equal");
auto& values = Input(i + 1);
CAFFE_ENFORCE_EQ(values.dim(), 1, "VALUES should be 1-D");
M += values.numel();
}
auto* out_values = Output(1, {M}, at::dtype<T>());
T* out_values_data = out_values->template mutable_data<T>();
auto pos = 0;
// TODO(badri): Use unordered_set if performance is an issue
std::set<T> deduped;
std::vector<int> offsets(InputSize(), 0);
for (const auto sample : c10::irange(batch_size)) {
for (size_t i = 0; i < InputSize(); i += 2) {
auto& lengths = Input(i);
const auto* lengths_data = lengths.template data<int32_t>();
auto& values = Input(i + 1);
const T* values_data = values.template data<T>();
const auto length = lengths_data[sample];
for (auto j = offsets[i]; j < offsets[i] + length; j++) {
deduped.insert(values_data[j]);
}
offsets[i] += length;
}
for (auto val : deduped) {
out_values_data[pos++] = val;
}
out_lengths_data[sample] = deduped.size();
deduped.clear();
}
out_values->Resize(pos);
return true;
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(1));
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MERGE_ID_LISTS_OP_H_
| 2,596
| 29.197674
| 79
|
h
|
null |
pytorch-main/caffe2/operators/minmax_ops.h
|
#ifndef CAFFE2_OPERATORS_MINMAX_OPS_H_
#define CAFFE2_OPERATORS_MINMAX_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
#include <c10/util/irange.h>
namespace caffe2 {
template <typename T, class Context>
class MaxOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(MaxOp)
bool RunOnDevice() override {
const auto& X0 = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X0);
const T* X0_data = X0.template data<T>();
T* Y_data = Y->template mutable_data<T>();
const int N = X0.numel();
if (InputSize() == 1) {
if (Y != &X0) {
context_.template CopySameDevice<T>(N, X0_data, Y_data);
}
return true;
}
const auto& X1 = Input(1);
CAFFE_ENFORCE_EQ(
        X1.sizes(),
Y->sizes(),
"Description: Input #1, input dimension:",
X1.sizes(),
" should match output dimension: ",
Y->sizes());
const T* X1_data = X1.template data<T>();
math::Max<T, Context>(N, X0_data, X1_data, Y_data, &context_);
for (const auto i : c10::irange(2, InputSize())) {
const auto& Xi = Input(i);
CAFFE_ENFORCE_EQ(
Xi.sizes(),
Y->sizes(),
"Description: Input #",
i,
", input dimension:",
Input(i).sizes(),
" should match output dimension: ",
Y->sizes());
const T* Xi_data = Xi.template data<T>();
math::Max<T, Context>(N, Y_data, Xi_data, Y_data, &context_);
}
return true;
}
};
template <typename T, class Context>
class MinOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(MinOp)
bool RunOnDevice() override {
const auto& X0 = Input(0);
auto* Y = Output(0);
Y->ResizeLike(X0);
const T* X0_data = X0.template data<T>();
T* Y_data = Y->template mutable_data<T>();
const int N = X0.numel();
if (InputSize() == 1) {
if (Y != &X0) {
context_.template CopySameDevice<T>(N, X0_data, Y_data);
}
return true;
}
const auto& X1 = Input(1);
CAFFE_ENFORCE_EQ(
        X1.sizes(),
Y->sizes(),
"Description: Input #1, input dimension:",
X1.sizes(),
" should match output dimension: ",
Y->sizes());
const T* X1_data = X1.template data<T>();
math::Min<T, Context>(N, X0_data, X1_data, Y_data, &context_);
for (const auto i : c10::irange(2, InputSize())) {
const auto& Xi = Input(i);
CAFFE_ENFORCE_EQ(
Xi.sizes(),
Y->sizes(),
"Description: Input #",
i,
", input dimension:",
Input(i).sizes(),
" should match output dimension: ",
Y->sizes());
const T* Xi_data = Xi.template data<T>();
math::Min<T, Context>(N, Y_data, Xi_data, Y_data, &context_);
}
return true;
}
};
template <typename T, class Context>
class SelectGradientOpBase : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(SelectGradientOpBase)
bool RunOnDevice() override;
};
template <typename T, class Context>
class MaxGradientOp final : public SelectGradientOpBase<T, Context> {
public:
template <class... Args>
explicit MaxGradientOp(Args&&... args)
: SelectGradientOpBase<T, Context>(std::forward<Args>(args)...) {}
~MaxGradientOp() override = default;
};
template <typename T, class Context>
class MinGradientOp final : public SelectGradientOpBase<T, Context> {
public:
template <class... Args>
explicit MinGradientOp(Args&&... args)
: SelectGradientOpBase<T, Context>(std::forward<Args>(args)...) {}
~MinGradientOp() override = default;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MINMAX_OPS_H_
| 3,898
| 26.652482
| 72
|
h
|
null |
pytorch-main/caffe2/operators/mish_op.h
|
#ifndef CAFFE2_OPERATORS_MISH_OP_H_
#define CAFFE2_OPERATORS_MISH_OP_H_
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
struct MishFunctor {
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const;
};
template <class Context>
class MishGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(MishGradientOp)
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <typename T>
bool DoRunWithType();
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(INPUT));
}
private:
INPUT_TAGS(INPUT, OUTPUT, OUTPUT_GRAD);
OUTPUT_TAGS(INPUT_GRAD);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MISH_OP_H_
| 794
| 21.083333
| 80
|
h
|
null |
pytorch-main/caffe2/operators/mod_op.h
|
#ifndef CAFFE_OPERATORS_MOD_OP_H_
#define CAFFE_OPERATORS_MOD_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class ModOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ModOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
divisor_ = this->template GetSingleArgument<int64_t>("divisor", 0);
CAFFE_ENFORCE_NE(divisor_, 0, "divisor must not be 0");
sign_follow_divisor_ =
this->template GetSingleArgument<bool>("sign_follow_divisor", false);
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int, int64_t>>::call(this, Input(DATA));
}
template <typename T>
bool DoRunWithType();
protected:
INPUT_TAGS(DATA);
private:
int64_t divisor_;
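  // If true, the result takes the sign of the divisor (Python-style modulo);
  // otherwise it takes the sign of the dividend (C-style remainder).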
bool sign_follow_divisor_;
};
} // namespace caffe2
#endif // CAFFE_OPERATORS_MOD_OP_H_
| 984
| 23.02439
| 78
|
h
|
null |
pytorch-main/caffe2/operators/moments_op.h
|
#ifndef CAFFE2_OPERATORS_MOMENTS_OP_H_
#define CAFFE2_OPERATORS_MOMENTS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include <c10/util/irange.h>
#include <algorithm>
#include <vector>
namespace caffe2 {
template <typename T, class Context>
class MomentsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit MomentsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axes_(this->template GetRepeatedArgument<int>("axes")),
OP_SINGLE_ARG(bool, "keepdims", keep_dims_, true),
OP_SINGLE_ARG(bool, "allow_broadcast_fastpath", allow_broadcast_fastpath_, true) {}
bool RunOnDevice() override {
const auto& X = Input(0);
const int ndim = X.dim();
if (axes_.empty()) {
axes_.resize(ndim);
std::iota(axes_.begin(), axes_.end(), 0);
} else {
std::sort(axes_.begin(), axes_.end());
CAFFE_ENFORCE_GE(axes_.front(), 0, "Axes ids must be non-negative.");
CAFFE_ENFORCE_LT(
axes_.back(),
ndim,
"Axes ids must be smaller than the dimensions of input.");
}
const std::vector<int> X_dims(X.sizes().cbegin(), X.sizes().cend());
std::vector<int> Y_dims = X_dims;
for (const int axis : axes_) {
Y_dims[axis] = 1;
}
std::vector<std::int64_t> output_dims;
output_dims.reserve(ndim);
std::size_t cur_axis = 0;
for (const auto i : c10::irange(ndim)) {
if (cur_axis < axes_.size() && i == axes_[cur_axis]) {
if (keep_dims_) {
output_dims.push_back(1);
}
++cur_axis;
} else {
output_dims.push_back(X_dims[i]);
}
}
auto* mean = Output(0, output_dims, at::dtype<T>());
auto* var = Output(1, output_dims, at::dtype<T>());
math::Moments<float, Context>(
X_dims.size(),
X_dims.data(),
Y_dims.data(),
X.template data<T>(),
mean->template mutable_data<T>(),
var->template mutable_data<T>(),
&context_,
allow_broadcast_fastpath_);
return true;
}
private:
std::vector<int> axes_;
  const bool keep_dims_;
const bool allow_broadcast_fastpath_;
};
template <typename T, class Context>
class MomentsGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit MomentsGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axes_(this->template GetRepeatedArgument<int>("axes")),
OP_SINGLE_ARG(bool, "allow_broadcast_fastpath", allow_broadcast_fastpath_, true) {}
bool RunOnDevice() override {
const auto& dmean = Input(0);
const auto& dvariance = Input(1);
const auto& X = Input(2);
const auto& mean = Input(3);
const int ndim = X.dim();
if (axes_.empty()) {
axes_.resize(ndim);
std::iota(axes_.begin(), axes_.end(), 0);
} else {
std::sort(axes_.begin(), axes_.end());
CAFFE_ENFORCE_GE(axes_.front(), 0, "Axes ids must be non-negative.");
CAFFE_ENFORCE_LT(
axes_.back(),
ndim,
"Axes ids must be smaller than the dimensions of input.");
}
const std::vector<int> dX_dims(X.sizes().cbegin(), X.sizes().cend());
std::vector<int> dY_dims = dX_dims;
for (const int axis : axes_) {
dY_dims[axis] = 1;
}
auto* dX = Output(0, X.sizes(), at::dtype<T>());
return Compute(
dY_dims,
dX_dims,
dmean.template data<T>(),
dvariance.template data<T>(),
X.template data<T>(),
mean.template data<T>(),
dX->template mutable_data<T>());
}
private:
bool Compute(
const std::vector<int>& dY_dims,
const std::vector<int>& dX_dims,
const T* dmean_data,
const T* dvariance_data,
const T* X_data,
const T* mean_data,
T* dX_data);
std::vector<int> axes_;
const bool allow_broadcast_fastpath_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MOMENTS_OP_H_
| 4,088
| 28
| 91
|
h
|
null |
pytorch-main/caffe2/operators/multi_class_accuracy_op.h
|
#ifndef CAFFE2_OPERATORS_MULTI_CLASS_ACCURACY_OP_H_
#define CAFFE2_OPERATORS_MULTI_CLASS_ACCURACY_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T, class Context>
class MultiClassAccuracyOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(MultiClassAccuracyOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
INPUT_TAGS(PREDICTION, LABEL);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_MULTI_CLASS_ACCURACY_OP_H_
| 539
| 22.478261
| 61
|
h
|
null |
pytorch-main/caffe2/operators/ngram_ops.h
|
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
#include <vector>
namespace caffe2 {
template <typename F, typename T, class Context>
class NGramFromCategoricalOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit NGramFromCategoricalOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
col_ids_(this->template GetRepeatedArgument<int>("col_ids")),
categorical_limits_(
this->template GetRepeatedArgument<int>("categorical_limits")),
vals_(this->template GetRepeatedArgument<int>("vals")) {
col_num_ = col_ids_.size();
max_col_id_ = *std::max_element(col_ids_.begin(), col_ids_.end());
CAFFE_ENFORCE_EQ(col_num_, categorical_limits_.size());
int expected_vals_size = 0;
for (auto& l : categorical_limits_) {
CAFFE_ENFORCE_GT(l, 0);
expected_vals_size += l;
}
CAFFE_ENFORCE_EQ(expected_vals_size, vals_.size());
// compute ngram maps with small end
for (auto& j : col_ids_) {
CAFFE_ENFORCE_GE(j, 0);
ngram_maps_.push_back(std::map<int, int>());
}
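    // The maps implement a mixed-radix encoding: with categorical_limits
    // [2, 3], column 0 contributes its index * 1 and column 1 its index * 2,
    // so every value combination maps to a unique ngram id in [0, 6).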
int base = 1;
int idx = 0;
for (const auto k : c10::irange(col_num_)) {
int l = categorical_limits_[k];
for (const auto m : c10::irange(l)) {
int v = vals_[idx++];
ngram_maps_[k][v] = m * base;
}
base *= l;
}
}
bool RunOnDevice() override {
auto& floats = Input(0);
auto N = floats.size(0);
auto D = floats.size_from_dim(1);
const F* floats_data = floats.template data<F>();
auto* output = Output(0, {N}, at::dtype<T>());
auto* output_data = output->template mutable_data<T>();
math::Set<T, Context>(output->numel(), 0, output_data, &context_);
CAFFE_ENFORCE_GT(D, max_col_id_);
for (const auto i : c10::irange(N)) {
for (const auto k : c10::irange(col_num_)) {
int j = col_ids_[k];
int v = round(floats_data[i * D + j]);
        // For out-of-vocabulary values, we always treat them the same as the
        // first value specified in vals; to mimic the behavior of the sigrid
        // NGram transform, just push an impossible value onto the front of
        // each segment of vals.
output_data[i] += ngram_maps_[k].find(v) == ngram_maps_[k].end()
? 0
: ngram_maps_[k][v];
}
}
return true;
}
private:
std::vector<int> col_ids_;
std::vector<int> categorical_limits_;
std::vector<int> vals_;
std::vector<std::map<int, int>> ngram_maps_;
int col_num_;
int max_col_id_;
};
} // namespace caffe2
| 2,705
| 30.835294
| 79
|
h
|
null |
pytorch-main/caffe2/operators/no_default_engine_op.h
|
#ifndef CAFFE2_OPERATORS_NO_DEFAULT_ENGINE_OP_H_
#define CAFFE2_OPERATORS_NO_DEFAULT_ENGINE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
/**
* A helper class to denote that an op does not have a default engine.
*
* NoDefaultEngineOp is a helper class that one can use to denote that a
* specific operator is not intended to be called without an explicit engine
* given. This is the case for e.g. the communication operators where one has
* to specify a backend (like MPI or ZEROMQ).
*/
template <class Context>
class NoDefaultEngineOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(NoDefaultEngineOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
CAFFE_THROW(
"The operator ",
this->debug_def().type(),
" does not have a default engine implementation. Please "
"specify an engine explicitly for this operator.");
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_NO_DEFAULT_ENGINE_OP_H_
| 1,063
| 28.555556
| 77
|
h
|
null |
pytorch-main/caffe2/operators/normalize_l1_op.h
|
#ifndef CAFFE2_OPERATORS_NORMALIZE_L1_OP_H_
#define CAFFE2_OPERATORS_NORMALIZE_L1_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class NormalizeL1Op final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(NormalizeL1Op)
bool RunOnDevice() override {
const auto& x = Input(0);
const auto* xData = x.template data<T>();
auto* y = Output(0, x.sizes(), at::dtype<T>());
auto* yData = y->template mutable_data<T>();
const auto canonical_axis = x.canonical_axis_index(
this->template GetSingleArgument<int>("axis", -1));
const int m = x.dim32(canonical_axis);
const int n = x.numel() / m;
const int sf = x.size_from_dim(canonical_axis + 1);
DoNormalize(xData, yData, m, n, sf);
return true;
}
private:
void
DoNormalize(const T* xData, T* yData, const int m, const int n, const int sf);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_NORMALIZE_L1_OP_H_
| 1,075
| 25.9
| 80
|
h
|
null |
pytorch-main/caffe2/operators/normalize_op.h
|
#ifndef CAFFE2_OPERATORS_NORMALIZE_OP_H_
#define CAFFE2_OPERATORS_NORMALIZE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
#define KEPS 1e-12f
namespace caffe2 {
template <typename T, class Context>
class NormalizeOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit NormalizeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
const auto& x = Input(0);
const auto* xData = x.template data<T>();
auto* y = Output(0, x.sizes(), at::dtype<T>());
auto* yData = y->template mutable_data<T>();
const auto canonical_axis = x.canonical_axis_index(
this->template GetSingleArgument<int>("axis", -1));
const int64_t m = x.dim(canonical_axis);
const size_t n = x.numel() / m;
const size_t sf = x.size_from_dim(canonical_axis + 1);
DoNormalize(xData, yData, m, n, sf);
return true;
}
private:
const T kEps_ = KEPS;
void DoNormalize(
const T* xData,
T* yData,
const int m,
const int n,
const int sf) {
using InnerStride = Eigen::InnerStride<Eigen::Dynamic>;
using StridedVec =
Eigen::Map<Eigen::Matrix<T, 1, Eigen::Dynamic>, 0, InnerStride>;
using ConstStridedVec =
Eigen::Map<const Eigen::Matrix<T, 1, Eigen::Dynamic>, 0, InnerStride>;
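    // The input is viewed as n slices of m elements each, strided sf apart
    // along the normalized axis; every slice is scaled to unit L2 norm.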
for (const auto i : c10::irange(n)) {
auto base = (i / sf) * sf * m + (i % sf);
ConstStridedVec xVec(xData + base, 1, m, InnerStride(sf));
auto norm = xVec.template lpNorm<2>();
norm = std::max(norm, kEps_);
StridedVec yVec(yData + base, 1, m, InnerStride(sf));
yVec = xVec / norm;
}
}
};
template <typename T, class Context>
class NormalizeGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit NormalizeGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
const auto& x = Input(0);
const auto& gOut = Input(GRAD_OUT);
auto* gIn = Output(GRAD_IN, gOut.sizes(), at::dtype<T>());
const auto* xData = x.template data<T>();
const auto* gOutData = gOut.template data<T>();
auto* gInData = gIn->template mutable_data<T>();
const auto canonical_axis = x.canonical_axis_index(
this->template GetSingleArgument<int>("axis", -1));
const int m = x.dim32(canonical_axis);
const int n = x.numel() / m;
const int sf = x.size_from_dim(canonical_axis + 1);
DoNormalize(xData, gOutData, gInData, m, n, sf);
return true;
}
private:
const T kEps_ = KEPS;
void DoNormalize(
const T* xData,
const T* gOutData,
T* gInData,
const int m,
const int n,
const int sf);
INPUT_TAGS(INPUT, GRAD_OUT);
OUTPUT_TAGS(GRAD_IN);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_NORMALIZE_OP_H_
| 3,021
| 27.509434
| 78
|
h
|
null |
pytorch-main/caffe2/operators/numpy_tile_op.h
|
#ifndef CAFFE2_OPERATORS_NUMPY_TILE_OP_H_
#define CAFFE2_OPERATORS_NUMPY_TILE_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
// Copy a Blob n times along a specified axis.
template <class Context>
class NumpyTileOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit NumpyTileOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
~NumpyTileOp() override {}
bool RunOnDevice() override {
const auto& input = Input(0);
const auto& repeats = Input(1);
    // Check that the `repeats` tensor is 1-D and has as many elements as
    // `input` has dimensions.
CAFFE_ENFORCE_EQ(repeats.dim(), 1, "repeats input must be a 1-d tensor");
CAFFE_ENFORCE_EQ(
repeats.numel(),
input.dim(),
"repeats input have the same"
" number of elements as `inputs` has dimensions.");
const int64_t* repeats_data = repeats.template data<int64_t>();
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto i : c10::irange(repeats.numel())) {
CAFFE_ENFORCE_GE(repeats_data[i], 0);
}
auto* output = Output(0);
// Alternate inputs and outputs between two buffers. Repeatedly apply the
// Tile kernel along each axis. Then copy out the resulting data into the
// output tensor.
Tensor *src = &buffer, *dst = output;
src->CopyFrom(input);
vector<int64_t> output_dims(input.sizes().vec());
for (const auto i : c10::irange(repeats.numel())) {
if (repeats_data[i] == 1) {
continue;
}
// size up to (and not including) axis
const auto outer_dim = src->size_to_dim(i);
// size from axis up
const auto inner_dim = src->size_from_dim(i);
dst->Resize(outer_dim, inner_dim * repeats_data[i]);
/**
* How this works:
* Imagine a 2D tensor (matrix) of size 3x10, tiled 2 times.
       * - Tiling along axis 0 (row) means copying the entire 3x10 matrix 2
       * times. outer_dim = 1, inner_dim = 30.
       * - Tiling along axis 1 (column) means copying each row 2 times, then
       * proceeding to the next row, until the end. outer_dim = 3, inner_dim = 10.
*/
const char* src_data = static_cast<const char*>(src->raw_data());
char* dst_data = static_cast<char*>(dst->raw_mutable_data(src->dtype()));
DoTile(
src->dtype(),
src->itemsize(),
outer_dim,
inner_dim,
repeats_data[i],
src_data,
dst_data);
output_dims[i] *= repeats_data[i];
dst->Reshape(output_dims);
std::swap(src, dst);
}
// NB: because we have the swap at the end of the above loop, our real
// result tensor is going to live in *src when we reach this line
// whether we entered the loop or not :)
if (output != src)
output->CopyFrom(*src);
return true;
}
private:
void DoTile(
const TypeMeta meta,
int item_size,
int outer_dim,
int inner_dim,
int64_t num_tiles,
const char* input_data,
char* output_data) {
for (const auto i : c10::irange(outer_dim)) {
(void)i; // Suppress unused variable warning
for (const auto t : c10::irange(num_tiles)) {
(void)t; // Suppress unused variable warning
context_.CopyItemsSameDevice(meta, inner_dim, input_data, output_data);
output_data += inner_dim * item_size;
}
input_data += inner_dim * item_size;
}
}
Tensor buffer{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_NUMPY_TILE_OP_H_
| 3,809
| 30.487603
| 79
|
h
|
null |
pytorch-main/caffe2/operators/one_hot_ops.h
|
#ifndef CAFFE_OPERATORS_ONE_HOT_OPS_H_
#define CAFFE_OPERATORS_ONE_HOT_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(BatchBucketOneHot);
namespace caffe2 {
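// Given a 1-D indices tensor and a scalar index_size, OneHotOp emits a
// (batch_size x index_size) matrix with a single 1 per row. E.g. indices =
// [1, 0] with index_size = 3 yields [[0, 1, 0], [1, 0, 0]].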
template <class Context>
class OneHotOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit OneHotOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
auto& indices = Input(0);
CAFFE_ENFORCE_EQ(
indices.dim(),
1,
"indices input must be 1D tensor of data type int64_t");
// Index size input must be in CPU context
auto& index_size_tensor = this->template Input<Tensor>(1, CPU);
CAFFE_ENFORCE_EQ(
index_size_tensor.numel(),
1,
"index_size_tensor input must be scalar of data type int64_t");
auto batch_size = indices.numel();
auto index_size = *index_size_tensor.template data<int64_t>();
auto one_hots = Output(0);
one_hots->Resize(batch_size, index_size);
auto output_size = one_hots->numel();
if (output_size == 0) {
return true;
}
DoOneHotOp(batch_size, index_size, indices, one_hots);
return true;
}
protected:
void DoOneHotOp(
int64_t batch_size,
int64_t index_size,
const Tensor& indices,
Tensor* output);
};
template <class Context>
class BatchOneHotOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchOneHotOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(X));
}
template <typename T>
bool DoRunWithType();
INPUT_TAGS(X, LENS, VALS);
protected:
OUTPUT_TAGS(ONE_HOT);
private:
// allows for fast random access to a given dict and is re-used across runs
std::vector<int64_t> valsOffsets_;
};
template <class Context>
class BatchBucketOneHotOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit BatchBucketOneHotOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override;
protected:
INPUT_TAGS(X, LENS, BOUNDARIES);
OUTPUT_TAGS(ONE_HOT);
};
} // namespace caffe2
#endif // CAFFE_OPERATORS_ONE_HOT_OPS_H_
| 2,562
| 24.376238
| 79
|
h
|
null |
pytorch-main/caffe2/operators/onnx_while_op.h
|
#ifndef CAFFE2_OPERATORS_ONNX_WHILE_OP_H_
#define CAFFE2_OPERATORS_ONNX_WHILE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/create_scope_op.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class ONNXWhileOp final : public Operator<Context> {
public:
explicit ONNXWhileOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
parent_ws_(ws),
has_trip_count_(
this->template GetSingleArgument<int64_t>("has_trip_count", 0)),
has_cond_(this->template GetSingleArgument<int64_t>("has_cond", 0)),
save_scopes_(
this->template GetSingleArgument<int64_t>("save_scopes", 0)),
disable_scopes_(
this->template GetSingleArgument<int64_t>("disable_scopes", 0)),
num_loop_carried_deps_(this->template GetSingleArgument<int64_t>(
"num_loop_carried_deps",
-1)) {
CAFFE_ENFORCE(
this->template HasSingleArgumentOfType<NetDef>("body"),
"body net must be specified in ONNXWhile operator");
if (disable_scopes_) {
CAFFE_ENFORCE(
!save_scopes_, "Cannot save scopes when disable_scopes=True");
}
body_net_def_ = this->template GetSingleArgument<NetDef>("body", NetDef());
static int64_t counter = -1;
if (!body_net_def_.has_name()) {
if (counter == -1) {
++counter;
body_net_def_.set_name("loop_net");
} else {
++counter;
body_net_def_.set_name("loop_net." + c10::to_string(counter));
}
}
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int, bool, long>>::call(this, Input(1));
}
// Operator
// Inputs: max trip count, condition, initial loop-carried dependencies
// Outputs: Final loop-carried dependencies, scan_outputs
// Body
// Inputs: iteration number, condition, loop-carried dependencies
// Outputs: condition, loop-carried dependencies, scan_outputs
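  // Roughly equivalent to (a sketch of the semantics, not normative):
  //   lcds = initial_lcds; cond = first_iter_condition;
  //   for (i = 0; (!has_trip_count || i < max_trip_count) &&
  //               (!has_cond || cond); ++i) {
  //     (cond, lcds, scan) = body(i, cond, lcds);
  //     scan_outputs[i] = scan;
  //   }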
template <typename CondVarType>
bool DoRunWithType() {
// Clear workspaces from the previous invocations of the loop
// and setup a local scope for the first iteration
ws_stack_.clear();
auto loop_ws = !disable_scopes_
? ws_stack_.pushForwardWorkspace(parent_ws_).get()
: parent_ws_;
constexpr int64_t num_inputs_before_lcds = 2;
    // First input is the maximum trip count. Second input is the condition
// variable (for the first iteration). The rest of the inputs are
// loop-carried dependencies.
int64_t num_loop_carried_deps;
if (num_loop_carried_deps_ != -1) {
num_loop_carried_deps = num_loop_carried_deps_;
} else {
num_loop_carried_deps = InputSize() - num_inputs_before_lcds;
}
int64_t max_trip_count = *Input(0).template data<int64_t>();
const bool first_iter_condition = *Input(1).template data<CondVarType>();
scope_ = std::make_shared<LocalScope>(
loop_ws, body_net_def_, num_loop_carried_deps);
// Body graph has 1+N+K outputs: recalculated condition variable, N
// loop-carried dependencies, and K scan_outputs
int num_scan_outputs =
scope_->net()->external_output().size() - num_loop_carried_deps - 1;
CAFFE_ENFORCE_GE(
num_scan_outputs,
0,
"Body graph must have N+K outputs, where N is the number "
"of loop-carried dependencies and K is the number of scan "
"outputs");
// Copy initial loop-carried dependencies
for (const auto i : c10::irange(num_loop_carried_deps)) {
scope_->lcd_tensor(i)->CopyFrom(Input(i + num_inputs_before_lcds));
}
// Initialize iteration variable
scope_->set_iteration(0ll);
// Initialize input condition variable
scope_->template set_input_condition<CondVarType>(first_iter_condition);
auto valid_iter_num = [this, max_trip_count](int64_t i) {
if (has_trip_count_) {
return i < max_trip_count;
} else {
return true;
}
};
auto condition_true = [this, first_iter_condition](
int64_t i, bool cond_value) {
if (has_cond_) {
if (i == 0) {
return (bool)first_iter_condition;
} else {
return cond_value;
}
} else {
return true;
}
};
// Allocate scan_outputs for zero-iteration case
for (const auto i : c10::irange(num_scan_outputs)) {
Output(i + num_loop_carried_deps)->Resize(0);
Output(i + num_loop_carried_deps)->template mutable_data<int32_t>();
}
// Use this to keep track of the sizes of the scan outputs and validate
// they're the same across iterations.
std::vector<std::vector<int64_t>> scan_outputs_sizes;
Workspace* cur_ws = nullptr;
bool cur_output_condition = false;
while (true) {
int64_t itr = scope_->iteration();
if (valid_iter_num(itr) && condition_true(itr, cur_output_condition)) {
if (!scope_->net()->Run()) {
return false;
}
cur_ws = scope_->workspace();
cur_output_condition = scope_->template output_condition<CondVarType>();
if (save_scopes_) {
loop_ws = ws_stack_.pushForwardWorkspace(parent_ws_).get();
scope_ = std::make_shared<LocalScope>(
loop_ws, body_net_def_, num_loop_carried_deps);
}
// Copy forward loop-carried dependencies
for (const auto i : c10::irange(num_loop_carried_deps)) {
Blob* b = cur_ws->GetBlob(scope_->net()->external_output()[i + 1]);
const Tensor& t = b->template Get<Tensor>();
scope_->lcd_tensor(i)->CopyFrom(t);
}
// Copy out scan_outputs
for (const auto i : c10::irange(num_scan_outputs)) {
int net_output_idx = i + 1 + num_loop_carried_deps;
const Tensor& scan_output =
cur_ws->GetBlob(scope_->net()->external_output()[net_output_idx])
->template Get<Tensor>();
auto* scan_output_target = Output(i + num_loop_carried_deps);
if (itr == 0) {
auto dims = scan_output.sizes().vec();
scan_outputs_sizes.push_back(dims);
dims.insert(dims.begin(), 1);
scan_output_target->Resize(dims);
scan_output_target->CopyFrom(scan_output);
} else {
auto dims = scan_output.sizes().vec();
CAFFE_ENFORCE_EQ(
dims,
scan_outputs_sizes[i],
"Size of scan output changed across iterations");
dims.insert(dims.begin(), itr);
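            // Grow the output by one timestep; the 100 passed to Extend is
            // the growth percentage used to amortize reallocations.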
scan_output_target->Extend(1, 100);
int64_t timestep_size = 1;
for (const int64_t t : scan_outputs_sizes[i]) {
timestep_size *= t;
}
const void* src_data = scan_output.raw_data();
auto& sot_meta = scan_output_target->dtype();
void* dst_data =
(char*)scan_output_target->raw_mutable_data(sot_meta) +
timestep_size * scan_output.itemsize() * itr;
memcpy(dst_data, src_data, timestep_size * scan_output.itemsize());
}
}
scope_->set_iteration(itr + 1ll);
scope_->template set_input_condition<CondVarType>(cur_output_condition);
} else {
break;
}
}
// Copy out final loop-carried dependencies
for (const auto i : c10::irange(num_loop_carried_deps)) {
Output(i)->CopyFrom(*scope_->lcd_tensor(i));
}
return true;
}
private:
class LocalScope {
public:
LocalScope(Workspace* loop_ws, const NetDef& body_net_def, size_t num_lcds)
: loop_ws_(loop_ws) {
CAFFE_ENFORCE(loop_ws_, "Failed to initialize local loop workspace");
// Create loop-carried deps in Workspace
lcd_tensors_.clear();
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int i = 2; i < num_lcds + 2; ++i) {
Blob* b = loop_ws_->CreateBlob(body_net_def.external_input(i));
Tensor* t = BlobGetMutableTensor(b, Context::GetDeviceType());
lcd_tensors_.push_back(t);
}
// First output is the iteration variable
auto* iteration_var_blob =
loop_ws_->CreateBlob(body_net_def.external_input(0));
iteration_var_ =
BlobGetMutableTensor(iteration_var_blob, Context::GetDeviceType());
input_condition_var_ = BlobGetMutableTensor(
loop_ws_->CreateBlob(body_net_def.external_input(1)),
Context::GetDeviceType());
auto* condition_var_blob =
loop_ws_->CreateBlob(body_net_def.external_output(0));
condition_var_ =
BlobGetMutableTensor(condition_var_blob, Context::GetDeviceType());
condition_var_->Resize(1);
condition_var_->template mutable_data<bool>();
body_net_ = loop_ws_->GetNet(body_net_def.name());
if (!body_net_) {
body_net_ = loop_ws_->CreateNet(body_net_def, true);
}
CAFFE_ENFORCE(body_net_, "Failed to initialize loop subnet");
}
NetBase* net() const {
return body_net_;
}
Workspace* workspace() const {
return loop_ws_;
}
int64_t iteration() const {
auto* iteration_var_ptr =
iteration_var_->template mutable_data<int64_t>();
return *iteration_var_ptr;
}
Tensor* lcd_tensor(int idx) {
return lcd_tensors_[idx];
}
void set_iteration(int64_t itr) {
iteration_var_->Resize();
auto* iteration_var_ptr =
iteration_var_->template mutable_data<int64_t>();
*iteration_var_ptr = itr;
}
template <typename CondVarType>
void set_input_condition(bool cond_value) {
input_condition_var_->Resize(1);
auto* input_condition_var_ptr =
input_condition_var_->template mutable_data<CondVarType>();
*input_condition_var_ptr = cond_value;
}
template <typename CondVarType>
bool output_condition() const {
auto* condition_var_ptr =
condition_var_->template mutable_data<CondVarType>();
return *condition_var_ptr;
}
private:
Workspace* loop_ws_;
NetBase* body_net_; // owned by a workspace
Tensor* iteration_var_;
Tensor* input_condition_var_;
Tensor* condition_var_;
std::vector<Tensor*> lcd_tensors_;
};
NetDef body_net_def_;
Workspace* parent_ws_;
detail::WorkspaceStack ws_stack_;
bool has_trip_count_;
bool has_cond_;
bool save_scopes_;
bool disable_scopes_;
int64_t num_loop_carried_deps_;
std::shared_ptr<LocalScope> scope_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ONNX_WHILE_OP_H
| 10,733
| 32.648903
| 80
|
h
|
null |
pytorch-main/caffe2/operators/op_utils_cudnn.h
|
#ifndef CAFFE2_OPERATORS_CUDNN_OP_UTILS_H_
#define CAFFE2_OPERATORS_CUDNN_OP_UTILS_H_
#include "caffe2/core/cudnn_wrappers.h"
namespace caffe2 {
// In the early days, Caffe set the default cudnn workspace to 8MB. We bump
// it up to 64MB in Caffe2, as this enables the use of Winograd in many cases,
// something very beneficial to more recent CNN models.
static constexpr size_t kCONV_CUDNN_WORKSPACE_LIMIT_BYTES = 64 * 1024 * 1024;
// Manually specified number of algorithms implemented in CuDNN.
// This does not have any performance implications, as we will always find the
// fastest algorithm; setting these to the right counts simply lets us report
// complete statistics when doing an exhaustive search.
#if CUDNN_VERSION_MIN(7, 0, 0)
// Note: Double each of these due to potential
// tensorcore + non-tensorcore versions
// which are treated as separate returned algos
static constexpr size_t kNUM_CUDNN_FWD_ALGS =
2 * CUDNN_CONVOLUTION_FWD_ALGO_COUNT;
static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS =
2 * CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT;
static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS =
2 * CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT;
#else
static constexpr size_t kNUM_CUDNN_FWD_ALGS = 7;
static constexpr size_t kNUM_CUDNN_BWD_FILTER_ALGS = 4;
static constexpr size_t kNUM_CUDNN_BWD_DATA_ALGS = 5;
#endif
namespace {
template <typename ArrayOfcudnnConvolutionAlgoPerf_t>
inline void LogCuDNNPerfStats(
const ArrayOfcudnnConvolutionAlgoPerf_t& perf_stat,
int returned_algo_count) {
VLOG(1) << "Perf result: (algo: stat, time, memory)";
for (const auto i : c10::irange(returned_algo_count)) {
const auto& stat = perf_stat[i];
VLOG(1) << stat.algo << ": " << stat.status << " " << stat.time << " "
<< stat.memory;
}
}
} // namespace
// Easier indexing into force_algo_ vector,
// shared by CudnnConvTransposeOpBase and CudnnConvOpBase to force
// usage of a particular algorithm instead of searching
enum { ALGO_FWD = 0, ALGO_WGRAD = 1, ALGO_DGRAD = 2 };
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CUDNN_OP_UTILS_H_
| 2,120
| 37.563636
| 80
|
h
|
null |
pytorch-main/caffe2/operators/operator_fallback_gpu.h
|
#ifndef CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_
#define CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/operator.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
/**
* @brief A templated class to allow one to wrap a CPU operator as a CUDA
* operator.
*
* This class can be used when one does not have the CUDA implementation ready
* yet for an operator. Essentially, what this op does is to automatically
* deal with data copy for you. Plausibly, this causes a lot of overhead and
* is not optimal, so you should use this operator mostly for quick prototyping
 * purposes.
*
* All the input and output of the original operator should be TensorCPU.
*
* Example usage: if you have a class MyMagicOp that is CPU based, and you use
* the registration code
* REGISTER_CPU_OPERATOR(MyMagic, MyMagicOp);
* to register the CPU side, you can create its corresponding GPU operator
* (with performance hits of course) via
* REGISTER_CUDA_OPERATOR(MyMagic,
* GPUFallbackOp);
* Note that you will need to make sure that the operators actually share the
* same name.
*
* Advanced usage: if you want to have some specific outputs never copied, you
* can use the SkipOutputCopy template argument to do that. For example, if
* MyMagic produces two outputs and the first output is always going to live on
* the CPU, you can do
* REGISTER_CUDA_OPERATOR(MyMagic,
* GPUFallbackOpEx<SkipIndices<0>>);
*/
template <typename SkipOutputCopy>
class GPUFallbackOpEx final : public Operator<CUDAContext> {
public:
USE_OPERATOR_FUNCTIONS(CUDAContext);
explicit GPUFallbackOpEx(const OperatorDef& def, Workspace* ws)
: Operator<CUDAContext>(def, ws) {
CAFFE_ENFORCE_EQ(def.device_option().device_type(), PROTO_CUDA);
OperatorDef base_def_(def);
// base_def_ runs on CPU, so we will set its device option to CPU.
base_def_.clear_device_option();
base_def_.mutable_device_option()->set_device_type(PROTO_CPU);
// Set up the symbols for the local workspace.
for (const string& name : def.input()) {
local_input_blobs_.push_back(local_ws_.CreateBlob(name));
TORCH_CHECK_NOTNULL(local_input_blobs_.back());
}
base_op_ = CreateOperator(base_def_, &local_ws_);
for (const string& name : def.output()) {
local_output_blobs_.push_back(local_ws_.GetBlob(name));
TORCH_CHECK_NOTNULL(local_output_blobs_.back());
}
}
bool RunOnDevice() override {
for (const auto i : c10::irange(InputSize())) {
if (this->InputIsTensorType(i, CUDA)) {
// use sync copy
BlobGetMutableTensor(local_input_blobs_[i], CPU)->CopyFrom(Input(i));
} else {
VLOG(1) << "Input " << i << " is not TensorCUDA. Skipping copy.";
// Note(jiayq): This removes a const but conceptually
// local_input_blobs will only be used as const blob input for the
// base op so we are still fine.
local_input_blobs_[i]->ShareExternal(
const_cast<void*>(OperatorBase::Inputs()[i]->GetRaw()),
OperatorBase::Inputs()[i]->meta());
}
}
if (!base_op_->Run()) {
LOG(ERROR) << "Base op run failed in GPUFallbackOp. Def: "
<< ProtoDebugString(this->debug_def());
return false;
}
for (const auto i : c10::irange(OutputSize())) {
if (SkipOutputCopy::Contains(i)) {
VLOG(1) << "Copy output: index " << i << " skipped.";
continue;
}
CAFFE_ENFORCE(
BlobIsTensorType(*local_output_blobs_[i], CPU),
"GPU fallback op currently does not support non-TensorCPU "
"output type who needs copying.");
Output(i)->CopyFrom(local_output_blobs_[i]->template Get<TensorCPU>());
}
return true;
}
protected:
Workspace local_ws_;
vector<Blob*> local_input_blobs_;
vector<Blob*> local_output_blobs_;
unique_ptr<OperatorBase> base_op_;
};
using GPUFallbackOp = GPUFallbackOpEx<SkipIndices<>>;
} // namespace caffe2
#endif // CAFFE2_OPERATORS_OPERATOR_FALLBACK_H_
| 4,183
| 36.693694
| 79
|
h
|
null |
pytorch-main/caffe2/operators/order_switch_ops.h
|
#ifndef CAFFE2_OPERATORS_ORDER_SWITCH_OPS_H_
#define CAFFE2_OPERATORS_ORDER_SWITCH_OPS_H_
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include <c10/util/irange.h>
#include <vector>
namespace caffe2 {
// Note(Yangqing): I think it is possible to do a more general swapaxes operator
// but I am a little afraid of going down that general path. Only implementing
// the two actually needed ones here.
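// E.g. NHWC2NCHW maps X of shape (N, H, W, C) to Y of shape (N, C, H, W), and
// NCHW2NHWC is its inverse; for ndim > 4 all middle dimensions are treated as
// spatial (HxW).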
template <typename T, class Context>
class NHWC2NCHWOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(NHWC2NCHWOp);
bool RunOnDevice() override {
const auto& X = Input(0);
const int ndim = X.dim();
CAFFE_ENFORCE_GE(ndim, 3);
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
std::vector<int64_t> Y_dims(ndim);
Y_dims[0] = N;
Y_dims[1] = C;
int HxW = 1;
for (const auto i : c10::irange(2, ndim)) {
Y_dims[i] = X.dim32(i - 1);
HxW *= Y_dims[i];
}
auto* Y = Output(0, Y_dims, at::dtype<T>());
if (X.numel() <= 0) {
return true;
}
math::NHWC2NCHW<T, Context>(
N,
C,
HxW,
X.template data<T>(),
Y->template mutable_data<T>(),
&context_);
return true;
}
};
template <typename T, class Context>
class NCHW2NHWCOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(NCHW2NHWCOp);
bool RunOnDevice() override {
const auto& X = Input(0);
const int ndim = X.dim();
CAFFE_ENFORCE_GE(ndim, 3);
const int N = X.dim32(0);
const int C = X.dim32(1);
std::vector<int64_t> Y_dims(ndim);
Y_dims[0] = N;
Y_dims[ndim - 1] = C;
int HxW = 1;
for (int i = 1; i < ndim - 1; ++i) {
Y_dims[i] = X.dim32(i + 1);
HxW *= Y_dims[i];
}
auto* Y = Output(0, Y_dims, at::dtype<T>());
if (X.numel() <= 0) {
return true;
}
math::NCHW2NHWC<T, Context>(
N,
C,
HxW,
X.template data<T>(),
Y->template mutable_data<T>(),
&context_);
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ORDER_SWITCH_OPS_H_
| 2,189
| 22.548387
| 80
|
h
|
null |
pytorch-main/caffe2/operators/pack_rnn_sequence_op.h
|
#ifndef CAFFE2_OPERATORS_PACK_RNN_SEQUENCE_OP_H_
#define CAFFE2_OPERATORS_PACK_RNN_SEQUENCE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
#include <algorithm>
#include <vector>
namespace caffe2 {
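// Packs a concatenated sequence into a padded [rows, cols, ...] tensor when
// Forward is true, and unpacks it otherwise. E.g. with LENGTHS = [2, 3] the
// pack has rows = 3 (the max length) and cols = 2 (the number of sequences);
// entry (r, c) holds step r of sequence c, zero-padded where r >= LENGTHS[c].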
template <class Context, bool Forward>
class PackRNNSequenceOpBase : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit PackRNNSequenceOpBase(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t, float, double>>::call(
this, Input(0));
}
template <typename ValT>
bool DoRunWithType() {
// The value is copied from the sequence to the pack
// if Forward is true, and vice versa
int dim_offset = Forward ? 1 : 2;
auto& values = Input(0);
CAFFE_ENFORCE_GT(values.dim(), dim_offset);
// block_size is the size for each individual feature
int64_t block_size = values.size_from_dim(dim_offset);
auto values_vec = values.template data<ValT>();
auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE_EQ(lengths.dim(), 1);
const auto cols = lengths.numel();
const int32_t* lengths_vec = lengths.template data<int32_t>();
    // the total number of rows is defined as the max value in lengths;
    // if lengths is empty, we set rows = 0 to support zero-length input
const auto rows =
cols ? *std::max_element(lengths_vec, lengths_vec + cols) : 0;
CAFFE_ENFORCE_GE(rows, 0);
int length_sum = 0;
if (cols > 0) {
math::Sum<int, Context>(cols, lengths_vec, &length_sum, &context_);
}
vector<int64_t> shape;
// the output shape is rows * cols for the pack,
// or length_sum for the sequence
if (Forward) {
shape.push_back(rows);
shape.push_back(cols);
} else {
shape.push_back(length_sum);
}
// insert the dim for the feature
shape.insert(
shape.end(), values.sizes().begin() + dim_offset, values.sizes().end());
auto* output = Output(OUTPUTVALUE, shape, at::dtype<ValT>());
auto output_data = output->template mutable_data<ValT>();
// initialize output_data with zero, as it is the default value for padding
// when certain length is smaller than rows
math::Set<ValT, Context>(output->numel(), 0, output_data, &context_);
int32_t offset = 0;
for (const auto c : c10::irange(cols)) {
for (int r = 0; r < lengths_vec[c]; r++) {
auto input_offset = Forward ? (offset + r) : (r * cols + c);
auto output_offset = Forward ? (r * cols + c) : (offset + r);
context_.CopyItemsSameDevice(
values.dtype(),
block_size,
values_vec + input_offset * block_size,
output_data + output_offset * block_size);
}
offset += lengths_vec[c];
}
return true;
}
private:
INPUT_TAGS(INPUTVALUE, LENGTHS);
OUTPUT_TAGS(OUTPUTVALUE);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_PACK_RNN_SEQUENCE_OP_H_
| 3,112
| 31.427083
| 80
|
h
|
null |
pytorch-main/caffe2/operators/pack_segments.h
|
#ifndef CAFFE2_OPERATORS_PACK_SEGMENTS_H_
#define CAFFE2_OPERATORS_PACK_SEGMENTS_H_
#include <atomic>
#include <limits>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(PackSegments)
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(UnpackSegments)
namespace caffe2 {
template <class Context>
class PackSegmentsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_DISPATCH_HELPER;
template <class... Args>
explicit PackSegmentsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
max_length_(this->template GetSingleArgument<int>("max_length", -1)),
pad_minf_(this->template GetSingleArgument<bool>("pad_minf", false)),
return_presence_mask_(this->template GetSingleArgument<bool>(
"return_presence_mask",
false)) {
if (pad_minf_) {
padding_ = -1.0 * std::numeric_limits<float>::infinity();
} else {
padding_ = 0;
}
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int, long>>::call(this, Input(LENGTHS));
}
template <typename T>
bool DoRunWithType();
template <typename T, typename Data_T>
bool DoRunWithType2();
INPUT_TAGS(LENGTHS, DATA);
private:
int64_t max_length_;
bool pad_minf_;
float padding_;
bool return_presence_mask_;
// Scratch space required by the CUDA version
Tensor dev_buffer_{Context::GetDeviceType()};
Tensor dev_lengths_prefix_sum_{Context::GetDeviceType()};
Tensor dev_max_length_{Context::GetDeviceType()};
Tensor host_max_length_{CPU};
};
template <class Context>
class UnpackSegmentsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_DISPATCH_HELPER;
template <class... Args>
explicit UnpackSegmentsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
max_length_(this->template GetSingleArgument<int>("max_length", -1)) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int, long>>::call(this, Input(LENGTHS));
}
template <typename T>
bool DoRunWithType();
template <typename T, typename Data_T>
bool DoRunWithType2();
INPUT_TAGS(LENGTHS, DATA);
private:
int64_t max_length_;
Tensor dev_buffer_{Context::GetDeviceType()};
Tensor dev_lengths_prefix_sum_{Context::GetDeviceType()};
Tensor dev_max_length_{Context::GetDeviceType()};
Tensor dev_num_cell_{Context::GetDeviceType()};
Tensor host_max_length_{CPU};
Tensor host_num_cell_{CPU};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_PACK_SEGMENTS_H_
| 2,738
| 26.39
| 79
|
h
|
null |
pytorch-main/caffe2/operators/pad_op.h
|
#ifndef CAFFE2_OPERATORS_PAD_OP_H_
#define CAFFE2_OPERATORS_PAD_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// Padding mode similar to numpy.
enum class PadMode {
CONSTANT = 0, // pad constant values, with string "constant"
REFLECT = 1, // pads with reflect values, with string "reflect"
EDGE = 2, // pads with the edge values, with string "edge"
};
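// E.g. padding [1, 2, 3] by 2 on each side gives:
//   CONSTANT (value 0): [0, 0, 1, 2, 3, 0, 0]
//   REFLECT: [3, 2, 1, 2, 3, 2, 1]
//   EDGE: [1, 1, 1, 2, 3, 3, 3]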
TORCH_API PadMode StringToPadMode(const string&);
template <typename T, class Context>
class PadImageOp final : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
template <class... Args>
explicit PadImageOp(Args&&... args)
: ConvPoolOpBase<Context>(std::forward<Args>(args)...),
mode_(StringToPadMode(
this->template GetSingleArgument<string>("mode", "constant"))),
value_(static_cast<T>(
this->template GetSingleArgument<float>("value", 0.0))) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::NOTSET,
"Padding layer only supports explicit pad values.");
CAFFE_ENFORCE(
dilation_h() == 1 && dilation_w() == 1,
"Pooling op does not support dilation right now.");
CAFFE_ENFORCE(
stride_h() == 1 && stride_w() == 1,
"Pooling op does not support stride right now.");
// Pad op does not use kernel sizes, so we set it to 1 for computing the
// output size.
kernel_.assign(pads_.size() / 2, 1);
}
~PadImageOp() override {}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
static std::vector<TensorShape> PadTensorInference(
const OperatorDef& def,
const vector<TensorShape>& in);
private:
PadMode mode_;
T value_;
// Input: X
// Output: Y
};
template <typename T, class Context>
class PadImageGradientOp final : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
template <class... Args>
explicit PadImageGradientOp(Args&&... args)
: ConvPoolOpBase<Context>(std::forward<Args>(args)...),
mode_(StringToPadMode(
this->template GetSingleArgument<string>("mode", "constant"))) {
CAFFE_ENFORCE(
legacy_pad_ == LegacyPadding::NOTSET,
"Padding layer only supports explicit pad values.");
CAFFE_ENFORCE(
dilation_h() == 1 && dilation_w() == 1,
"Pooling op does not support dilation right now.");
// Pad op does not use kernel sizes, so we set it to 1 for computing the
// output size.
kernel_.assign(pads_.size() / 2, 1);
}
~PadImageGradientOp() override {}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
PadMode mode_;
// Input: dY
// Output: dX
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_PAD_OP_H_
| 2,920
| 29.747368
| 76
|
h
|
null |
pytorch-main/caffe2/operators/partition_ops.h
|
#ifndef CAFFE2_OPERATORS_PARTITION_OPS_H_
#define CAFFE2_OPERATORS_PARTITION_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <typename Index>
static inline int moduloPartition(Index key, int numPartitions) {
int shard = key % numPartitions;
  // equivalent to `if (shard < 0) shard += numPartitions;`
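  // (shard >> (sizeof(int) * 8 - 1)) is all ones exactly when shard is
  // negative (arithmetic shift of the sign bit), so the mask adds
  // numPartitions back only for negative remainders. E.g.
  // moduloPartition(-3, 4): -3 % 4 == -3 in C++, then -3 + 4 == 1.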
shard += numPartitions & (shard >> (sizeof(int) * 8 - 1));
return shard;
}
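// GatherByKeyOp inverts a modulo partition: given the original 1-D keys plus
// the partitioned value tensors, it writes the values back out in key order.
// E.g. keys = [3, 4, 5] with 2 partitions reads row 0 of partition 0 (key 4)
// and rows 0-1 of partition 1 (keys 3 and 5) to rebuild [v3, v4, v5].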
class GatherByKeyOp : public Operator<CPUContext> {
public:
USE_DISPATCH_HELPER;
USE_OPERATOR_FUNCTIONS(CPUContext);
template <class... Args>
explicit GatherByKeyOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {}
private:
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0));
}
private:
template <typename Index>
bool DoRunWithType() {
const auto numPartitions = InputSize() - 1;
CAFFE_ENFORCE_GE(numPartitions, 1);
const auto& keysTensor = Input(0);
const auto* keysData = keysTensor.template data<Index>();
const auto& keysShape = Input(0).sizes();
CAFFE_ENFORCE_EQ(
keysShape.size(), 1, "Only 1D keys tensor supported currently.");
// 1. Shape and type consistency checks
const auto& in0Shape = Input(1).sizes();
CAFFE_ENFORCE_GE(in0Shape.size(), 1);
vector<int64_t> outShape(keysShape.vec());
outShape.insert(outShape.end(), in0Shape.begin() + 1, in0Shape.end());
CAFFE_ENFORCE_GE(outShape.size(), 1);
auto totalSize = in0Shape[0];
auto meta = Input(1).dtype();
for (const auto i : c10::irange(2, InputSize())) {
const auto& input = Input(i);
CAFFE_ENFORCE(meta == input.dtype());
CAFFE_ENFORCE_GE(input.dim(), 1);
CAFFE_ENFORCE(std::equal(
outShape.begin() + keysShape.size(),
outShape.end(),
input.sizes().begin() + 1));
totalSize += input.size(0);
}
CAFFE_ENFORCE_EQ(keysTensor.numel(), totalSize);
auto* outTensor = Output(0);
outTensor->Resize(outShape);
auto* outData = static_cast<char*>(outTensor->raw_mutable_data(meta));
const auto blockSize = outTensor->size_from_dim(1);
inputDatas_.resize(numPartitions);
for (const auto i : c10::irange(numPartitions)) {
inputDatas_[i] = static_cast<const char*>(Input(i + 1).raw_data());
}
inStartOffsets_.assign(numPartitions, 0);
Index outStartOffset = 0;
int currentShard = -1;
// 2. copy from inputs into output based on shard for each input key
const auto numEntries = keysTensor.numel();
for (int64_t i = 0; i <= numEntries; ++i) {
auto newShard =
i < numEntries ? moduloPartition(keysData[i], numPartitions) : -1;
if (newShard != currentShard) {
if (currentShard != -1) {
auto inStartOffset = inStartOffsets_[currentShard];
auto numItems = i - outStartOffset;
context_.CopyItemsSameDevice(
meta,
numItems * blockSize,
inputDatas_[currentShard] +
inStartOffset * blockSize * meta.itemsize(),
outData + outStartOffset * blockSize * meta.itemsize());
inStartOffsets_[currentShard] += numItems;
}
currentShard = newShard;
outStartOffset = i;
}
}
return true;
}
std::vector<const char*> inputDatas_;
std::vector<int64_t> inStartOffsets_;
};
class PartitionOpBase : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
template <class... Args>
explicit PartitionOpBase(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "pack_first_input", pack_first_input_, 0) {}
protected:
template <typename Index>
void ApplyPartition(bool skipFirstArgument) {
CAFFE_ENFORCE_EQ(
OutputSize() % InputSize(),
0,
"Output number must be a multiple of input number");
int partitions = OutputSize() / InputSize();
int inputSize = InputSize();
int mainInputIndex = skipFirstArgument;
CAFFE_ENFORCE_GT(partitions, 0, "Invalid number of partitions");
auto& main_input = Input(mainInputIndex);
int64_t size = main_input.numel();
const Index* data = main_input.template data<Index>();
counts_.assign(partitions, 0);
for (const auto p : c10::irange(size)) {
int shard = moduloPartition(data[p], partitions);
++counts_[shard];
}
raw_datas_.resize(inputSize);
block_sizes_.resize(inputSize);
metas_.resize(inputSize);
out_datas_.resize(OutputSize());
for (const auto i : c10::irange(mainInputIndex, inputSize)) {
auto& input = Input(i);
if (i > mainInputIndex) {
CAFFE_ENFORCE_GE(
input.dim(),
main_input.dim(),
"Prefix of extra input's shape must match main input's shape, ",
"input: ",
i);
for (const auto j : c10::irange(main_input.dim())) {
CAFFE_ENFORCE_GE(
input.size(j),
main_input.size(j),
"Prefix of extra input's shape must match main input's shape, ",
"input: ",
i,
", dim ",
j);
}
}
raw_datas_[i] = input.raw_data();
block_sizes_[i] = input.size_from_dim(main_input.dim());
metas_[i] = input.dtype();
// shape = partition_size + suffix of input dims
vector<int64_t> shape(
input.sizes().begin() + main_input.dim() - 1, input.sizes().end());
for (const auto j : c10::irange(partitions)) {
int out_idx = i + j * inputSize;
auto output = Output(out_idx);
shape[0] = counts_[j];
output->Resize(shape);
out_datas_[out_idx] = output->raw_mutable_data(input.dtype());
}
}
counts_.assign(partitions, 0);
for (const auto p : c10::irange(size)) {
int shard = moduloPartition(data[p], partitions);
int64_t idx = counts_[shard]++;
// special case first input
static_cast<Index*>(out_datas_[shard * inputSize + mainInputIndex])[idx] =
pack_first_input_ ? ((data[p] - shard) / partitions) : data[p];
int baseIndex = shard * inputSize;
for (int i = mainInputIndex + 1; i < inputSize; ++i) {
auto bs = block_sizes_[i];
auto meta = metas_[i];
// special case for small bs?
context_.CopyItemsSameDevice(
meta,
bs,
static_cast<const char*>(raw_datas_[i]) + p * bs * meta.itemsize(),
static_cast<char*>(out_datas_[baseIndex + i]) +
idx * bs * meta.itemsize());
}
}
}
bool pack_first_input_;
// use member fields to reuse memory
vector<int64_t> counts_;
vector<int64_t> block_sizes_;
vector<TypeMeta> metas_;
vector<const void*> raw_datas_;
vector<void*> out_datas_;
};
class PartitionOp : public PartitionOpBase {
public:
USE_DISPATCH_HELPER;
template <class... Args>
explicit PartitionOp(Args&&... args)
: PartitionOpBase(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(0));
}
private:
template <typename Index>
bool DoRunWithType() {
ApplyPartition<Index>(false /* skipFirstArgument */);
return true;
}
C10_DISABLE_COPY_AND_ASSIGN(PartitionOp);
};
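// LengthsPartitionOp shards a lengths-prefixed list: Input(0) is a LENGTHS
// tensor and Input(1) holds the keys. E.g. lengths = [2, 1], keys = [3, 4, 5]
// with 2 partitions sends key 4 to partition 0 and keys 3, 5 to partition 1,
// producing per-partition lengths [1, 0] and [1, 1] (one entry per original
// row).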
class LengthsPartitionOp : public PartitionOpBase {
public:
USE_DISPATCH_HELPER;
template <class... Args>
explicit LengthsPartitionOp(Args&&... args)
: PartitionOpBase(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(1));
}
private:
template <typename Index>
bool DoRunWithType() {
CAFFE_ENFORCE(
OutputSize() % InputSize() == 0,
"Output number must be a multiple of input number");
int partitions = OutputSize() / InputSize();
CAFFE_ENFORCE_GT(partitions, 0, "Invalid number of partitions");
CAFFE_ENFORCE_EQ(
Input(1).dim(),
1,
"Only 1-D tensors supported as a partitioning tensor for sharding");
if (partitions == 1) {
// Specialization when partitions == 1 which just becomes a copy.
for (const auto i : c10::irange(InputSize())) {
auto& input = Input(i);
auto& output = *Output(i);
output.ResizeLike(input);
context_.CopyItemsSameDevice(
input.dtype(),
input.numel(),
input.raw_data(),
output.raw_mutable_data(input.dtype()));
}
return true;
}
// Apply sharding to all parameters except lengths
ApplyPartition<Index>(true /* skipFirstArgument */);
// Compute lengths after sharding
auto& main_input = Input(1);
int64_t size = main_input.numel();
const Index* data = main_input.template data<Index>();
auto& length_input = Input(0);
int64_t elements = length_input.numel();
const int32_t* lengths_data = length_input.template data<int32_t>();
out_length_.resize(partitions);
for (const auto i : c10::irange(partitions)) {
auto& output = *Output(i * InputSize());
output.Resize(elements);
out_length_[i] = output.template mutable_data<int32_t>();
}
int total_length = 0;
for (const auto i : c10::irange(elements)) {
total_length += lengths_data[i];
}
CAFFE_ENFORCE(
total_length == size,
"Total length is not matching to the number of elements");
int index = 0;
for (const auto i : c10::irange(elements)) {
for (const auto j : c10::irange(partitions)) {
out_length_[j][i] = 0;
}
for (int j = 0; j < lengths_data[i]; ++j, ++index) {
int shard = moduloPartition(data[index], partitions);
++out_length_[shard][i];
}
}
return true;
}
C10_DISABLE_COPY_AND_ASSIGN(LengthsPartitionOp);
vector<int32_t*> out_length_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_PARTITION_OPS_H_
| 10,081
| 30.704403
| 80
|
h
|
null |
pytorch-main/caffe2/operators/percentile_op.h
|
// Operator to calculate percentile values for an input tensor of data,
// given samples of data from the same distribution, labeled with their
// percentile values.
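// E.g. (illustrative only, assuming linear interpolation between labeled
// samples): if a feature's (value, percentile) pairs include (2.0, 0.25) and
// (4.0, 0.75), an input value of 3.0 maps to percentile 0.5.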
#ifndef CAFFE2_OPERATORS_PERCENTILE_OP_H_
#define CAFFE2_OPERATORS_PERCENTILE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/math.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(Percentile);
namespace caffe2 {
template <class Context>
class PercentileOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit PercentileOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override;
protected:
INPUT_TAGS(X, VAL_PCT_PAIRS, LENS);
OUTPUT_TAGS(PCT);
Tensor values_tensor;
Tensor percentiles_tensor;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_PERCENTILE_OP_H_
| 1,009
| 24.897436
| 71
|
h
|
null |
pytorch-main/caffe2/operators/piecewise_linear_transform_op.h
|
#ifndef CAFFE2_OPERATORS_PIECEWISE_LINEAR_TRANSFORM_OP_H_
#define CAFFE2_OPERATORS_PIECEWISE_LINEAR_TRANSFORM_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include <c10/util/irange.h>
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(PiecewiseLinearTransform);
namespace caffe2 {
template <typename T, class Context>
class PiecewiseLinearTransformOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit PiecewiseLinearTransformOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
binary_ = this->template GetSingleArgument<bool>("binary", false);
// Retrieve transform params (i.e., the linear functions).
bounds_from_arg_ = this->template GetRepeatedArgument<T>("bounds");
slopes_from_arg_ = this->template GetRepeatedArgument<T>("slopes");
intercepts_from_arg_ = this->template GetRepeatedArgument<T>("intercepts");
transform_param_from_arg_ = CheckTransParamFromArg();
}
bool RunOnDevice() override {
return binary_ ? TransformBinary() : TransformGeneral();
}
private:
  // num_func_per_group: the number of pieces of linear functions in
  // each group.
  // num_group: the number of groups of linear functions. Each group is for
  // transforming one column of predictions.
void InferNumFunctionsPerGroup(
const int64_t num_bounds,
const int64_t num_slopes,
const int64_t num_intercepts,
int64_t* num_func_per_group,
int64_t* num_group) {
CAFFE_ENFORCE_EQ(num_slopes, num_intercepts);
// This is based on the facts:
// 1. in each group, the num of bounds minus the num of slopes is 1;
// 2. each group has the same number of pieces.
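    // E.g. num_bounds = 6 and num_slopes = num_intercepts = 4 give
    // num_group = 2 and num_func_per_group = 2: each group has 3 bounds
    // delimiting its 2 pieces.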
*num_group = num_bounds - num_slopes;
CAFFE_ENFORCE_GT(*num_group, 0);
if (binary_) {
CAFFE_ENFORCE_EQ(*num_group, 1);
}
*num_func_per_group = num_slopes / *num_group;
CAFFE_ENFORCE_GT(*num_func_per_group, 0);
CAFFE_ENFORCE_EQ(num_slopes % *num_group, 0);
}
bool CheckBoundsSorted(
const T* bounds,
const int64_t num_bounds_per_group,
const int64_t num_group) {
const T* start = bounds;
for (const auto i : c10::irange(num_group)) {
(void)i; // CUDA-10.2 on Windows crashes when C10_UNUSED macro is used
if (!std::is_sorted(start, start + num_bounds_per_group)) {
return false;
}
start += num_bounds_per_group;
}
return true;
}
// Returns true if the transform params from arg are valid.
  // Otherwise, we assume the transform params will be passed via Input blobs.
bool CheckTransParamFromArg() {
int good_param = 0;
good_param += bounds_from_arg_.size() > 0;
good_param += slopes_from_arg_.size() > 0;
good_param += intercepts_from_arg_.size() > 0;
CAFFE_ENFORCE(
good_param == 0 || good_param == 3,
"bounds, slopes, intercepts must be all set or all not set");
if (good_param == 3) {
int64_t num_func_per_group;
int64_t num_group;
InferNumFunctionsPerGroup(
bounds_from_arg_.size(),
slopes_from_arg_.size(),
intercepts_from_arg_.size(),
&num_func_per_group,
&num_group);
CAFFE_ENFORCE(
CheckBoundsSorted(
bounds_from_arg_.data(), num_func_per_group + 1, num_group),
"bounds must be sorted for each group");
}
return good_param == 3;
}
void setUpTensors(int64_t& num_func_per_group, int64_t& num_group, int64_t M);
void GetTransParamData(
const T** bounds,
const T** slopes,
const T** intercepts,
int64_t* num_func_per_group,
int64_t* num_group) {
int64_t num_bounds;
int64_t num_slopes;
int64_t num_intercepts;
if (transform_param_from_arg_) {
CAFFE_ENFORCE_EQ(InputSize(), 1);
*bounds = bounds_from_arg_.data();
*slopes = slopes_from_arg_.data();
*intercepts = intercepts_from_arg_.data();
num_bounds = bounds_from_arg_.size();
num_slopes = slopes_from_arg_.size();
num_intercepts = intercepts_from_arg_.size();
} else {
CAFFE_ENFORCE_EQ(InputSize(), 4);
auto& bounds_input = Input(BOUNDS);
auto& slopes_input = Input(SLOPES);
auto& intercepts_input = Input(INTERCEPTS);
*bounds = bounds_input.template data<T>();
*slopes = slopes_input.template data<T>();
*intercepts = intercepts_input.template data<T>();
num_bounds = bounds_input.numel();
num_slopes = slopes_input.numel();
num_intercepts = intercepts_input.numel();
}
InferNumFunctionsPerGroup(
num_bounds, num_slopes, num_intercepts, num_func_per_group, num_group);
}
bool TransformGeneral() {
auto& X = Input(0);
CAFFE_ENFORCE_EQ(X.dim(), 2);
int64_t N = X.dim32(0);
int64_t M = X.dim32(1);
auto* Y = Output(0, X.sizes(), at::dtype<T>());
const auto* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
const T* bounds;
const T* slopes;
const T* intercepts;
int64_t num_func_per_group;
int64_t num_group;
GetTransParamData(
&bounds, &slopes, &intercepts, &num_func_per_group, &num_group);
CAFFE_ENFORCE_EQ(num_group, M);
for (const auto j : c10::irange(M)) {
const T* bounds_group = bounds + j * (num_func_per_group + 1);
const T* slopes_group = slopes + j * num_func_per_group;
const T* intercepts_group = intercepts + j * num_func_per_group;
for (const auto i : c10::irange(N)) {
Ydata[i * M + j] = PiecewiseLinearTransform(
Xdata[i * M + j],
bounds_group,
slopes_group,
intercepts_group,
num_func_per_group);
}
}
return true;
}
bool TransformBinary() {
auto& X = Input(PREDICTIONS);
CAFFE_ENFORCE(X.dim() == 1 || X.dim() == 2);
int64_t N = X.dim32(0);
int64_t M = X.dim() == 2 ? X.dim32(1) : 1;
CAFFE_ENFORCE(
M == 1 || M == 2,
"If binary is set to true, the input must be Nx2 or Nx1 tensor");
auto* Y = Output(0, X.sizes(), at::dtype<T>());
const auto* Xdata = X.template data<T>();
T* Ydata = Y->template mutable_data<T>();
const T* bounds;
const T* slopes;
const T* intercepts;
int64_t num_func_per_group;
int64_t num_group;
GetTransParamData(
&bounds, &slopes, &intercepts, &num_func_per_group, &num_group);
CAFFE_ENFORCE_EQ(num_group, 1);
if (M == 1) {
for (const auto i : c10::irange(N)) {
Ydata[i] = PiecewiseLinearTransform(
Xdata[i], bounds, slopes, intercepts, num_func_per_group);
}
} else {
for (const auto i : c10::irange(N)) {
Ydata[i * M + 1] = PiecewiseLinearTransform(
Xdata[i * M + 1], bounds, slopes, intercepts, num_func_per_group);
Ydata[i * M] = 1.0f - Ydata[i * M + 1];
}
}
return true;
}
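  // E.g. bounds = {0, 1, 2}, slopes = {1, 2}, intercepts = {0, -1} (one
  // group, two pieces): x = 1.5 falls in [1, 2), so bounds_idx = 1 and
  // y = 2 * 1.5 + (-1) = 2.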
T PiecewiseLinearTransform(
const T x,
const T* bounds,
const T* slopes,
const T* intercepts,
const int64_t num_func_per_group) {
T y = 0;
    // Deal with out-of-bounds samples: clamp to the value at the
    // lower/upper bound.
if (x <= bounds[0]) {
y = slopes[0] * bounds[0] + intercepts[0];
} else if (x >= bounds[num_func_per_group]) {
y = slopes[num_func_per_group - 1] * bounds[num_func_per_group] +
intercepts[num_func_per_group - 1];
} else {
auto low_bound =
std::lower_bound(bounds, bounds + num_func_per_group + 1, x);
int bounds_idx = low_bound - bounds - 1;
// compute the piecewise linear transformation as Y
y = slopes[bounds_idx] * x + intercepts[bounds_idx];
}
return y;
}
private:
bool binary_;
vector<T> bounds_from_arg_;
vector<T> slopes_from_arg_;
vector<T> intercepts_from_arg_;
Tensor bounds_device_{Context::GetDeviceType()};
Tensor intercepts_device_{Context::GetDeviceType()};
Tensor slopes_device_{Context::GetDeviceType()};
bool gpu_copied_ = false;
// If true, the piecewise linear functions are passed through args,
// otherwise, they are passed through Input blobs.
bool transform_param_from_arg_;
INPUT_TAGS(PREDICTIONS, BOUNDS, SLOPES, INTERCEPTS);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_PIECEWISE_LINEAR_TRANSFORM_OP_H_
| 8,407
| 31.715953
| 80
|
h
|
null |
pytorch-main/caffe2/operators/pool_op.h
|
#ifndef CAFFE2_OPERATORS_POOL_OP_H_
#define CAFFE2_OPERATORS_POOL_OP_H_
#include <vector>
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_pool_op_base.h"
namespace caffe2 {
template <typename T, class Context, class Functor>
class PoolOp final : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
template <class... Args>
explicit PoolOp(Args&&... args)
: ConvPoolOpBase<Context>(std::forward<Args>(args)...), functor_(*this) {
const int kernel_size = kernel_.size();
for (const auto i : c10::irange(kernel_size)) {
CAFFE_ENFORCE_EQ(
dilation_[i], 1, "Pooling op does not support dilation right now.");
}
if (!global_pooling_) {
for (const auto i : c10::irange(kernel_size)) {
CAFFE_ENFORCE(
pads_[i] < kernel_[i] && pads_[i + kernel_size] < kernel_[i],
"Pad should be smaller than kernel.");
}
}
}
~PoolOp() override = default;
bool RunOnDeviceWithOrderNCHW() override {
const auto& X = Input(0);
auto* Y = Output(0);
const int N = X.dim32(0);
const int C = X.dim32(1);
ConvPoolOpBase<Context>::SetOutputSize(X, Y, C);
const T* X_data = X.template data<T>();
T* Y_data = Y->template mutable_data<T>();
if (N == 0) {
return true;
}
if (global_pooling_) {
const int HxW = X.numel() / (N * C);
return functor_.template GlobalPoolingForward<T, StorageOrder::NCHW>(
N, C, HxW, X_data, Y_data, &context_);
}
const std::vector<int> X_HW_dims = GetDims(X);
const std::vector<int> Y_HW_dims = GetDims(*Y);
return functor_.template Forward<T, StorageOrder::NCHW>(
N,
C,
X_HW_dims,
Y_HW_dims,
kernel_,
dilation_,
stride_,
pads_,
X.template data<T>(),
Y->template mutable_data<T>(),
&context_);
}
bool RunOnDeviceWithOrderNHWC() override {
const auto& X = Input(0);
auto* Y = Output(0);
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
ConvPoolOpBase<Context>::SetOutputSize(X, Y, C);
const T* X_data = X.template data<T>();
T* Y_data = Y->template mutable_data<T>();
if (N == 0) {
return true;
}
if (global_pooling_) {
const int HxW = X.numel() / (N * C);
return functor_.template GlobalPoolingForward<T, StorageOrder::NHWC>(
N, C, HxW, X_data, Y_data, &context_);
}
const std::vector<int> X_HW_dims = GetDims(X);
const std::vector<int> Y_HW_dims = GetDims(*Y);
return functor_.template Forward<T, StorageOrder::NHWC>(
N,
C,
X_HW_dims,
Y_HW_dims,
kernel_,
dilation_,
stride_,
pads_,
X.template data<T>(),
Y->template mutable_data<T>(),
&context_);
}
private:
const Functor functor_;
};
template <typename T, class Context, class Functor>
class PoolGradientOp final : public ConvPoolOpBase<Context> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(Context);
template <class... Args>
explicit PoolGradientOp(Args&&... args)
: ConvPoolOpBase<Context>(std::forward<Args>(args)...), functor_(*this) {}
~PoolGradientOp() override = default;
bool RunOnDeviceWithOrderNCHW() override {
const auto& X = Input(0);
const auto& Y = Input(1);
const auto& dY = Input(2);
auto* dX = Output(0, X.sizes(), at::dtype<T>());
const int N = X.dim32(0);
const int C = X.dim32(1);
const std::vector<int> X_HW_dims = GetDims(X);
const std::vector<int> Y_HW_dims = GetDims(Y);
ConvPoolOpBase<Context>::ComputePads(X_HW_dims);
const T* dY_data = dY.template data<T>();
const T* X_data = X.template data<T>();
const T* Y_data = Y.template data<T>();
T* dX_data = dX->template mutable_data<T>();
if (N == 0) {
return true;
}
if (global_pooling_) {
const int HxW = X.numel() / (N * C);
return functor_.template GlobalPoolingBackward<T, StorageOrder::NCHW>(
N, C, HxW, dY_data, X_data, Y_data, dX_data, &context_);
}
return functor_.template Backward<T, StorageOrder::NCHW>(
N,
C,
X_HW_dims,
Y_HW_dims,
kernel_,
dilation_,
stride_,
pads_,
dY_data,
X_data,
Y_data,
dX_data,
&context_);
}
bool RunOnDeviceWithOrderNHWC() override {
const auto& X = Input(0);
const auto& Y = Input(1);
const auto& dY = Input(2);
auto* dX = Output(0, X.sizes(), at::dtype<T>());
const int ndim = X.dim();
const int N = X.dim32(0);
const int C = X.dim32(ndim - 1);
const std::vector<int> X_HW_dims = GetDims(X);
const std::vector<int> Y_HW_dims = GetDims(Y);
ConvPoolOpBase<Context>::ComputePads(X_HW_dims);
const T* dY_data = dY.template data<T>();
const T* X_data = X.template data<T>();
const T* Y_data = Y.template data<T>();
T* dX_data = dX->template mutable_data<T>();
if (N == 0) {
return true;
}
if (global_pooling_) {
const int HxW = X.numel() / (N * C);
return functor_.template GlobalPoolingBackward<T, StorageOrder::NHWC>(
N, C, HxW, dY_data, X_data, Y_data, dX_data, &context_);
}
return functor_.template Backward<T, StorageOrder::NHWC>(
N,
C,
X_HW_dims,
Y_HW_dims,
kernel_,
dilation_,
stride_,
pads_,
dY_data,
X_data,
Y_data,
dX_data,
&context_);
}
private:
const Functor functor_;
};
template <class Context>
struct AveragePoolFunctor {
explicit AveragePoolFunctor(const OperatorBase& op)
: count_include_pad(
op.template GetSingleArgument<bool>("count_include_pad", false)) {}
template <typename T, StorageOrder kOrder>
bool GlobalPoolingForward(
int N,
int C,
int HxW,
const T* X,
T* Y,
Context* context) const;
template <typename T, StorageOrder kOrder>
bool Forward(
int N,
int C,
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const std::vector<int>& kernel,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& pads,
const T* X,
T* Y,
Context* context) const;
template <typename T, StorageOrder kOrder>
bool GlobalPoolingBackward(
int N,
int C,
int HxW,
const T* dY,
const T* X,
const T* Y,
T* dX,
Context* context) const;
template <typename T, StorageOrder kOrder>
bool Backward(
int N,
int C,
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const std::vector<int>& kernel,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& pads,
const T* dY,
const T* X,
const T* Y,
T* dX,
Context* context) const;
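  // When true, the averaging divisor is the full kernel size even where the
  // window overlaps padding; otherwise only in-bounds elements are counted.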
const bool count_include_pad;
Tensor ones{Context::GetDeviceType()};
};
template <class Context>
struct MaxPoolFunctor {
explicit MaxPoolFunctor(const OperatorBase& /* op */) {}
template <typename T, StorageOrder kOrder>
bool GlobalPoolingForward(
int N,
int C,
int HxW,
const T* X,
T* Y,
Context* context) const;
template <typename T, StorageOrder kOrder>
bool Forward(
int N,
int C,
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const std::vector<int>& kernel,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& pads,
const T* X,
T* Y,
Context* context) const;
template <typename T, StorageOrder kOrder>
bool GlobalPoolingBackward(
int N,
int C,
int HxW,
const T* dY,
const T* X,
const T* Y,
T* dX,
Context* context) const;
template <typename T, StorageOrder kOrder>
bool Backward(
int N,
int C,
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const std::vector<int>& kernel,
const std::vector<int>& dilation,
const std::vector<int>& stride,
const std::vector<int>& pads,
const T* dY,
const T* X,
const T* Y,
T* dX,
Context* context) const;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_POOL_OP_H_
| 8,559
| 26.088608
| 80
|
h
|
null |
pytorch-main/caffe2/operators/pow_op.h
|
#ifndef CAFFE2_OPERATORS_POW_OP_H_
#define CAFFE2_OPERATORS_POW_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/operators/elementwise_ops_utils.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <
typename InputTypes,
class Context,
class Functor,
class TypeMap = SameTypeAsInput>
class PowOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit PowOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(bool, "broadcast", enable_broadcast_, 0),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, ""),
OP_SINGLE_ARG(string, "order", order_, "NCHW"),
functor_() {
if ((InputSize() == 1) && HasArgument("exponent")) { // UnaryElementwiseOp
exponent_ = this->template GetSingleArgument<float>(
"exponent", 0); // based on pow_ops.h
} else if (InputSize() == 2) { // BinaryElementwiseOp
// Figure out the correct axis to use.
if (enable_broadcast_) {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0U,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
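        // E.g. order_ = "NCHW" with axis_str_ = "C" resolves to axis_ = 1.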
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1U, "Unsupported axis string", axis_str_);
size_t semantic_axis_ = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis_,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis_;
}
} else {
CAFFE_ENFORCE(
axis_ == -1 && axis_str_.empty(),
"Do not specify axis or axis_str if broadcast is not enabled.");
}
} else {
CAFFE_THROW(
"Only a tensor with an argument or two input tensors are supported as input to pow operator.");
}
}
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
if ((InputSize() == 1) && HasArgument("exponent")) { // UnaryElementwiseOp
const auto& A = Input(0);
auto* C =
Output(0, A.sizes(), at::dtype<typename TypeMap::template type<T>>());
const T* Adata = A.template data<T>();
auto* Cdata =
C->template mutable_data<typename TypeMap::template type<T>>();
functor_.template Run<true, T, float, T>(
A.numel(), Adata, NULL, exponent_, Cdata, &context_);
} else if (InputSize() == 2) { // BinaryElementwiseOp
const auto& A = Input(0);
const auto& B = Input(1);
CAFFE_ENFORCE(
!IsInputOutputAlias(1, 0) || !enable_broadcast_,
"In-place is allowed only with the first tensor when broadcasting");
auto* C =
Output(0, A.sizes(), at::dtype<typename TypeMap::template type<T>>());
const T* Adata = A.template data<T>();
const T* Bdata = B.template data<T>();
auto* Cdata =
C->template mutable_data<typename TypeMap::template type<T>>();
if (!enable_broadcast_) {
CAFFE_ENFORCE_EQ(
A.sizes(),
B.sizes(),
"Dimension mismatch - did you forget to set broadcast=1?");
functor_.template Run<false, T, T, T>(
A.numel(), Adata, Bdata, 0, Cdata, &context_);
} else if (B.numel() == 1) {
functor_.template Run<true, T, T, T>(
A.numel(), Adata, Bdata, 0, Cdata, &context_);
} else {
size_t pre, n, post;
std::tie(pre, n, post) =
elementwise_ops_utils::ComputeLegacyBroadcastSizes(A, B, axis_);
if (post == 1) {
functor_.template RunWithBroadcast<T, T, T>(
Adata, Bdata, Cdata, pre, n, &context_);
} else {
functor_.template RunWithBroadcast2<T, T, T>(
Adata, Bdata, Cdata, pre, n, post, &context_);
}
}
} else {
      CAFFE_THROW(
          "The Pow operator expects either a single input tensor with an exponent argument, or two input tensors.");
}
return true;
}
private:
bool enable_broadcast_;
int axis_;
string axis_str_;
string order_;
float exponent_;
Functor functor_;
};
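// Illustrative sketch (not part of the original operator): this is the
// semantic-axis lookup performed in the constructor above, isolated into a
// standalone helper. The one-letter axis string is located inside the order
// string, so order "NCHW" with axis_str "C" resolves to axis 1. The helper
// name is ours; it assumes <string> is available via the includes above.
inline int ResolveSemanticAxisSketch(
    const std::string& order,
    const std::string& axis_str) {
  const size_t pos = order.find(axis_str);
  // string::npos means the axis letter does not occur in the order string;
  // the real operator raises a CAFFE_ENFORCE error in that case.
  return pos == std::string::npos ? -1 : static_cast<int>(pos);
}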
} // namespace caffe2
#endif // CAFFE2_OPERATORS_POW_OP_H_
| 4,677
| 33.145985
| 105
|
h
|
null |
pytorch-main/caffe2/operators/prefetch_op.h
|
#ifndef CAFFE2_OPERATORS_PREFETCH_OP_H_
#define CAFFE2_OPERATORS_PREFETCH_OP_H_
#include <condition_variable>
#include <mutex>
#include <thread> // NOLINT
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
// PrefetchOperator is an operator that prefetches the next batch. It should
// almost always be used to read things from disk, so it is defined to take
// zero input blobs.
//
// For any operator that is derived from PrefetchOperator, it should
// explicitly call the Finalize() function in its destructor, so that the
// prefetching thread is properly destructed.
// Note: We inherit from OperatorBase since we control the
// synchronization properties of this operator ourselves (we inform
// the waiting producer after we synchronize). This is a special-case
// - you should generally inherit from Operator<Context> directly.
template <class Context>
class PrefetchOperator : public OperatorBase {
public:
PrefetchOperator(const OperatorDef& operator_def, Workspace* ws)
: OperatorBase(operator_def, ws),
context_(operator_def.device_option()),
prefetched_(false),
prefetch_success_(true),
finalize_(false),
no_prefetch_(GetSingleArgument<bool>("no_prefetch", false)) {
context_.SwitchToDevice();
}
~PrefetchOperator() noexcept override {
CHECK(finalize_ || !prefetch_thread_.get())
<< "YOU MADE A PROGRAMMING ERROR: derived class of PrefetchOperator "
"should call Finalize() in its destructor so the prefetching "
"thread is joined. ";
}
void Finalize() {
if (prefetch_thread_.get()) {
{
std::unique_lock<std::mutex> lock(prefetch_access_mutex_);
while (!prefetched_)
consumer_.wait(lock);
finalize_ = true;
prefetched_ = false;
}
producer_.notify_one();
prefetch_thread_->join();
prefetch_thread_.reset();
} else {
// If we never initialized the prefetch thread, just set
// finalize anyway.
finalize_ = true;
}
}
bool Run(int /* unused */ /*stream_id*/) override {
if (no_prefetch_) {
context_.SwitchToDevice();
bool result = Prefetch() && CopyPrefetched();
context_.FinishDeviceComputation();
return result;
}
// Note(jiayq): We only start the prefetch_thread at the Run() function
// instead of in the constructor, because the prefetch_thread needs to start
// after all derived classes' constructors finish.
if (!prefetch_thread_) {
prefetch_thread_.reset(
new std::thread([this] { this->PrefetchWorker(); }));
}
context_.SwitchToDevice();
std::unique_lock<std::mutex> lock(prefetch_access_mutex_);
while (!prefetched_)
consumer_.wait(lock);
if (!prefetch_success_) {
LOG(ERROR) << "Prefetching failed.";
return false;
}
if (!CopyPrefetched()) {
LOG(ERROR) << "Error when copying prefetched data.";
return false;
}
prefetched_ = false;
context_.FinishDeviceComputation();
producer_.notify_one();
return true;
}
void PrefetchWorker() {
context_.SwitchToDevice();
std::unique_lock<std::mutex> lock(prefetch_access_mutex_);
while (prefetched_)
producer_.wait(lock);
while (!finalize_) {
// We will need to run a FinishDeviceComputation() call because the
// prefetcher thread and the main thread are potentially using different
// streams (like on GPU).
try {
prefetch_success_ = Prefetch();
context_.FinishDeviceComputation();
} catch (const std::exception& e) {
// TODO: propagate exception_ptr to the caller side
LOG(ERROR) << "Prefetching error " << e.what();
prefetch_success_ = false;
}
prefetched_ = true;
consumer_.notify_one();
while (prefetched_)
producer_.wait(lock);
}
}
// You will need to implement this instead of the Run function.
virtual bool Prefetch() = 0;
virtual bool CopyPrefetched() = 0;
protected:
Context context_;
std::mutex prefetch_access_mutex_;
std::condition_variable producer_, consumer_;
  // prefetched_ signals that a prefetched batch is ready to be consumed.
std::atomic<bool> prefetched_;
// prefetch_success_ is used to see if prefetching failed or not.
std::atomic<bool> prefetch_success_;
// finalize_ is used to tell the prefetcher to quit.
std::atomic<bool> finalize_;
unique_ptr<std::thread> prefetch_thread_;
// Whether to do prefetching or run this as a normal operator
const bool no_prefetch_;
};
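// Illustrative sketch (not part of the original header): Run() and
// PrefetchWorker() above implement a classic one-slot producer/consumer
// handshake. `prefetched_` acts as the "slot is full" flag; the consumer
// waits on `consumer_` until the slot fills, and the producer waits on
// `producer_` until the slot drains. A stripped-down standalone version,
// with hypothetical names, reusing the <mutex> and <condition_variable>
// headers already included above:
struct OneSlotHandshakeSketch {
  std::mutex m;
  std::condition_variable producer, consumer;
  bool full = false;
  // Called on the prefetch thread; the real code runs Prefetch() here.
  void Produce() {
    std::unique_lock<std::mutex> lock(m);
    while (full) {
      producer.wait(lock); // wait until the consumer drained the slot
    }
    full = true;
    consumer.notify_one();
  }
  // Called on the main thread; the real code runs CopyPrefetched() here.
  void Consume() {
    std::unique_lock<std::mutex> lock(m);
    while (!full) {
      consumer.wait(lock); // wait until the producer filled the slot
    }
    full = false;
    producer.notify_one();
  }
};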
} // namespace caffe2
#endif // CAFFE2_OPERATORS_PREFETCH_OP_H_
| 4,663
| 31.615385
| 80
|
h
|
null |
pytorch-main/caffe2/operators/prelu_op.h
|
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class PReluOp final : public Operator<Context> {
public:
template <class... Args>
explicit PReluOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
StorageOrder order_;
};
template <typename T, class Context>
class PReluGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit PReluGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
StorageOrder order_;
};
} // namespace caffe2
| 1,067
| 23.272727
| 74
|
h
|
null |
pytorch-main/caffe2/operators/prepend_dim_op.h
|
#ifndef CAFFE2_OPERATORS_PREPEND_DIM_OP_H_
#define CAFFE2_OPERATORS_PREPEND_DIM_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include <c10/util/irange.h>
namespace caffe2 {
template <class Context>
class PrependDimOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit PrependDimOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
dim_size_(this->template GetSingleArgument<int64_t>("dim_size", 0)) {
CAFFE_ENFORCE_GT(
dim_size_, 0, "Argument dim_size must be greater than zero.");
}
bool RunOnDevice() override {
auto& input = Input(0);
auto* output = Output(0);
CAFFE_ENFORCE(input.dim() > 0, "Input must be at least 1D.");
CAFFE_ENFORCE(
input.size(0) % dim_size_ == 0,
"First dimension must be multiple of prepend_dim. Current first dimension: ",
input.size(0));
vector<int64_t> actual_new_shape(input.dim() + 1);
actual_new_shape[0] = dim_size_;
actual_new_shape[1] = input.size(0) / dim_size_;
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto i : c10::irange(1, input.sizes().size())) {
actual_new_shape[i + 1] = input.size(i);
}
output->Resize(actual_new_shape);
if (output != &input) {
// If we are not doing in-place computation, a copy is needed.
context_.CopyItemsSameDevice(
input.dtype(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.dtype()));
}
return true;
}
private:
int64_t dim_size_;
};
template <class Context>
class MergeDimOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit MergeDimOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
auto& input = Input(0);
auto* output = Output(0);
CAFFE_ENFORCE(input.dim() > 1, "Input must be at least 2D.");
vector<int64_t> actual_new_shape(input.dim() - 1);
actual_new_shape[0] = input.size(0) * input.size(1);
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (int i = 1; i < input.sizes().size() - 1; ++i) {
actual_new_shape[i] = input.size(i + 1);
}
output->Resize(actual_new_shape);
if (output != &input) {
// If we are not doing in-place computation, a copy is needed.
context_.CopyItemsSameDevice(
input.dtype(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.dtype()));
}
return true;
}
private:
int64_t dim_size_;
};
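// Illustrative sketch (not part of the original header): PrependDim maps a
// shape [d0, d1, ...] with d0 % dim_size == 0 to [dim_size, d0 / dim_size,
// d1, ...], and MergeDim is its inverse, folding the first two dimensions
// back into one. The forward shape computation in isolation, with a
// hypothetical name:
inline std::vector<int64_t> PrependDimShapeSketch(
    const std::vector<int64_t>& in_shape,
    int64_t dim_size) {
  // Caller must ensure in_shape is non-empty and in_shape[0] is divisible
  // by dim_size, as the operator enforces.
  std::vector<int64_t> out(in_shape.size() + 1);
  out[0] = dim_size;
  out[1] = in_shape[0] / dim_size;
  for (size_t i = 1; i < in_shape.size(); ++i) {
    out[i + 1] = in_shape[i];
  }
  return out;
}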
} // namespace caffe2
#endif // CAFFE2_OPERATORS_PREPEND_DIM_OP_H_
| 2,800
| 27.292929
| 85
|
h
|
null |
pytorch-main/caffe2/operators/quant_decode_op.h
|
#ifndef QUANT_DECODE_OP_H_
#define QUANT_DECODE_OP_H_
#include <c10/util/irange.h>
#include <c10/util/typeid.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
namespace {
template <class CodebookT, class CodeT>
void Decode(
const Tensor& codebook,
const Tensor& codes,
/* optional */ const Tensor* const decoded_grad,
Tensor* const output,
bool resizeOnly) {
CAFFE_ENFORCE(codebook.IsType<CodebookT>());
auto* cb_ptr = codebook.data<CodebookT>();
int cb_size = codebook.numel();
CAFFE_ENFORCE(codes.IsType<CodeT>());
auto* code_ptr = codes.data<CodeT>();
if (decoded_grad == nullptr) {
// Forward pass: decode and store codebook values in output.
output->ResizeLike(codes);
auto* out_ptr = output->template mutable_data<CodebookT>();
if (resizeOnly) {
return;
}
int sz = output->numel();
for (C10_UNUSED const auto i : c10::irange(sz)) {
      TORCH_DCHECK_LT(*code_ptr, cb_size);
*out_ptr++ = cb_ptr[*code_ptr++];
}
} else {
// Backward pass: decode and accumulate gradient w.r.t. codebook values.
CAFFE_ENFORCE_EQ(codes.numel(), decoded_grad->numel());
auto* gradient_ptr = decoded_grad->data<CodebookT>();
auto* const gradient_end = gradient_ptr + decoded_grad->numel();
CAFFE_ENFORCE_EQ(cb_size, output->numel());
auto* out_ptr = output->template mutable_data<CodebookT>();
while (gradient_ptr < gradient_end) {
      TORCH_DCHECK_LT(*code_ptr, cb_size);
out_ptr[*code_ptr++] += *gradient_ptr++;
}
}
}
#define REGISTER_DECODER(codebookType, codesType) \
{ \
{TypeMeta::Id<codebookType>(), TypeMeta::Id<codesType>()}, \
[](const Tensor& codebook_, \
const Tensor& codes_, \
const Tensor* gradient_, \
Tensor* outDecoded_, \
bool resizeOnly_) { \
Decode<codebookType, codesType>( \
codebook_, codes_, gradient_, outDecoded_, resizeOnly_); \
} \
}
inline void DecodeGeneral(
const Tensor& codebook,
const Tensor& codes,
const Tensor* gradient,
Tensor* outDecoded,
bool resizeOnly) {
const static std::map<
std::pair<TypeIdentifier, TypeIdentifier>,
std::function<void(
const Tensor& codebook,
const Tensor& codes,
const Tensor* gradient,
Tensor* outDecoded,
bool resizeOnly)>>
gDecoderMapper = {REGISTER_DECODER(float, uint8_t),
REGISTER_DECODER(float, uint16_t),
REGISTER_DECODER(float, int32_t)};
gDecoderMapper.at({codebook.dtype().id(), codes.dtype().id()})(
codebook, codes, gradient, outDecoded, resizeOnly);
}
} // namespace
// Decode tensors based on the given codebook.
// The codebook is generated by model_quantize.py.
enum class QuantDecodeRunTy {
RUN_ALWAYS,
RUN_ONCE,
};
template <QuantDecodeRunTy QuantDecodeRun>
class QuantDecodeOp final : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
template <class... Args>
explicit QuantDecodeOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {}
~QuantDecodeOp() override {}
bool RunOnDevice() override {
CAFFE_ENFORCE_GT(InputSize(), 1);
// first input is the codebook
CAFFE_ENFORCE_EQ(InputSize(), OutputSize() + 1);
const auto& codebook = Input(0);
CAFFE_ENFORCE(codebook.template IsType<float>(), codebook.dtype().name());
for (const auto i : c10::irange(OutputSize())) {
auto& ci = Input(i + 1);
auto* co = Output(i);
DecodeGeneral(
codebook,
ci,
nullptr,
co,
/*resizeOnly=*/QuantDecodeRun == QuantDecodeRunTy::RUN_ONCE &&
hasRun_);
}
hasRun_ = true;
return true;
}
private:
bool hasRun_{false};
};
class QuantDecodeGradientOp final : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
template <class... Args>
explicit QuantDecodeGradientOp(Args&&... args)
: Operator<CPUContext>(std::forward<Args>(args)...) {}
~QuantDecodeGradientOp() override {}
bool RunOnDevice() override {
// Inputs: 1 codebook, n tensors of codes, and n corresponding gradients.
CAFFE_ENFORCE(InputSize() >= 3 && InputSize() % 2 == 1);
const int num_code_tensors = (InputSize() - 1) / 2;
CAFFE_ENFORCE_EQ(OutputSize(), 1);
const auto& codebook = Input(0);
CAFFE_ENFORCE(codebook.template IsType<float>(), codebook.dtype().name());
auto* gradient = Output(0, codebook.sizes(), at::dtype<float>());
auto* gradient_ptr = gradient->template mutable_data<float>();
std::fill(gradient_ptr, gradient_ptr + gradient->numel(), 0);
for (const auto i : c10::irange(num_code_tensors)) {
auto& codes_i = Input(i + 1);
auto& output_gradient_i = Input(i + num_code_tensors + 1);
DecodeGeneral(codebook, codes_i, &output_gradient_i, gradient, false);
}
return true;
}
};
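// Illustrative sketch (not part of the original header): the forward decode
// above is a gather -- each code indexes into the codebook -- and the
// backward pass is the matching scatter-add of output gradients into a
// codebook-shaped gradient. Standalone versions over std::vector, with
// hypothetical names, assuming <vector> is available via the includes:
inline void DecodeSketch(
    const std::vector<float>& codebook,
    const std::vector<uint8_t>& codes,
    std::vector<float>* decoded) {
  decoded->resize(codes.size());
  for (size_t i = 0; i < codes.size(); ++i) {
    (*decoded)[i] = codebook[codes[i]]; // forward: gather
  }
}
inline void DecodeGradSketch(
    const std::vector<uint8_t>& codes,
    const std::vector<float>& decoded_grad,
    std::vector<float>* codebook_grad) {
  // codebook_grad must be pre-sized to the codebook size and zero-filled,
  // mirroring what QuantDecodeGradientOp::RunOnDevice does above.
  for (size_t i = 0; i < codes.size(); ++i) {
    (*codebook_grad)[codes[i]] += decoded_grad[i]; // backward: scatter-add
  }
}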
} // namespace caffe2
#endif // QUANT_DECODE_OP_H_
| 5,465
| 30.595376
| 78
|
h
|
null |
pytorch-main/caffe2/operators/quantile_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
#include <cmath>
#include <limits>
namespace caffe2 {
template <typename Context>
class QuantileOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
QuantileOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
quantile_(this->template GetSingleArgument<float>("quantile", -1.0)),
abs_(this->template GetSingleArgument<bool>("abs", true)),
tol_(this->template GetSingleArgument<float>("tol", 1e-3)) {
CAFFE_ENFORCE_GE(
quantile_,
0,
"input quantile should be ",
"no less than 0, got ",
quantile_);
CAFFE_ENFORCE_GE(
1.0f,
quantile_,
"input quantile should be ",
"no larger than 1, got ",
quantile_);
CAFFE_ENFORCE_GT(
tol_, 0, "tolerance should be ", "no less than 0, got ", tol_);
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
Output(QUANTILE_VAL)->Resize(1);
auto* quantile_val = Output(QUANTILE_VAL)->template mutable_data<T>();
auto& input_zero = Input(0);
int64_t numel = input_zero.numel();
for (const auto i : c10::irange(1, InputSize())) {
CAFFE_ENFORCE_EQ(
Input(i).dtype(),
input_zero.dtype(),
"All inputs must have the same type, expected: ",
input_zero.dtype().name(),
" but got: ",
Input(i).dtype().name(),
" for input: ",
i);
numel += Input(i).numel();
}
CAFFE_ENFORCE_GT(
numel,
0,
"number of total element in input tensor should be ",
"larger than 0, got ",
numel);
    // the expected number of elements less than or equal to the target value
const int64_t target_cnt =
static_cast<int64_t>(std::ceil(numel * quantile_));
T hi = 0.0;
T lo = 0.0;
GetRangeFromInputs(&lo, &hi);
if (target_cnt == 0) {
// lowest possible value
quantile_val[0] = lo;
return true;
}
if (target_cnt == numel) {
// highest possible value
quantile_val[0] = hi;
return true;
}
int64_t lo_cnt = CountLowerEq(lo);
if (lo_cnt >= target_cnt) {
// the target is one of the lowest value
quantile_val[0] = lo;
return true;
}
while (std::abs(hi - lo) > tol_ * (std::abs(hi) + std::abs(lo))) {
      // maintain the invariant CountLowerEq(lo) < target_cnt < CountLowerEq(hi)
const T mid = lo + (hi - lo) / 2.0;
const int64_t mid_cnt = CountLowerEq(mid);
if (mid_cnt > target_cnt) {
CAFFE_ENFORCE_NE(
hi, mid, "numeric precision at limit, unable to continue bisect");
hi = mid;
} else if (mid_cnt < target_cnt) {
CAFFE_ENFORCE_NE(
lo, mid, "numeric precision at limit, unable to continue bisect");
lo = mid;
} else {
// mid_cnt == target_cnt
quantile_val[0] = mid;
return true;
}
}
quantile_val[0] = hi;
return true;
}
protected:
float quantile_;
bool abs_;
float tol_;
OUTPUT_TAGS(QUANTILE_VAL);
template <typename T>
void GetRangeFromInputs(T* lo, T* hi) {
*hi = std::numeric_limits<T>::lowest();
*lo = std::numeric_limits<T>::max();
for (const auto i : c10::irange(InputSize())) {
const auto* input = Input(i).template data<T>();
for (const auto j : c10::irange(Input(i).numel())) {
const T val = abs_ ? std::abs(input[j]) : input[j];
if (*hi < val) {
*hi = val;
}
if (*lo > val) {
*lo = val;
}
}
}
}
template <typename T>
int64_t CountLowerEq(const T& thd) {
int64_t count = 0;
for (const auto i : c10::irange(InputSize())) {
const auto* input = Input(i).template data<T>();
for (const auto j : c10::irange(Input(i).numel())) {
const T val = abs_ ? std::abs(input[j]) : input[j];
if (val <= thd) {
count++;
}
}
}
return count;
}
};
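// Illustrative sketch (not part of the original header): the operator above
// finds the quantile by bisecting on the *value* range instead of sorting,
// using CountLowerEq as its oracle. A standalone single-vector version with
// a hypothetical name, assuming <algorithm> in addition to the <cmath> and
// <limits> headers included above; `v` must be non-empty:
inline float QuantileByBisectionSketch(
    const std::vector<float>& v,
    float quantile,
    float tol) {
  const int64_t target =
      static_cast<int64_t>(std::ceil(v.size() * quantile));
  float lo = *std::min_element(v.begin(), v.end());
  float hi = *std::max_element(v.begin(), v.end());
  const auto count_le = [&v](float thd) {
    int64_t c = 0;
    for (const float x : v) {
      c += (x <= thd) ? 1 : 0;
    }
    return c;
  };
  if (target == 0 || count_le(lo) >= target) {
    return lo; // the target falls on the lowest value
  }
  // Bisect until the bracket is tight relative to the magnitudes involved.
  while (std::abs(hi - lo) > tol * (std::abs(hi) + std::abs(lo))) {
    const float mid = lo + (hi - lo) / 2.0f;
    if (count_le(mid) >= target) {
      hi = mid;
    } else {
      lo = mid;
    }
  }
  return hi;
}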
} // namespace caffe2
| 4,193
| 26.592105
| 78
|
h
|
null |
pytorch-main/caffe2/operators/rank_loss_op.h
|
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// support multiple batches of sessions
template <typename T, class Context>
class PairWiseLossOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(PairWiseLossOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
private:
INPUT_TAGS(XVALUE, LABEL, LENGTHS);
OUTPUT_TAGS(YVALUE);
};
template <typename T, class Context>
class PairWiseLossGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(PairWiseLossGradientOp);
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
private:
INPUT_TAGS(XVALUE, LABEL, DYVALUE, LENGTHS);
OUTPUT_TAGS(DXVALUE);
};
} // namespace caffe2
| 820
| 21.805556
| 63
|
h
|
null |
pytorch-main/caffe2/operators/reciprocal_op.h
|
#ifndef CAFFE2_OPERATORS_RECIPROCAL_OP_H_
#define CAFFE2_OPERATORS_RECIPROCAL_OP_H_
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
struct ReciprocalFunctor {
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const {
math::Inv(N, X, Y, context);
return true;
}
};
template <class Context>
struct ReciprocalGradientFunctor {
template <typename T>
bool Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& dY_dims,
const T* Y,
const T* dY,
T* dX,
Context* context) const;
};
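// Illustrative sketch (not part of the original header): for Y = 1/X the
// elementwise gradient is dX = -Y^2 * dY (since d(1/x)/dx = -1/x^2), which
// is what the Forward declaration above is expected to compute. A
// standalone version with a hypothetical name:
template <typename T>
void ReciprocalGradientSketch(const int N, const T* Y, const T* dY, T* dX) {
  for (int i = 0; i < N; ++i) {
    dX[i] = -Y[i] * Y[i] * dY[i];
  }
}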
} // namespace caffe2
#endif // CAFFE2_OPERATORS_RECIPROCAL_OP_H_
| 721
| 20.878788
| 74
|
h
|
null |
pytorch-main/caffe2/operators/reduce_front_back_max_ops.h
|
#ifndef CAFFE2_OPERATORS_REDUCE_FRONT_BACK_MAX_OPS_H_
#define CAFFE2_OPERATORS_REDUCE_FRONT_BACK_MAX_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <typename T, class Context, bool FIRSTDIMS>
class MaxReduceDimsOp final : public Operator<Context> {
public:
template <class... Args>
explicit MaxReduceDimsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
num_reduce_dims_(
this->template GetSingleArgument<int32_t>("num_reduce_dim", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto& X = Input(0);
CAFFE_ENFORCE(
num_reduce_dims_ >= 0 && num_reduce_dims_ <= X.dim(),
"For N-dim input tensor, support num_reduce_dims in range [0, N].");
const int rows = FIRSTDIMS ? X.size_to_dim(num_reduce_dims_)
: X.size_to_dim(X.dim() - num_reduce_dims_);
const int cols = FIRSTDIMS ? X.size_from_dim(num_reduce_dims_)
: X.size_from_dim(X.dim() - num_reduce_dims_);
vector<int64_t> output_shape;
int start_index = FIRSTDIMS ? num_reduce_dims_ : 0;
int end_index = FIRSTDIMS ? X.dim() : X.dim() - num_reduce_dims_;
for (const auto i : c10::irange(start_index, end_index)) {
output_shape.push_back(X.sizes()[i]);
}
auto* Y = Output(0, output_shape, at::dtype<float>());
float* out_data = Y->template mutable_data<float>();
if (cols == 0 || rows == 0) {
math::Set(Y->numel(), static_cast<float>(0), out_data, &context_);
return true;
}
const int32_t* lengths_data = nullptr;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_data = lengths.template data<int32_t>();
CAFFE_ENFORCE(
num_reduce_dims_ == 1,
"Given lengths input, the number of reduce dimensions should be one.");
const int batch_size = FIRSTDIMS ? cols : rows;
CAFFE_ENFORCE(
lengths.numel() == batch_size,
"The size of lengths vector doesn't match the batch size.");
}
const float* data = X.template data<float>();
Compute(rows, cols, data, lengths_data, out_data);
return true;
}
protected:
void Compute(
int rows,
int cols,
const float* data,
const int32_t* lengths_data,
float* out_data);
int num_reduce_dims_;
};
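// Illustrative sketch (not part of the original header): the FIRSTDIMS flag
// above only changes where the shape is split into a (rows, cols) matrix.
// Reducing the first k dims collapses them into `rows`; reducing the last k
// dims collapses them into `cols`. Standalone, with a hypothetical name,
// assuming <utility> for std::pair:
inline std::pair<int64_t, int64_t> RowsColsSplitSketch(
    const std::vector<int64_t>& dims,
    int k,
    bool first_dims) {
  const size_t split =
      first_dims ? static_cast<size_t>(k) : dims.size() - k;
  int64_t rows = 1;
  int64_t cols = 1;
  for (size_t i = 0; i < dims.size(); ++i) {
    (i < split ? rows : cols) *= dims[i];
  }
  return {rows, cols};
}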
template <typename T, class Context, bool FIRSTDIMS>
class MaxReduceDimsGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit MaxReduceDimsGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
num_reduce_dims_(
this->template GetSingleArgument<int32_t>("num_reduce_dim", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto& dY = Input(0);
auto& X = Input(1);
auto& Y = Input(2);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
const int rows = FIRSTDIMS ? X.size_to_dim(num_reduce_dims_)
: X.size_to_dim(X.dim() - num_reduce_dims_);
const int cols = FIRSTDIMS ? X.size_from_dim(num_reduce_dims_)
: X.size_from_dim(X.dim() - num_reduce_dims_);
const float* dYdata = dY.template data<float>();
const float* Xdata = X.template data<float>();
const float* Ydata = Y.template data<float>();
const int32_t* lengths_data = nullptr;
if (InputSize() > 3) {
const auto& lengths = Input(3);
lengths_data = lengths.template data<int32_t>();
CAFFE_ENFORCE(
num_reduce_dims_ == 1,
"Given lengths input, the number of reduce dimensions should be one.");
const int batch_size = FIRSTDIMS ? cols : rows;
CAFFE_ENFORCE(
lengths.numel() == batch_size,
"The size of lengths vector doesn't match the batch size.");
}
float* dXdata = dX->template mutable_data<float>();
Compute(rows, cols, dYdata, Xdata, Ydata, lengths_data, dXdata);
return true;
}
protected:
void Compute(
int rows,
int cols,
const float* dYdata,
const float* Xdata,
const float* Ydata,
const int32_t* lengths_data,
float* dXdata);
int num_reduce_dims_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_REDUCE_FRONT_BACK_MAX_OPS_H_
| 4,448
| 31.007194
| 81
|
h
|
null |
pytorch-main/caffe2/operators/reduce_front_back_sum_mean_ops.h
|
#ifndef CAFFE2_OPERATORS_REDUCE_FRONT_BACK_SUM_MEAN_OPS_H_
#define CAFFE2_OPERATORS_REDUCE_FRONT_BACK_SUM_MEAN_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context, bool FIRSTDIMS, bool NORMALIZE>
class SumReduceDimsOp final : public Operator<Context> {
public:
template <class... Args>
explicit SumReduceDimsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
num_reduce_dims_(
this->template GetSingleArgument<int32_t>("num_reduce_dim", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int, int64_t, float, double>>::call(
this, Input(0));
}
template <typename T>
bool DoRunWithType() {
auto& X = Input(0);
CAFFE_ENFORCE(
num_reduce_dims_ >= 0 && num_reduce_dims_ <= X.dim(),
"For N-dim input tensor, support num_reduce_dims in range [0, N].");
vector<int64_t> output_shape;
int start_index = FIRSTDIMS ? num_reduce_dims_ : 0;
int end_index = FIRSTDIMS ? X.dim() : X.dim() - num_reduce_dims_;
for (const auto i : c10::irange(start_index, end_index)) {
output_shape.push_back(X.sizes()[i]);
}
auto* Y = Output(0, output_shape, at::dtype<T>());
const int rows = FIRSTDIMS ? X.size_to_dim(num_reduce_dims_)
: X.size_to_dim(X.dim() - num_reduce_dims_);
const int cols = FIRSTDIMS ? X.size_from_dim(num_reduce_dims_)
: X.size_from_dim(X.dim() - num_reduce_dims_);
const T* in_data = X.template data<T>();
T* out_data = Y->template mutable_data<T>();
if (cols == 0 || rows == 0) {
math::Set(Y->numel(), static_cast<T>(0), out_data, &context_);
return true;
}
const int32_t* lengths_data = nullptr;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_data = lengths.template data<int32_t>();
CAFFE_ENFORCE(
num_reduce_dims_ == 1,
"Given lengths input, the number of reduce dimensions should be one.");
const int batch_size = FIRSTDIMS ? cols : rows;
CAFFE_ENFORCE(
lengths.numel() == batch_size,
"The size of lengths vector doesn't match the batch size.");
}
Compute(rows, cols, in_data, lengths_data, out_data);
return true;
}
private:
template <typename T>
void Compute(
int rows,
int cols,
const T* in_data,
const int32_t* lengths_data,
T* out_data);
int num_reduce_dims_;
};
template <class Context, bool FIRSTDIMS, bool NORMALIZE>
class SumReduceDimsGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit SumReduceDimsGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
num_reduce_dims_(
this->template GetSingleArgument<int32_t>("num_reduce_dim", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int, long, float, double>>::call(
this, Input(0));
}
template <typename T>
bool DoRunWithType() {
auto& dY = Input(0);
auto& input_1 = Input(1);
vector<int64_t> dX_sizes;
// In previous diff we changed the semantic: Input(1) was changed from
// the shape of the input to the data tensor. This made the backward
// computation incompatible with old models. To fix this, we check
// the dimension and type of Input(1).
if (input_1.dim() == 1 && input_1.template IsType<int64_t>()) {
// Input(1) is the shape of the input
shape_.CopyFrom(input_1);
// Copy first dims
dX_sizes = vector<int64_t>(
shape_.template data<int64_t>(),
shape_.template data<int64_t>() + shape_.numel());
} else {
// Input(1) is data tensor X
dX_sizes = input_1.sizes().vec();
}
auto* dX = Output(0, dX_sizes, at::dtype<T>());
const int rows = FIRSTDIMS ? dX->size_to_dim(num_reduce_dims_)
: dX->size_to_dim(dX->dim() - num_reduce_dims_);
const int cols = FIRSTDIMS
? dX->size_from_dim(num_reduce_dims_)
: dX->size_from_dim(dX->dim() - num_reduce_dims_);
const int32_t* lengths_data = nullptr;
if (InputSize() > 2) {
const auto& lengths = Input(2);
lengths_data = lengths.template data<int32_t>();
CAFFE_ENFORCE(
num_reduce_dims_ == 1,
"Given lengths input, the number of reduce dimensions should be one.");
const int batch_size = FIRSTDIMS ? cols : rows;
CAFFE_ENFORCE(
lengths.numel() == batch_size,
"The size of lengths vector doesn't match the batch size.");
}
const T* dYdata = dY.template data<T>();
T* dXdata = dX->template mutable_data<T>();
Compute<T>(rows, cols, dYdata, lengths_data, dXdata);
return true;
}
private:
template <typename T>
void Compute(
int rows,
int cols,
const T* dYdata,
const int32_t* lengths_data,
T* dXdata);
int num_reduce_dims_;
// scratch space used for former version of this reducer
Tensor shape_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_REDUCE_FRONT_BACK_SUM_MEAN_OPS_H_
| 5,377
| 31.203593
| 81
|
h
|
null |
pytorch-main/caffe2/operators/reduce_ops.h
|
#ifndef CAFFE2_OPERATORS_REDUCE_OPS_H_
#define CAFFE2_OPERATORS_REDUCE_OPS_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
#include <c10/util/irange.h>
#include <algorithm>
#include <functional>
#include <vector>
namespace caffe2 {
template <typename InputTypes, class Context, class Reducer>
class ReduceOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ReduceOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axes_(this->template GetRepeatedArgument<int>("axes")),
OP_SINGLE_ARG(bool, "keepdims", keep_dims_, true),
reducer_{this->template GetSingleArgument<bool>("allow_broadcast_fastpath", false)} {}
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& X = Input(0);
const int ndim = X.dim();
const std::vector<int> X_dims(X.sizes().cbegin(), X.sizes().cend());
if (axes_.empty()) {
axes_.resize(ndim);
std::iota(axes_.begin(), axes_.end(), 0);
} else {
for (auto& axis : axes_) {
axis = X.canonical_axis_index(axis);
}
std::sort(axes_.begin(), axes_.end());
CAFFE_ENFORCE_GE(axes_.front(), 0, "Axes ids must be non-negative.");
CAFFE_ENFORCE_LT(
axes_.back(),
ndim,
"Axes ids must be smaller than the dimensions of input.");
}
std::vector<int64_t> output_dims;
output_dims.reserve(ndim);
std::size_t cur_axis = 0;
for (const auto i : c10::irange(ndim)) {
if (cur_axis < axes_.size() && i == axes_[cur_axis]) {
if (keep_dims_) {
output_dims.push_back(1);
}
++cur_axis;
} else {
output_dims.push_back(X_dims[i]);
}
}
auto* Y = Output(0, output_dims, at::dtype<T>());
std::vector<int> Y_dims = X_dims;
for (const int axis : axes_) {
Y_dims[axis] = 1;
}
return reducer_.template Forward<T>(
X_dims,
Y_dims,
X.template data<T>(),
Y->template mutable_data<T>(),
&context_);
}
private:
std::vector<int> axes_;
  const bool keep_dims_;
const Reducer reducer_;
};
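// Illustrative sketch (not part of the original operator): the output-shape
// logic in DoRunWithType above, isolated. Reduced axes either collapse to
// size 1 (keepdims) or are dropped entirely. Assumes `axes` is already
// sorted and canonicalized, as the operator enforces; the name is ours:
inline std::vector<int64_t> ReducedShapeSketch(
    const std::vector<int64_t>& in_dims,
    const std::vector<int>& axes,
    bool keep_dims) {
  std::vector<int64_t> out;
  size_t cur = 0;
  for (size_t i = 0; i < in_dims.size(); ++i) {
    if (cur < axes.size() && static_cast<int>(i) == axes[cur]) {
      if (keep_dims) {
        out.push_back(1); // reduced axis kept as a singleton dimension
      }
      ++cur;
    } else {
      out.push_back(in_dims[i]); // non-reduced axis passes through
    }
  }
  return out;
}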
template <typename InputTypes, class Context, class Reducer>
class ReduceGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ReduceGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axes_(this->template GetRepeatedArgument<int>("axes")),
reducer_{this->template GetSingleArgument<bool>("allow_broadcast_fastpath", false)} {}
bool RunOnDevice() override {
return DispatchHelper<InputTypes>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& dY = Input(0);
const auto& X = Input(1);
const auto& Y = Input(2);
const int ndim = X.dim();
if (axes_.empty()) {
axes_.resize(ndim);
std::iota(axes_.begin(), axes_.end(), 0);
} else {
for (auto& axis : axes_) {
axis = X.canonical_axis_index(axis);
}
std::sort(axes_.begin(), axes_.end());
CAFFE_ENFORCE_GE(axes_.front(), 0, "Axes ids must be non-negative.");
CAFFE_ENFORCE_LT(
axes_.back(),
ndim,
"Axes ids must be smaller than the dimensions of input.");
}
const std::vector<int> dX_dims(X.sizes().cbegin(), X.sizes().cend());
std::vector<int> dY_dims = dX_dims;
for (const int axis : axes_) {
dY_dims[axis] = 1;
}
auto* dX = Output(0, X.sizes(), at::dtype<T>());
return reducer_.template Backward<T>(
dY_dims,
dX_dims,
dY.template data<T>(),
X.template data<T>(),
Y.template data<T>(),
dX->template mutable_data<T>(),
&context_);
}
private:
std::vector<int> axes_;
const Reducer reducer_;
};
template <class Context>
struct MinReducer {
explicit MinReducer(bool allow_broadcast_fastpath)
: allow_broadcast_fastpath_(allow_broadcast_fastpath) {}
template <typename T>
bool Forward(
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const T* X_data,
T* Y_data,
Context* context) const {
math::ReduceMin<T, Context>(
X_dims.size(),
X_dims.data(),
Y_dims.data(),
T(1),
X_data,
Y_data,
context,
allow_broadcast_fastpath_);
return true;
}
template <typename T>
bool Backward(
const std::vector<int>& dY_dims,
const std::vector<int>& dX_dims,
const T* dY_data,
const T* X_data,
const T* Y_data,
T* dX_data,
Context* context) const;
const bool allow_broadcast_fastpath_;
};
template <class Context>
struct MaxReducer {
explicit MaxReducer(bool allow_broadcast_fastpath)
: allow_broadcast_fastpath_(allow_broadcast_fastpath) {}
template <typename T>
bool Forward(
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const T* X_data,
T* Y_data,
Context* context) const {
math::ReduceMax<T, Context>(
X_dims.size(),
X_dims.data(),
Y_dims.data(),
T(1),
X_data,
Y_data,
context,
allow_broadcast_fastpath_);
return true;
}
template <typename T>
bool Backward(
const std::vector<int>& dY_dims,
const std::vector<int>& dX_dims,
const T* dY_data,
const T* X_data,
const T* Y_data,
T* dX_data,
Context* context) const;
const bool allow_broadcast_fastpath_;
};
template <class Context>
struct SumReducer {
explicit SumReducer(bool allow_broadcast_fastpath)
: allow_broadcast_fastpath_(allow_broadcast_fastpath) {}
template <typename T>
bool Forward(
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const T* X_data,
T* Y_data,
Context* context) const {
math::ReduceSum<T, Context>(
X_dims.size(),
X_dims.data(),
Y_dims.data(),
T(1),
X_data,
Y_data,
context,
allow_broadcast_fastpath_);
return true;
}
template <typename T>
bool Backward(
const std::vector<int>& dY_dims,
const std::vector<int>& dX_dims,
const T* dY_data,
const T* /* X_data */,
const T* /* Y_data */,
T* dX_data,
Context* context) const {
math::Broadcast(
dY_dims.size(),
dY_dims.data(),
dX_dims.size(),
dX_dims.data(),
T(1),
dY_data,
dX_data,
context,
allow_broadcast_fastpath_);
return true;
}
const bool allow_broadcast_fastpath_;
};
template <class Context>
struct MeanReducer {
explicit MeanReducer(bool allow_broadcast_fastpath)
: allow_broadcast_fastpath_(allow_broadcast_fastpath) {}
template <typename T>
bool Forward(
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const T* X_data,
T* Y_data,
Context* context) const {
math::ReduceMean<T, Context>(
X_dims.size(),
X_dims.data(),
Y_dims.data(),
T(1),
X_data,
Y_data,
context,
allow_broadcast_fastpath_);
return true;
}
template <typename T>
bool Backward(
const std::vector<int>& dY_dims,
const std::vector<int>& dX_dims,
const T* dY_data,
const T* /* X_data */,
const T* /* Y_data */,
T* dX_data,
Context* context) const {
const int dY_size = std::accumulate(
dY_dims.cbegin(), dY_dims.cend(), 1, std::multiplies<int>());
const int dX_size = std::accumulate(
dX_dims.cbegin(), dX_dims.cend(), 1, std::multiplies<int>());
math::Broadcast(
dY_dims.size(),
dY_dims.data(),
dX_dims.size(),
dX_dims.data(),
static_cast<T>(dY_size) / static_cast<T>(dX_size),
dY_data,
dX_data,
context,
allow_broadcast_fastpath_);
return true;
}
const bool allow_broadcast_fastpath_;
};
template <class Context>
struct L1Reducer {
explicit L1Reducer(bool allow_broadcast_fastpath)
: allow_broadcast_fastpath_(allow_broadcast_fastpath) {}
template <typename T>
bool Forward(
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const T* X_data,
T* Y_data,
Context* context) const {
math::ReduceL1<T, Context>(
X_dims.size(),
X_dims.data(),
Y_dims.data(),
T(1),
X_data,
Y_data,
context,
allow_broadcast_fastpath_);
return true;
}
template <typename T>
bool Backward(
const std::vector<int>& dY_dims,
const std::vector<int>& dX_dims,
const T* dY_data,
const T* X_data,
const T* Y_data,
T* dX_data,
Context* context) const;
const bool allow_broadcast_fastpath_;
};
template <class Context>
struct L2Reducer {
explicit L2Reducer(bool allow_broadcast_fastpath)
: allow_broadcast_fastpath_(allow_broadcast_fastpath) {}
template <typename T>
bool Forward(
const std::vector<int>& X_dims,
const std::vector<int>& Y_dims,
const T* X_data,
T* Y_data,
Context* context) const {
math::ReduceL2<T, Context>(
X_dims.size(),
X_dims.data(),
Y_dims.data(),
T(1),
X_data,
Y_data,
context,
allow_broadcast_fastpath_);
return true;
}
template <typename T>
bool Backward(
const std::vector<int>& dY_dims,
const std::vector<int>& dX_dims,
const T* dY_data,
const T* X_data,
const T* Y_data,
T* dX_data,
Context* context) const;
const bool allow_broadcast_fastpath_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_REDUCE_OPS_H_
| 9,999
| 24.316456
| 94
|
h
|
null |
pytorch-main/caffe2/operators/reducer_functors.h
|
#ifndef CAFFE2_OPERATORS_REDUCER_FUNCTORS_H_
#define CAFFE2_OPERATORS_REDUCER_FUNCTORS_H_
#include <array>
#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/eigen_utils.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/proto_utils.h"
namespace caffe2 {
////////////////////////////////////////////////////////////////////////////////
// Range reducers: can leverage that input segment is continuous and provide
// special implementation
////////////////////////////////////////////////////////////////////////////////
// Put forward and backward in the same template?
template <typename T, class Context>
class SumRangeReducer;
template <typename T, class Context>
class SumRangeReducerGradient;
template <typename T>
class SumRangeReducer<T, CPUContext> {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* in,
T* out,
CPUContext* /*context*/) {
// do we need to go through wrapper in math.h?
EigenVectorMap<T> out_vec(out, block_size);
out_vec = ConstEigenMatrixMap<T>(in, block_size, blocks).rowwise().sum();
}
};
template <typename T, class Context>
class SumRangeReducerGradient {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* segment_grad,
T* data_grad,
const T* /*data_in*/, // unused
const T* /*data_out*/, // unused
Context* context) {
// do we have some op that does it smartly with minimum number of memcpy?
for (const auto i : c10::irange(blocks)) {
context->template CopySameDevice<T>(
block_size, segment_grad, data_grad + block_size * i);
}
}
};
struct SumRangeReducerDef {
template <typename T, class Context>
using Reducer = SumRangeReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = SumRangeReducerGradient<T, Context>;
static constexpr const char* name = "Sum";
static constexpr const char* doc =
"Summation is done element-wise across slices of the input tensor and "
"doesn't change the shape of the individual blocks.";
};
// Put forward and backward in the same template?
template <typename T, class Context>
class LogSumExpRangeReducer;
template <typename T, class Context>
class LogSumExpRangeReducerGradient;
template <typename T>
class LogSumExpRangeReducer<T, CPUContext> {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* in,
T* out,
CPUContext* /*context*/) {
for (const auto j : c10::irange(block_size)) {
T max_value = std::numeric_limits<T>::lowest();
for (const auto i : c10::irange(blocks)) {
max_value = std::max(max_value, in[i * block_size + j]);
}
T scaled_exp_sum = 0;
for (const auto i : c10::irange(blocks)) {
scaled_exp_sum += std::exp(in[i * block_size + j] - max_value);
}
*(out++) = std::log(scaled_exp_sum) + max_value;
}
}
T r{1};
};
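// Illustrative sketch (not part of the original header): the reducer above
// uses the standard max-subtraction trick for numerical stability --
// log(sum_i exp(x_i)) == m + log(sum_i exp(x_i - m)) with m = max_i x_i,
// so exp() never overflows. One block in isolation, hypothetical name,
// assuming <algorithm> for std::max:
template <typename T>
T LogSumExpSketch(const T* x, int64_t n) {
  T m = std::numeric_limits<T>::lowest();
  for (int64_t i = 0; i < n; ++i) {
    m = std::max(m, x[i]);
  }
  T scaled_exp_sum = 0;
  for (int64_t i = 0; i < n; ++i) {
    scaled_exp_sum += std::exp(x[i] - m);
  }
  return std::log(scaled_exp_sum) + m;
}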
template <typename T, class Context>
class LogSumExpRangeReducerGradient {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* segment_grad, // GO
T* data_grad, // GI
const T* data_in, // I
const T* data_out, // O
Context* /*context*/) {
for (const auto j : c10::irange(block_size)) {
const T out_grad = *(segment_grad++);
const T offset = *(data_out++);
for (const auto i : c10::irange(blocks)) {
auto idx = i * block_size + j;
data_grad[idx] = out_grad * std::exp(data_in[idx] - offset);
}
}
}
};
struct LogSumExpRangeReducerDef {
template <typename T, class Context>
using Reducer = LogSumExpRangeReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = LogSumExpRangeReducerGradient<T, Context>;
static constexpr const char* name = "LogSumExp";
static constexpr const char* doc =
"LogSumExp computes the element-wise log of the sum of exponentials of "
"input slices. Operation doesn't change the shape of individual blocks.";
};
template <typename T, class Context>
class LogMeanExpRangeReducer;
template <typename T, class Context>
class LogMeanExpRangeReducerGradient;
template <typename T>
class LogMeanExpRangeReducer<T, CPUContext> {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* in,
T* out,
CPUContext* /*context*/) {
for (const auto j : c10::irange(block_size)) {
T max_value = std::numeric_limits<T>::lowest();
for (const auto i : c10::irange(blocks)) {
max_value = std::max(max_value, in[i * block_size + j]);
}
T scaled_exp_sum = 0;
for (const auto i : c10::irange(blocks)) {
scaled_exp_sum += std::exp(in[i * block_size + j] - max_value);
}
scaled_exp_sum /= blocks;
*(out++) = std::log(scaled_exp_sum) + max_value;
}
}
};
template <typename T, class Context>
class LogMeanExpRangeReducerGradient {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* segment_grad, // GO
T* data_grad, // GI
const T* data_in, // I
const T* data_out, // O
Context* /*context*/) {
for (const auto j : c10::irange(block_size)) {
const T out_grad = *(segment_grad++);
const T offset = *(data_out++);
for (const auto i : c10::irange(blocks)) {
auto idx = i * block_size + j;
data_grad[idx] = out_grad * std::exp(data_in[idx] - offset) / blocks;
}
}
}
};
struct LogMeanExpRangeReducerDef {
template <typename T, class Context>
using Reducer = LogMeanExpRangeReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = LogMeanExpRangeReducerGradient<T, Context>;
static constexpr const char* name = "LogMeanExp";
static constexpr const char* doc =
"LogMeanExp computes the element-wise log of the mean of exponentials of "
"input slices. Operation doesn't change the shape of individual blocks.";
};
template <typename T, class Context>
class MeanRangeReducer;
template <typename T, class Context>
class MeanRangeReducerGradient;
template <typename T>
class MeanRangeReducer<T, CPUContext> {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* in,
T* out,
CPUContext* /*context*/) {
for (const auto j : c10::irange(block_size)) {
T avg_value = 0;
for (const auto i : c10::irange(blocks)) {
avg_value += in[i * block_size + j] / blocks;
}
*(out++) = avg_value;
}
}
};
template <typename T, class Context>
class MeanRangeReducerGradient {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* segment_grad, // GO
T* data_grad, // GI
const T* /*data_in*/, // I
const T* /*data_out*/, // O
Context* /*context*/) {
const auto in_grad = 1.0 / blocks;
for (const auto j : c10::irange(block_size)) {
const T out_grad = *(segment_grad++);
for (const auto i : c10::irange(blocks)) {
auto idx = i * block_size + j;
data_grad[idx] = out_grad * in_grad;
}
}
}
};
struct MeanRangeReducerDef {
template <typename T, class Context>
using Reducer = MeanRangeReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = MeanRangeReducerGradient<T, Context>;
static constexpr const char* name = "Mean";
static constexpr const char* doc =
"Mean computation is done element-wise, so that each element of the "
"output slice corresponds to the average value of the respective "
"elements in the input slices. Operation doesn't change the shape of "
"individual blocks.";
};
template <typename T, class Context>
class MaxRangeReducer;
template <typename T, class Context>
class MaxRangeReducerGradient;
template <typename T>
class MaxRangeReducer<T, CPUContext> {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* in,
T* out,
CPUContext* /*context*/) {
for (const auto j : c10::irange(block_size)) {
T max_value = std::numeric_limits<T>::lowest();
for (const auto i : c10::irange(blocks)) {
max_value = std::max(max_value, in[i * block_size + j]);
}
*(out++) = max_value;
}
}
};
template <typename T, class Context>
class MaxRangeReducerGradient {
public:
void operator()(
const int64_t block_size,
const int64_t blocks,
const T* segment_grad, // GO
T* data_grad, // GI
const T* data_in, // I
const T* data_out, // O
Context* /*context*/) {
std::memset(
static_cast<void*>(data_grad), 0, blocks * block_size * sizeof(T));
for (const auto j : c10::irange(block_size)) {
const T out_grad = *(segment_grad++);
const T out = data_out[j];
for (const auto i : c10::irange(blocks)) {
auto idx = i * block_size + j;
if (out == data_in[idx]) {
data_grad[idx] = out_grad;
}
}
}
}
};
struct MaxRangeReducerDef {
template <typename T, class Context>
using Reducer = MaxRangeReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = MaxRangeReducerGradient<T, Context>;
static constexpr const char* name = "Max";
static constexpr const char* doc =
"Max computation is done element-wise, so that each element of the "
"output slice corresponds to the max value of the respective "
"elements in the input slices. Operation doesn't change the shape of "
"individual blocks. This implementation imitates torch nn.Max operator. "
"If the maximum value occurs more than once, the operator will return "
"the first occurrence of value. When computing the gradient using the "
"backward propagation, the gradient input corresponding to the first "
"occurrence of the maximum value will be used.";
};
////////////////////////////////////////////////////////////////////////////////
// Incremental reducers: consume elements one by one
////////////////////////////////////////////////////////////////////////////////
// Base implementation, everything can be overwritten
class BaseReducer {
public:
static constexpr int kInputCount = 1;
struct Meta {
int64_t block_size;
vector<int64_t> block_shape;
bool first_dim;
explicit Meta(bool first = true) : first_dim(first) {}
void computeMeta(at::IntArrayRef dims, size_t skip_dims) {
first_dim ? block_shape.assign(dims.begin() + skip_dims, dims.end())
: block_shape.assign(dims.begin(), dims.end() - skip_dims);
block_size = first_dim ? size_from_dim_(skip_dims, dims)
: size_from_dim_(dims.size() - skip_dims, dims);
}
void observeInput(int input, const Tensor& value, int skip_dims) {
TORCH_DCHECK_EQ(0, input);
auto dims = value.sizes();
computeMeta(dims, skip_dims);
}
void appendOutputShape(vector<int64_t>* output_shape) {
output_shape->insert(
output_shape->end(), block_shape.begin(), block_shape.end());
}
vector<int64_t> getOutputShape(const TensorShape& in, int skip_dims) {
vector<int64_t> dims(in.dims().begin(), in.dims().end());
computeMeta(dims, skip_dims);
return block_shape;
}
};
template <int FixedSize>
void finish(const Meta& /*meta*/, CPUContext* /*context*/) {}
};
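// Illustrative sketch (not part of the original header): computeMeta above
// splits an input shape into segment dims (reduced away) and block dims
// (which survive into the output). With first_dim == true and skip_dims ==
// 1, shape [N, d1, d2] yields block_shape [d1, d2] and block_size d1 * d2;
// with first_dim == false the skip is taken from the back. The block-size
// half of that computation, standalone, with a hypothetical name:
inline int64_t BlockSizeSketch(
    const std::vector<int64_t>& dims,
    size_t skip_dims,
    bool first_dim) {
  const size_t begin = first_dim ? skip_dims : 0;
  const size_t end = first_dim ? dims.size() : dims.size() - skip_dims;
  int64_t size = 1;
  for (size_t i = begin; i < end; ++i) {
    size *= dims[i];
  }
  return size;
}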
class BaseReducerGradient {
public:
// which of the original inputs are required for gradient computation
static constexpr std::array<int, 0> originalInputs() {
return std::array<int, 0>();
}
static constexpr bool computeLength() {
return false;
}
static int numAuxInputsWithGrads(const OperatorDef& /*def*/) {
return 0;
}
static bool requiresDataInput(const OperatorDef& /*def*/) {
return false;
}
// True if the backward op requires the output of the forward op.
static bool requiresForwardOutput() {
return false;
}
struct Meta {
int64_t block_size;
vector<int64_t> block_shape;
bool first_dim;
Meta(const Tensor& out_grad, int skip_dims, bool first_dim = true)
: first_dim(first_dim) {
auto dims = out_grad.sizes();
first_dim ? block_shape.assign(dims.begin() + skip_dims, dims.end())
: block_shape.assign(dims.begin(), dims.end() - skip_dims);
block_size = first_dim
? out_grad.size_from_dim(skip_dims)
: out_grad.size_from_dim(out_grad.dim() - skip_dims);
}
void observeOriginalInput(
int /*original_input*/,
const Tensor& /*value*/,
Tensor* /*input_grad*/, // optional grad to populate
int /*skip_dims*/) {}
void appendGradShape(vector<int64_t>* output_shape) {
output_shape->insert(
output_shape->end(), block_shape.begin(), block_shape.end());
}
};
};
// Put forward and backward in the same template?
template <typename T, class Context>
class SumReducer;
template <typename T, class Context>
class SumReducerGradient;
template <typename T>
class SumReducer<T, CPUContext> : public BaseReducer {
public:
using FixedDispatch = FixedValues<1>;
SumReducer(const Meta& meta, T* out, CPUContext* /*context*/)
: current_size_(0), out_(out) {
// add a wrapper in Context for it
if (meta.first_dim) {
memset(out, 0, sizeof(T) * meta.block_size);
}
}
template <int FixedSize>
void process(
const Meta& meta,
const T* in,
int64_t /*offset*/,
CPUContext* context) {
if (meta.first_dim) {
math::AxpyFixedSize<T, CPUContext, FixedSize>(
meta.block_size, 1, in, out_, context);
} else {
math::Sum<T, CPUContext>(
meta.block_size, in, out_ + current_size_++, context);
}
}
private:
int current_size_;
T* out_;
};
template <typename T, class Context>
class SumReducerGradient : public BaseReducerGradient {
public:
using FixedDispatch = FixedValues<1>;
SumReducerGradient(
const Meta& /*meta*/,
const T* s_grad,
CPUContext* /*context*/)
: s_grad_(s_grad) {}
template <int FixedSize>
void fillGrad(
const Meta& meta,
T* data_grad,
int64_t offset,
Context* context,
const int length) {
if (FixedSize == 1) { // static if
*data_grad = *s_grad_;
} else if (meta.first_dim) {
context->template CopySameDevice<T>(meta.block_size, s_grad_, data_grad);
} else {
math::Set<T, Context>(length, s_grad_[offset], data_grad, context);
}
}
private:
const T* s_grad_;
};
struct SumReducerDef {
template <typename T, class Context>
using Reducer = SumReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = SumReducerGradient<T, Context>;
static constexpr const char* name = "Sum";
static constexpr const char* doc =
"Summation is done element-wise across slices of the input tensor and "
"doesn't change the shape of the individual blocks.";
static void PopulateSchema(OpSchema& /*schema*/) {}
};
// Put forward and backward in the same template?
template <typename T, class Context>
class WeightedSumReducer;
template <typename T, class Context>
class WeightedSumReducerGradient;
template <typename T>
class WeightedSumReducer<T, CPUContext> : public BaseReducer {
public:
static constexpr int kInputCount = 2;
using FixedDispatch = FixedValues<1>;
struct Meta : BaseReducer::Meta {
const T* scalars;
bool first_dim;
explicit Meta(bool first = true) : first_dim(first) {}
void observeInput(int input, const Tensor& value, int skip_dims) {
if (input == 1) {
CAFFE_ENFORCE_EQ(
skip_dims, value.dim(), "SCALARS mustn't have extra dimensions");
scalars = value.data<T>();
return;
}
BaseReducer::Meta::observeInput(input, value, skip_dims);
}
};
WeightedSumReducer(const Meta& meta, T* out, CPUContext* /*context*/)
: out_(out) {
// do we have a wrapper for it?
memset(out, 0, sizeof(T) * meta.block_size);
}
template <int FixedSize>
void
process(const Meta& meta, const T* in, int64_t offset, CPUContext* context) {
CAFFE_ENFORCE(
meta.first_dim,
"WeightedSumReducer implemented only for "
"front dimensions reduction");
math::AxpyFixedSize<T, CPUContext, FixedSize>(
meta.block_size, meta.scalars[offset], in, out_, context);
}
private:
T* out_;
};
template <typename T, class Context>
class WeightedSumReducerGradient : public BaseReducerGradient {
public:
// which of the original inputs are required for gradient computation
static constexpr std::array<int, 1> originalInputs() {
return {{1}};
}
static int numAuxInputsWithGrads(const OperatorDef& def) {
return GetFlagArgument(def, "grad_on_weights");
}
static bool requiresDataInput(const OperatorDef& def) {
return numAuxInputsWithGrads(def) > 0;
}
using FixedDispatch = FixedValues<1>;
struct Meta : public BaseReducerGradient::Meta {
const T* scalars;
T* scalars_grad;
using BaseReducerGradient::Meta::Meta;
void observeOriginalInput(
int original_input,
const Tensor& value,
Tensor* input_grad, // optional grad to populate
int /*skip_dims*/) {
CAFFE_ENFORCE_EQ(1, original_input);
scalars = value.data<T>();
if (input_grad) {
input_grad->ResizeLike(value);
scalars_grad = input_grad->template mutable_data<T>();
}
}
};
WeightedSumReducerGradient(
const Meta& /*meta*/,
const T* s_grad,
CPUContext* /*context*/)
: s_grad_(s_grad) {}
template <int FixedSize>
void fillGrad(
const Meta& meta,
T* data_grad,
int64_t offset,
Context* context,
const int /*length*/) {
math::ScaleFixedSize<T, CPUContext, FixedSize>(
meta.block_size, meta.scalars[offset], s_grad_, data_grad, context);
}
// Special version which is called with the main input too, used only if
// additional input grad is requested
template <int FixedSize>
void fillGradWithMainInput(
const Meta& meta,
const T* data,
T* data_grad,
int64_t offset,
Context* context,
const int /*length*/) {
math::ScaleFixedSize<T, CPUContext, FixedSize>(
meta.block_size, meta.scalars[offset], s_grad_, data_grad, context);
math::Dot(
meta.block_size, s_grad_, data, meta.scalars_grad + offset, context);
}
private:
const T* s_grad_;
};
struct WeightedSumReducerDef {
template <typename T, class Context>
using Reducer = WeightedSumReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = WeightedSumReducerGradient<T, Context>;
static constexpr const char* name = "WeightedSum";
static constexpr const char* doc =
"Input slices are first scaled by SCALARS and then summed element-wise. "
"It doesn't change the shape of the individual blocks.";
static void PopulateSchema(OpSchema& schema) {
schema.Input(0, "DATA", "Input tensor for the summation");
schema.Input(
1,
"SCALARS",
"Scalar multipliers for the input slices. Must be a vector with the "
"length matching the number of slices");
schema.Arg(
"grad_on_weights",
"Produce also gradient for `weights`. For now it's only supported in "
"`Lengths`-based operators");
}
};
template <typename T, class Context>
class MeanReducer;
template <typename T, class Context>
class MeanReducerGradient;
template <typename T>
class MeanReducer<T, CPUContext> : public BaseReducer {
public:
using FixedDispatch = FixedValues<1>;
MeanReducer(const Meta& meta, T* out, CPUContext* /*context*/)
: out_(out), current_size_(0) {
if (meta.first_dim) {
memset(out, 0, sizeof(T) * meta.block_size);
}
}
template <int FixedSize>
void process(
const Meta& meta,
const T* in,
int64_t /*offset*/,
CPUContext* context) {
if (meta.first_dim) {
math::AxpyFixedSize<T, CPUContext, FixedSize>(
meta.block_size, 1, in, out_, context);
} else {
math::Sum<T, CPUContext>(
meta.block_size, in, out_ + current_size_, context);
}
current_size_++;
}
template <int FixedSize>
void finish(const Meta& meta, CPUContext* context) {
if (meta.first_dim) {
if (current_size_ > 0) {
math::ScaleFixedSize<T, CPUContext, FixedSize>(
meta.block_size, 1.0 / current_size_, out_, out_, context);
}
} else {
math::ScaleFixedSize<T, CPUContext, FixedSize>(
current_size_, 1.0 / meta.block_size, out_, out_, context);
}
}
private:
T* out_;
int current_size_;
};
template <typename T, class Context>
class MeanReducerGradient : public BaseReducerGradient {
public:
static constexpr bool computeLength() {
return true;
}
using FixedDispatch = FixedValues<1>;
MeanReducerGradient(
const Meta& /*meta*/,
const T* s_grad,
CPUContext* /*context*/)
: s_grad_(s_grad) {}
template <int FixedSize>
void fillGrad(
const Meta& meta,
T* data_grad,
int64_t offset,
Context* context,
const int length) {
CAFFE_ENFORCE_GT(length, 0, "Segment length must be > 0");
if (meta.first_dim) {
math::ScaleFixedSize<T, CPUContext, FixedSize>(
meta.block_size, 1.0 / length, s_grad_, data_grad, context);
} else {
math::Set<T, CPUContext>(
length, s_grad_[offset] * 1.0f / length, data_grad, context);
}
}
private:
const T* s_grad_;
};
struct MeanReducerDef {
template <typename T, class Context>
using Reducer = MeanReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = MeanReducerGradient<T, Context>;
static constexpr const char* name = "Mean";
static constexpr const char* doc =
"Mean computes the element-wise mean of the input slices. "
"Operation doesn't change the shape of the individual blocks.";
static void PopulateSchema(OpSchema& /*schema*/) {}
};
template <typename T, class Context>
class MaxReducer;
template <typename T, class Context>
class MaxReducerGradient;
template <typename T>
class MaxReducer<T, CPUContext> : public BaseReducer {
public:
using FixedDispatch = FixedValues<1>;
MaxReducer(const Meta& meta, T* out, CPUContext* /*context*/)
: out_(out), current_size_(0) {
// add a wrapper in Context for it
memset(out, 0, sizeof(T) * meta.block_size);
}
template <int FixedSize>
void process(
const Meta& meta,
const T* in,
int64_t /*offset*/,
CPUContext* context) {
CAFFE_ENFORCE(
meta.first_dim,
"MaxReducer implemented only for front dimensions reduction");
if (current_size_ > 0) {
EigenVectorMap<T> output_vec(out_, meta.block_size);
output_vec =
output_vec.cwiseMax(ConstEigenVectorMap<T>(in, meta.block_size));
} else {
memcpy(out_, in, sizeof(T) * meta.block_size);
}
++current_size_;
}
private:
T* out_;
int current_size_;
};
template <typename T, class Context>
class MaxReducerGradient : public BaseReducerGradient {
public:
static bool requiresDataInput(const OperatorDef& /*def*/) {
return true;
}
static bool requiresForwardOutput() {
return true;
}
using FixedDispatch = FixedValues<1>;
MaxReducerGradient(
const Meta& /*meta*/,
const T* s_grad,
CPUContext* /*context*/)
: s_grad_(s_grad) {}
template <int FixedSize>
void fillGradWithMainInputAndForwardOutput(
const Meta& meta,
const T* data,
T* data_grad,
const T* forward_output,
int64_t /*offset*/,
Context* /*context*/,
const int /*length*/) {
for (const auto i : c10::irange(meta.block_size)) {
data_grad[i] = data[i] == forward_output[i] ? s_grad_[i] : 0;
}
}
private:
const T* s_grad_;
};
struct MaxReducerDef {
template <typename T, class Context>
using Reducer = MaxReducer<T, Context>;
template <typename T, class Context>
using ReducerGradient = MaxReducerGradient<T, Context>;
static constexpr const char* name = "Max";
static constexpr const char* doc =
"Max computes the element-wise max of the input slices. "
"Operation doesn't change the shape of the individual blocks.";
static void PopulateSchema(OpSchema& /*schema*/) {}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_RECUDER_FUNCTORS_H_
| 24,714
| 28.422619
| 80
|
h
|
null |
pytorch-main/caffe2/operators/relu_n_op.h
|
#ifndef CAFFE2_OPERATORS_RELU_N_OP_H_
#define CAFFE2_OPERATORS_RELU_N_OP_H_
#include <vector>
#include "caffe2/operators/elementwise_ops.h"
namespace caffe2 {
template <class Context>
struct ReluNFunctor {
explicit ReluNFunctor(OperatorBase& op)
: n(op.GetSingleArgument<float>("n", 6.0f)) {
CAFFE_ENFORCE_GT(n, 0, "n should be greater than 0");
}
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const;
const float n;
};
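// Semantically, ReluN (relu6 when n == 6) clamps activations to [0, n]. A
// minimal scalar sketch of the forward pass (the actual kernels live in the
// per-backend .cc/.cu implementations):
//
//   Y[i] = std::min(std::max(X[i], T(0)), T(n));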
template <class Context>
struct ReluNGradientFunctor {
explicit ReluNGradientFunctor(OperatorBase& op)
: n(op.GetSingleArgument<float>("n", 6.0f)) {
CAFFE_ENFORCE_GT(n, 0, "n should be greater than 0");
}
template <typename T>
bool Forward(
const std::vector<int>& Y_dims,
const std::vector<int>& dY_dims,
const T* Y,
const T* dY,
T* dX,
Context* context) const;
const float n;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_RELU_N_OP_H_
| 990
| 21.022222
| 73
|
h
|
null |
pytorch-main/caffe2/operators/remove_data_blocks_op.h
|
#ifndef CAFFE2_OPERATORS_REMOVE_DATA_BLOCKS_OP_H_
#define CAFFE2_OPERATORS_REMOVE_DATA_BLOCKS_OP_H_
#include <algorithm>
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class RemoveDataBlocksOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(RemoveDataBlocksOp);
USE_DISPATCH_HELPER;
bool RunOnDevice() override {
if (Input(INDICES).sizes()[0] == 0) {
Output(0)->CopyFrom(Input(0));
return true;
} else {
return DispatchHelper<TensorTypes<int, long>>::call(this, Input(INDICES));
}
}
template <typename T>
bool DoRunWithType() {
const auto& data = Input(DATA);
const auto& indices = Input(INDICES);
    CAFFE_ENFORCE(data.dim() > 0, "DATA should be at least 1-D.");
CAFFE_ENFORCE(indices.dim() == 1, "INDICES should be 1-D.");
const auto outer_size = data.sizes()[0];
const auto block_size = data.size_from_dim(1);
const auto block_size_bytes = block_size * data.dtype().itemsize();
auto indices_size = indices.sizes()[0];
const char* data_ptr = (char*)data.raw_data();
const auto* ind_ptr = indices.template data<T>();
std::vector<T> ind_vec;
for (const auto i : c10::irange(indices_size)) {
ind_vec.push_back(ind_ptr[i]);
}
std::sort(ind_vec.begin(), ind_vec.end());
    CAFFE_ENFORCE(ind_vec[0] >= 0, "The min index should be non-negative.");
CAFFE_ENFORCE(
ind_vec[indices_size - 1] < outer_size,
"The max index should be smaller than the data outer size.");
// removes duplicate indices
ind_vec.erase(std::unique(ind_vec.begin(), ind_vec.end()), ind_vec.end());
indices_size = ind_vec.size();
auto* output = Output(0);
auto shape = data.sizes().vec();
shape[0] -= indices_size;
output->Resize(shape);
char* out_ptr = (char*)output->raw_mutable_data(data.dtype());
ind_vec.insert(ind_vec.begin(), -1);
int64_t ind_vec_size = ind_vec.size();
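    // Example: outer_size = 5 and sorted indices {1, 3}. With the -1 sentinel
    // prepended above, the copy intervals below become [0, 1), [2, 3) and
    // [4, 5), i.e. rows 0, 2 and 4 survive into the output.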
for (const auto i : c10::irange(ind_vec_size)) {
int64_t interval_start = ind_vec[i] + 1;
int64_t interval_end =
(i == ind_vec_size - 1) ? outer_size : ind_vec[i + 1];
auto num_items = interval_end - interval_start;
context_.CopyItemsSameDevice(
data.dtype(),
num_items * block_size,
data_ptr + block_size_bytes * interval_start,
out_ptr);
out_ptr += block_size_bytes * num_items;
}
return true;
}
private:
INPUT_TAGS(DATA, INDICES);
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_REMOVE_DATA_BLOCKS_OP_H_
| 2,691
| 29.942529
| 80
|
h
|
null |
pytorch-main/caffe2/operators/replace_nan_op.h
|
#ifndef CAFFE_OPERATORS_REPLACE_NAN_OP_H_
#define CAFFE_OPERATORS_REPLACE_NAN_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class ReplaceNaNOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ReplaceNaNOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
void ReplaceNaN(const T& value, const int64_t size, const T* X, T* Y);
template <typename T>
bool DoRunWithType() {
T value = this->template GetSingleArgument<T>("value", 0);
auto& input = Input(0);
auto* output = Output(0, input.sizes(), at::dtype<T>());
const T* input_data = input.template data<T>();
T* output_data = output->template mutable_data<T>();
ReplaceNaN<T>(value, input.numel(), input_data, output_data);
return true;
}
};
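// Example semantics: with value = 0, an input {1.0f, NaN, 3.0f} is written to
// the output as {1.0f, 0.0f, 3.0f}; the element-wise ReplaceNaN kernel itself
// is defined per backend in the corresponding .cc/.cu file.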
} // namespace caffe2
#endif // CAFFE_OPERATORS_REPLACE_NAN_OP_H_
| 1,170
| 24.456522
| 76
|
h
|
null |
pytorch-main/caffe2/operators/reshape_op.h
|
#ifndef CAFFE2_OPERATORS_RESHAPE_OP_H_
#define CAFFE2_OPERATORS_RESHAPE_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
// Takes a shape and data tensor and reshapes it
template <typename F, class Context>
class ReshapeOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ReshapeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
new_shape_(this->template GetRepeatedArgument<int64_t>("shape")) {}
bool RunOnDevice() override {
if (InputSize() == 2) {
return DispatchHelper<TensorTypes<int, int64_t>>::call(this, Input(1));
}
CAFFE_ENFORCE(
OperatorBase::HasArgument("shape"), "Argument `shape` is missing.");
return this->template DoRunWithType<int64_t>();
}
template <typename T>
bool DoRunWithType() {
DoRunWithTypeImpl<T>(Input(0), Output(0));
return true;
}
protected:
template <typename T>
void DoRunWithTypeImpl(const Tensor& input, Tensor* output) {
vector<int64_t> actual_new_shape = new_shape_;
if (InputSize() == 2) {
CAFFE_ENFORCE(
!OperatorBase::HasArgument("shape"),
"New shape is specified by the input blob, do not pass in "
"the argument `shape`.");
      // The shape should always be stored on CPU;
      // handle the case where it ends up on GPU for some reason.
if (this->InputIsTensorType(1, CPU)) {
// originally, shape input must be in CPU context
auto& shape = this->template Input<Tensor>(1, CPU);
CAFFE_ENFORCE_EQ(
shape.dim(),
1,
"When input_as_shape is true, the input must be a 1D tensor of "
"data type int64_t");
CAFFE_ENFORCE(shape.numel() > 0);
auto* shape_data = shape.template data<T>();
actual_new_shape.insert(
actual_new_shape.end(), shape_data, shape_data + shape.dim32(0));
} else {
auto& shape = Input(1);
CAFFE_ENFORCE_EQ(
shape.dim(),
1,
"When input_as_shape is true, the input must be a 1D tensor of "
"data type int64_t");
CAFFE_ENFORCE(shape.numel() > 0);
auto* shape_data = shape.template data<T>();
        // Fetch a copy of the shape data to CPU.
std::unique_ptr<T[]> shape_data_copy =
std::make_unique<T[]>(shape.dim32(0));
context_.template CopyToCPU<T>(
shape.dim32(0), shape_data, shape_data_copy.get());
actual_new_shape.insert(
actual_new_shape.end(),
shape_data_copy.get(),
shape_data_copy.get() + shape.dim32(0));
}
}
// Checks if the new shape is valid and fills in the missing dimension
// specified by -1.
// NOTE: At most one dimension can be -1.
auto total_size = input.numel();
T size = 1;
// NOTE: support for legacy caffe1 syntax
    // Copy over the input dimensions for entries that are specified as zero.
if (total_size != 0) {
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (size_t i = 0; i < actual_new_shape.size() && i < input.dim(); ++i) {
if (actual_new_shape[i] == 0) {
actual_new_shape[i] = input.size(i);
}
}
}
int unknown_idx = -1;
for (const auto i : c10::irange(actual_new_shape.size())) {
const auto dim = actual_new_shape[i];
if (dim == -1) {
CAFFE_ENFORCE(
unknown_idx == -1,
"Argument `shape` has more than one missing dimension.");
unknown_idx = i;
} else {
size *= dim;
}
}
if (size == 0 && total_size != 0) {
CAFFE_THROW(
"Can not reshape a non-zero size (",
total_size,
") tensor to zero size.");
}
if (total_size != 0) {
// if tensor is not empty, infer the size of the unknown index
if (unknown_idx != -1) {
CAFFE_ENFORCE_NE(
size,
0,
"New shape at dim ",
unknown_idx,
" can not be inferred since new size is zero.");
CAFFE_ENFORCE(
total_size % size == 0,
"Argument `shape` does not agree with the input data.",
" (",
total_size,
" vs ",
size,
")");
actual_new_shape[unknown_idx] = total_size / size;
} else {
CAFFE_ENFORCE_EQ(
total_size,
size,
"Argument `shape` does not agree with the input data.",
" (",
total_size,
" != ",
size,
")");
}
} else if (unknown_idx != -1) {
// if size is empty, then set unknown index to be 0 (empty tensor)
actual_new_shape[unknown_idx] = 0;
}
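    // Worked example: an input of shape (2, 3, 4) (total_size = 24) reshaped
    // with shape = (0, -1): the leading 0 copies the input dim (2), the known
    // product is size = 2, and the -1 dim is inferred as 24 / 2 = 12, giving
    // an output of shape (2, 12).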
// Write the original shape to the second output.
auto* old_shape = this->template Output<Tensor>(1, CPU);
old_shape->Resize(input.sizes().size());
T* old_shape_data = old_shape->template mutable_data<T>();
std::vector<T> old_shape_vector(input.sizes().begin(), input.sizes().end());
for (const auto i : c10::irange(old_shape_vector.size())) {
old_shape_data[i] = old_shape_vector[i];
}
output->Resize(actual_new_shape);
if (output != &input) {
// If we are not doing in-place computation, a copy is needed.
context_.CopyItemsSameDevice(
input.dtype(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.dtype()));
}
}
private:
vector<int64_t> new_shape_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_RESHAPE_OP_H_
| 5,768
| 31.22905
| 80
|
h
|
null |
pytorch-main/caffe2/operators/resize_3d_op.h
|
#pragma once
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(ResizeNearest3D);
namespace caffe2 {
template <typename T, class Context>
class ResizeNearest3DOp final : public Operator<Context> {
public:
template <class... Args>
explicit ResizeNearest3DOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
temporal_scale_(1),
height_scale_(1),
width_scale_(1),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))) {
if (HasArgument("temporal_scale")) {
temporal_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("temporal_scale", 1));
}
if (HasArgument("height_scale")) {
height_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("height_scale", 1));
}
if (HasArgument("width_scale")) {
width_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("width_scale", 1));
}
CAFFE_ENFORCE_GT(temporal_scale_, 0);
CAFFE_ENFORCE_GT(height_scale_, 0);
CAFFE_ENFORCE_GT(width_scale_, 0);
CAFFE_ENFORCE(order_ == StorageOrder::NCHW || order_ == StorageOrder::NHWC);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
bool RunOnDeviceWithOrderNCHW();
protected:
T temporal_scale_;
T height_scale_;
T width_scale_;
StorageOrder order_;
};
template <typename T, class Context>
class ResizeNearest3DGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit ResizeNearest3DGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
temporal_scale_(1),
height_scale_(1),
width_scale_(1),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))) {
temporal_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("temporal_scale", 1));
height_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("height_scale", 1));
width_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("width_scale", 1));
CAFFE_ENFORCE_GT(temporal_scale_, 0);
CAFFE_ENFORCE_GT(height_scale_, 0);
CAFFE_ENFORCE_GT(width_scale_, 0);
CAFFE_ENFORCE(order_ == StorageOrder::NCHW || order_ == StorageOrder::NHWC);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
bool RunOnDeviceWithOrderNCHW();
protected:
T temporal_scale_;
T height_scale_;
T width_scale_;
StorageOrder order_;
};
} // namespace caffe2
| 2,677
| 27.795699
| 80
|
h
|
null |
pytorch-main/caffe2/operators/resize_op.h
|
#pragma once
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(ResizeNearest);
namespace caffe2 {
template <typename T, class Context>
class ResizeNearestOp final : public Operator<Context> {
public:
template <class... Args>
explicit ResizeNearestOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
width_scale_(1),
height_scale_(1),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))) {
if (HasArgument("width_scale")) {
width_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("width_scale", 1));
}
if (HasArgument("height_scale")) {
height_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("height_scale", 1));
}
CAFFE_ENFORCE_GT(width_scale_, 0);
CAFFE_ENFORCE_GT(height_scale_, 0);
CAFFE_ENFORCE(order_ == StorageOrder::NCHW || order_ == StorageOrder::NHWC);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
bool RunOnDeviceWithOrderNCHW();
bool RunOnDeviceWithOrderNHWC();
protected:
T width_scale_;
T height_scale_;
StorageOrder order_;
};
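// Example semantics (the resize kernels live in the per-backend .cc/.cu
// implementations): with width_scale = height_scale = 2, an NCHW input of
// shape (1, 3, 4, 4) is upsampled by nearest-neighbor lookup to (1, 3, 8, 8).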
template <typename T, class Context>
class ResizeNearestGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit ResizeNearestGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
width_scale_(1),
height_scale_(1),
order_(StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NCHW"))) {
width_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("width_scale", 1));
height_scale_ = static_cast<T>(
this->template GetSingleArgument<float>("height_scale", 1));
CAFFE_ENFORCE_GT(width_scale_, 0);
CAFFE_ENFORCE_GT(height_scale_, 0);
CAFFE_ENFORCE(order_ == StorageOrder::NCHW || order_ == StorageOrder::NHWC);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
bool RunOnDeviceWithOrderNCHW();
bool RunOnDeviceWithOrderNHWC();
protected:
T width_scale_;
T height_scale_;
StorageOrder order_;
};
} // namespace caffe2
| 2,307
| 27.146341
| 80
|
h
|
null |
pytorch-main/caffe2/operators/reverse_packed_segs_op.h
|
#ifndef CAFFE2_OPERATORS_REVERSE_PACKED_SEGS_OP_H_
#define CAFFE2_OPERATORS_REVERSE_PACKED_SEGS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class ReversePackedSegsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(ReversePackedSegsOp);
USE_DISPATCH_HELPER;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double, int, long, bool>>::call(
this, Input(DATA));
}
template <typename T>
bool DoRunWithType() {
if (Input(LENGTHS).template IsType<int>()) {
DoRunWithLengthType<T, int>();
} else {
DoRunWithLengthType<T, long>();
}
return true;
}
private:
INPUT_TAGS(DATA, LENGTHS);
template <typename T, typename LengthType>
void DoRunWithLengthType() {
const auto& data = Input(DATA);
const auto& lengths = Input(LENGTHS);
CAFFE_ENFORCE(
data.dim() == 3,
"DATA should be 3-D tensor <lengths, "
"segments, embeddings>");
CAFFE_ENFORCE(lengths.dim() == 1, "LENGTH should be 1-D");
const auto shape = data.sizes();
auto* output = Output(0, shape, at::dtype<T>());
const auto max_length = data.sizes()[0];
const auto batch_size = data.sizes()[1];
const auto block_size = data.sizes()[2];
CAFFE_ENFORCE(
lengths.sizes()[0] == batch_size,
"lenths size should be"
" equal to batch size");
const T* data_ptr = data.template data<T>();
const LengthType* lengths_ptr = lengths.template data<LengthType>();
vector<LengthType> lengths_host(batch_size);
context_.template CopyToCPU<LengthType>(
batch_size, lengths_ptr, &lengths_host[0]);
context_.FinishDeviceComputation();
T* rev_data_ptr = output->template mutable_data<T>();
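    // Example: max_length = 4 and seg_length = 3 for batch item i. The first
    // loop below writes time steps 0, 1, 2 in reverse order (2, 1, 0); the
    // second loop copies the padding step 3 through unchanged.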
for (const auto i : c10::irange(batch_size)) {
const auto& seg_length = lengths_host[i];
CAFFE_ENFORCE_LE(seg_length, max_length);
int64_t j = 0;
for (; j < seg_length; j++) {
const T* data_block_ptr = data_ptr + (j * batch_size + i) * block_size;
T* rev_data_block_ptr =
rev_data_ptr + ((seg_length - 1 - j) * batch_size + i) * block_size;
context_.template CopySameDevice<T>(
block_size, data_block_ptr, rev_data_block_ptr);
}
for (; j < max_length; j++) {
const T* data_block_ptr = data_ptr + (j * batch_size + i) * block_size;
T* rev_data_block_ptr =
rev_data_ptr + (j * batch_size + i) * block_size;
context_.template CopySameDevice<T>(
block_size, data_block_ptr, rev_data_block_ptr);
}
}
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_REVERSE_PACKED_SEGS_OP_H_
| 2,805
| 29.835165
| 80
|
h
|
null |
pytorch-main/caffe2/operators/rmac_regions_op.h
|
#ifndef CAFFE2_OPERATORS_RMAC_REGIONS_OP_H
#define CAFFE2_OPERATORS_RMAC_REGIONS_OP_H
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class RMACRegionsOp final : public Operator<Context> {
public:
template <class... Args>
explicit RMACRegionsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
scales_(this->template GetSingleArgument<int>("scales", 3)),
overlap_(this->template GetSingleArgument<float>("overlap", 0.4f)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
int scales_;
float overlap_;
Tensor num_rois_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_RMAC_REGIONS_OP_H
| 708
| 21.870968
| 77
|
h
|
null |
pytorch-main/caffe2/operators/rms_norm_op.h
|
#ifndef CAFFE2_OPERATORS_RMS_NORM_OP_H_
#define CAFFE2_OPERATORS_RMS_NORM_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// RMSNorm op.
// https://openreview.net/pdf?id=SygkZ3MTJE
template <class Context>
class RMSNormOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit RMSNormOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, 1),
OP_SINGLE_ARG(float, "eps", eps_, 0.0f) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType();
private:
const int axis_;
const float eps_;
};
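// Sketch of the forward pass (defined per backend, following the paper
// above): for each of the M rows of N elements,
//
//   rrms = 1 / sqrt(mean(X^2) + eps)
//   Y = X * rrms * gamma + beta   (element-wise along the row)
//
// The presence of a learned shift follows from the gradient op below, which
// emits dbeta as its third output and consumes the saved rrms values.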
template <class Context>
class RMSNormGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit RMSNormGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, 1) {}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& dY = Input(0);
const auto& X = Input(1);
const auto& gamma = Input(2);
const auto& rrms = Input(3);
const int canonical_axis = X.canonical_axis_index(axis_);
const int64_t M = X.size_to_dim(canonical_axis);
const int64_t N = X.size_from_dim(canonical_axis);
auto* dX = Output(0, X.sizes(), at::dtype<T>());
auto* dgamma = Output(1, gamma.sizes(), at::dtype<T>());
auto* dbeta = Output(2, gamma.sizes(), at::dtype<T>());
const T* dY_data = dY.template data<T>();
const T* X_data = X.template data<T>();
const T* gamma_data = gamma.template data<T>();
const T* rrms_data = rrms.template data<T>();
T* dX_data = dX->template mutable_data<T>();
T* dgamma_data = dgamma->template mutable_data<T>();
T* dbeta_data = dbeta->template mutable_data<T>();
if (M == 0) {
math::Set<T, Context>(N, T(0), dgamma_data, &context_);
math::Set<T, Context>(N, T(0), dbeta_data, &context_);
return true;
}
RMSNormBackward<T>(M, N, dY_data, X_data, gamma_data, rrms_data, dX_data);
GammaBetaBackward<T>(
M, N, dY_data, X_data, rrms_data, dgamma_data, dbeta_data);
return true;
}
private:
template <typename T>
void RMSNormBackward(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* gamma,
const T* rrms,
T* dX);
template <typename T>
void GammaBetaBackward(
int64_t M,
int64_t N,
const T* dY,
const T* X,
const T* rrms,
T* dgamma,
T* dbeta);
const int axis_;
Tensor c2_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_RMS_NORM_OP_H_
| 2,968
| 25.508929
| 78
|
h
|
null |
pytorch-main/caffe2/operators/roi_align_gradient_op.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef CAFFE2_OPERATORS_ROI_ALIGN_GRADIENT_OP_H_
#define CAFFE2_OPERATORS_ROI_ALIGN_GRADIENT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(RoIAlignGradient)
namespace caffe2 {
template <typename T, class Context>
class RoIAlignGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit RoIAlignGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
spatial_scale_(
this->template GetSingleArgument<float>("spatial_scale", 1.)),
pooled_height_(this->template GetSingleArgument<int>("pooled_h", 1)),
pooled_width_(this->template GetSingleArgument<int>("pooled_w", 1)),
sampling_ratio_(
this->template GetSingleArgument<int>("sampling_ratio", -1)),
aligned_(this->template GetSingleArgument<bool>("aligned", false)) {
TORCH_DCHECK_GT(spatial_scale_, 0);
TORCH_DCHECK_GT(pooled_height_, 0);
TORCH_DCHECK_GT(pooled_width_, 0);
TORCH_DCHECK_GE(sampling_ratio_, 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
CAFFE_NOT_IMPLEMENTED;
}
protected:
float spatial_scale_;
int pooled_height_;
int pooled_width_;
int sampling_ratio_;
bool aligned_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ROI_ALIGN_GRADIENT_OP_H_
| 1,510
| 29.22
| 77
|
h
|
null |
pytorch-main/caffe2/operators/roi_align_op.h
|
#ifndef CAFFE2_OPERATORS_ROI_ALIGN_OP_H_
#define CAFFE2_OPERATORS_ROI_ALIGN_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(RoIAlign)
namespace caffe2 {
template <typename T, class Context>
class RoIAlignOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit RoIAlignOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))),
OP_SINGLE_ARG(float, "spatial_scale", spatial_scale_, 1.0f),
OP_SINGLE_ARG(int, "pooled_h", pooled_h_, 1),
OP_SINGLE_ARG(int, "pooled_w", pooled_w_, 1),
OP_SINGLE_ARG(int, "sampling_ratio", sampling_ratio_, -1),
OP_SINGLE_ARG(bool, "aligned", aligned_, false) {
TORCH_DCHECK_GT(spatial_scale_, 0.0f);
TORCH_DCHECK_GT(pooled_h_, 0);
TORCH_DCHECK_GT(pooled_w_, 0);
DCHECK(order_ == StorageOrder::NCHW || order_ == StorageOrder::NHWC);
}
bool RunOnDevice() override {
const auto& X = Input(0);
const auto& R = Input(1);
CAFFE_ENFORCE_EQ(X.dim(), 4);
CAFFE_ENFORCE_EQ(R.dim(), 2);
const int64_t roi_cols = R.size(1);
CAFFE_ENFORCE(roi_cols == 4 || roi_cols == 5);
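    // A 5-column RoI row is (batch_index, x1, y1, x2, y2); a 4-column row
    // omits the batch index, in which case the RoI refers to image 0 of the
    // batch.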
const int64_t N = R.size(0);
const int64_t C = X.size(order_ == StorageOrder::NCHW ? 1 : 3);
const int64_t H = X.size(order_ == StorageOrder::NCHW ? 2 : 1);
const int64_t W = X.size(order_ == StorageOrder::NCHW ? 3 : 2);
const std::vector<int64_t> Y_sizes = order_ == StorageOrder::NCHW
? std::vector<int64_t>{N, C, pooled_h_, pooled_w_}
: std::vector<int64_t>{N, pooled_h_, pooled_w_, C};
auto* Y = Output(0, Y_sizes, at::dtype<T>());
if (N == 0) {
return true;
}
const T* X_data = X.template data<T>();
const T* R_data = R.template data<T>();
T* Y_data = Y->template mutable_data<T>();
return order_ == StorageOrder::NCHW
? RunOnDeviceWithOrderNCHW(N, C, H, W, roi_cols, X_data, R_data, Y_data)
: RunOnDeviceWithOrderNHWC(
N, C, H, W, roi_cols, X_data, R_data, Y_data);
}
private:
bool RunOnDeviceWithOrderNCHW(
int64_t N,
int64_t C,
int64_t H,
int64_t W,
int64_t roi_cols,
const T* X,
const T* R,
T* Y);
bool RunOnDeviceWithOrderNHWC(
int64_t N,
int64_t C,
int64_t H,
int64_t W,
int64_t roi_cols,
const T* X,
const T* R,
T* Y);
const StorageOrder order_;
const float spatial_scale_;
const int pooled_h_;
const int pooled_w_;
const int sampling_ratio_;
const bool aligned_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ROI_ALIGN_OP_H_
| 2,875
| 29.273684
| 80
|
h
|
null |
pytorch-main/caffe2/operators/roi_align_rotated_gradient_op.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef ROI_ALIGN_ROTATED_GRADIENT_OP_H_
#define ROI_ALIGN_ROTATED_GRADIENT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T, class Context>
class RoIAlignRotatedGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit RoIAlignRotatedGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
spatial_scale_(
this->template GetSingleArgument<float>("spatial_scale", 1.)),
pooled_height_(this->template GetSingleArgument<int>("pooled_h", 1)),
pooled_width_(this->template GetSingleArgument<int>("pooled_w", 1)),
sampling_ratio_(
this->template GetSingleArgument<int>("sampling_ratio", -1)),
aligned_(this->template GetSingleArgument<bool>("aligned", false)) {
TORCH_DCHECK_GT(spatial_scale_, 0);
TORCH_DCHECK_GT(pooled_height_, 0);
TORCH_DCHECK_GT(pooled_width_, 0);
TORCH_DCHECK_GE(sampling_ratio_, 0);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
CAFFE_NOT_IMPLEMENTED;
}
protected:
float spatial_scale_;
int pooled_height_;
int pooled_width_;
int sampling_ratio_;
bool aligned_;
};
} // namespace caffe2
#endif // ROI_ALIGN_ROTATED_GRADIENT_OP_H_
| 1,393
| 28.659574
| 77
|
h
|
null |
pytorch-main/caffe2/operators/roi_align_rotated_op.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef ROTATED_ROI_ALIGN_OP_H_
#define ROTATED_ROI_ALIGN_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(RoIAlignRotated)
namespace caffe2 {
template <typename T, class Context>
class RoIAlignRotatedOp final : public Operator<Context> {
public:
template <class... Args>
explicit RoIAlignRotatedOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))),
spatial_scale_(
this->template GetSingleArgument<float>("spatial_scale", 1.)),
pooled_height_(this->template GetSingleArgument<int>("pooled_h", 1)),
pooled_width_(this->template GetSingleArgument<int>("pooled_w", 1)),
sampling_ratio_(
this->template GetSingleArgument<int>("sampling_ratio", -1)),
aligned_(this->template GetSingleArgument<bool>("aligned", false)) {
TORCH_DCHECK_GT(spatial_scale_, 0);
TORCH_DCHECK_GT(pooled_height_, 0);
TORCH_DCHECK_GT(pooled_width_, 0);
TORCH_DCHECK_GE(sampling_ratio_, 0);
DCHECK(order_ == StorageOrder::NCHW || order_ == StorageOrder::NHWC);
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
StorageOrder order_;
float spatial_scale_;
int pooled_height_;
int pooled_width_;
int sampling_ratio_;
bool aligned_;
};
} // namespace caffe2
#endif // ROTATED_ROI_ALIGN_OP_H_
| 1,628
| 30.326923
| 77
|
h
|
null |
pytorch-main/caffe2/operators/roi_pool_op.h
|
#ifndef ROI_POOL_OP_H_
#define ROI_POOL_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class RoIPoolOp final : public Operator<Context> {
public:
template <class... Args>
explicit RoIPoolOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
is_test_(
this->template GetSingleArgument<int>(OpSchema::Arg_IsTest, 0)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))),
pooled_height_(this->template GetSingleArgument<int>("pooled_h", 1)),
pooled_width_(this->template GetSingleArgument<int>("pooled_w", 1)),
spatial_scale_(
this->template GetSingleArgument<float>("spatial_scale", 1.)) {
CAFFE_ENFORCE(
(is_test_ && OutputSize() == 1) || (!is_test_ && OutputSize() == 2),
"Output size mismatch.");
CAFFE_ENFORCE_GT(spatial_scale_, 0);
CAFFE_ENFORCE_GT(pooled_height_, 0);
CAFFE_ENFORCE_GT(pooled_width_, 0);
CAFFE_ENFORCE_EQ(
order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
bool is_test_;
StorageOrder order_;
int pooled_height_;
int pooled_width_;
float spatial_scale_;
};
template <typename T, class Context>
class RoIPoolGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit RoIPoolGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
spatial_scale_(
this->template GetSingleArgument<float>("spatial_scale", 1.)),
pooled_height_(this->template GetSingleArgument<int>("pooled_h", 1)),
pooled_width_(this->template GetSingleArgument<int>("pooled_w", 1)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE_GT(spatial_scale_, 0);
CAFFE_ENFORCE_GT(pooled_height_, 0);
CAFFE_ENFORCE_GT(pooled_width_, 0);
CAFFE_ENFORCE_EQ(
order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
CAFFE_NOT_IMPLEMENTED;
}
protected:
float spatial_scale_;
int pooled_height_;
int pooled_width_;
StorageOrder order_;
};
} // namespace caffe2
#endif // ROI_POOL_OP_H_
| 2,503
| 30.3
| 79
|
h
|
null |
pytorch-main/caffe2/operators/rowmul_op.h
|
#ifndef CAFFE2_OPERATORS_ROW_MUL_H_
#define CAFFE2_OPERATORS_ROW_MUL_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
// A hacky version of Mul with broadcast
// RowMul([mat, w], [output])
template <typename T, class Context>
class RowMulOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(RowMulOp);
bool RunOnDevice() override {
auto& mat = Input(0);
auto& w = Input(1);
auto* output = Output(0, mat.sizes(), at::dtype<T>());
T* output_data = output->template mutable_data<T>();
const T* mat_data = mat.template data<T>();
const T* w_data = w.template data<T>();
// Dimension checking
CAFFE_ENFORCE_EQ(
w.numel(),
mat.dim32(0),
"Length of w should be equal to the first dim of mat");
auto block_size = mat.size_from_dim(1);
for (const auto i : c10::irange(w.numel())) {
size_t offset = i * block_size;
for (const auto j : c10::irange(block_size)) {
output_data[offset + j] = mat_data[offset + j] * w_data[i];
}
}
return true;
}
};
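// Example: mat of shape (3, 4) and w of shape (3) yield an output of shape
// (3, 4) with output[i][j] = mat[i][j] * w[i], i.e. row i is scaled by w[i].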
// A hacky version
template <typename T, class Context>
class ReduceTailSumOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(ReduceTailSumOp);
bool RunOnDevice() override {
auto& mat = Input(0);
int N = mat.dim32(0);
int block_size = mat.size_from_dim(1);
auto* output = Output(0, {N}, at::dtype<T>());
T* output_data = output->template mutable_data<T>();
const T* mat_data = mat.template data<T>();
for (const auto i : c10::irange(N)) {
output_data[i] = 0;
size_t offset = i * block_size;
for (const auto j : c10::irange(block_size)) {
output_data[i] += mat_data[offset + j];
}
}
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ROW_MUL_H_
| 2,008
| 24.75641
| 67
|
h
|
null |
pytorch-main/caffe2/operators/scale_blobs_op.h
|
#ifndef CAFFE2_OPERATORS_SCALE_BLOBS_OP_H_
#define CAFFE2_OPERATORS_SCALE_BLOBS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class Context>
class ScaleBlobsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ScaleBlobsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(float, "scale", scale_, 1.0f) {}
template <typename T>
bool DoRunWithType() {
int batchSize = InputSize();
for (const auto i : c10::irange(batchSize)) {
const auto& X = Input(i);
auto* Y = Output(i, X.sizes(), at::dtype<T>());
math::Scale<float, T, Context>(
X.numel(),
scale_,
X.template data<T>(),
Y->template mutable_data<T>(),
&context_);
}
return true;
}
bool RunOnDevice() override {
for (const auto i : c10::irange(InputSize())) {
auto& input = this->template Input<Tensor>(i, CPU);
auto* output = this->template Output<Tensor>(i, CPU);
output->ResizeLike(input);
}
return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
}
private:
const float scale_;
Tensor blobSizes_;
Tensor inputs_;
Tensor outputs_;
Tensor hostBlobSizes_;
Tensor hostInputs_;
Tensor hostOutputs_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SCALE_BLOBS_OP_H_
| 1,503
| 24.066667
| 68
|
h
|
null |
pytorch-main/caffe2/operators/scale_op.h
|
#ifndef CAFFE2_OPERATORS_SCALE_OP_H_
#define CAFFE2_OPERATORS_SCALE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class ScaleOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ScaleOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
scale_(this->template GetSingleArgument<float>("scale", 1.0)) {}
template <typename T>
bool DoRunWithType() {
auto& X = Input(0);
auto* Y = Output(0, X.sizes(), at::dtype<T>());
math::Scale<float, T, Context>(
X.numel(),
scale_,
X.template data<T>(),
Y->template mutable_data<T>(),
&context_);
return true;
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
protected:
float scale_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SCALE_OP_H_
| 1,019
| 22.181818
| 76
|
h
|
null |
pytorch-main/caffe2/operators/self_binning_histogram_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
#include <algorithm>
#include <cmath>
#include <limits>
namespace caffe2 {
template <class Context>
class SelfBinningHistogramOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SelfBinningHistogramOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
num_bins_(this->template GetSingleArgument<int>("num_bins", 0)),
num_edges_(num_bins_ + 1),
bin_spacing_(this->template GetSingleArgument<std::string>(
"bin_spacing",
"linear")),
logspace_start_(this->template GetSingleArgument<float>("logspace_start", 1e-24)),
abs_(this->template GetSingleArgument<bool>("abs", false))
{
CAFFE_ENFORCE_GE(
num_bins_, 1, "Number of bins must be greater than or equal to 1.");
CAFFE_ENFORCE(
bin_spacing_ == "linear" || bin_spacing_ == "logarithmic",
"Bin spacing can be one of 'linear' or 'logarithmic'."
);
CAFFE_ENFORCE_GT(
logspace_start_, 0,
"Logarithmic spacing base is a multiplier and is expected to be >1.");
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<float, double>>::call(this, Input(0));
}
template <typename T>
bool DoRunWithType() {
CheckInputs();
// Scale the range so that the last count is always 0.
const T RANGE_SCALING = 1.0001;
const auto* histogram_values = Output(HISTOGRAM_VALUES);
histogram_values->Resize(num_edges_);
auto* histogram_values_data = histogram_values->template mutable_data<T>();
const auto* histogram_counts = Output(HISTOGRAM_COUNTS);
histogram_counts->Resize(num_edges_);
auto* histogram_counts_data =
histogram_counts->template mutable_data<int64_t>();
// Calculate the max and min.
bool first_seen = false;
// 0 initialization is arbitrary here to suppress linter warnings.
// The actual initialization check happens through the first_seen variable.
T max = 0;
T min = 0;
int64_t total_count = 0;
for (const auto input_idx : c10::irange(InputSize())) {
const auto& x = Input(input_idx);
const int64_t N = x.numel();
total_count += N;
const auto* x_data = x.template data<T>();
for (const auto data_idx : c10::irange(N)) {
const T val = this->abs_ ? std::abs(x_data[data_idx]) : x_data[data_idx];
if (!first_seen) {
max = val;
min = val;
first_seen = true;
} else {
max = std::max(val, max);
min = std::min(val, min);
}
}
}
if (!first_seen) {
math::Set<T, Context>(num_edges_, 0, histogram_values_data, &context_);
math::Set<int64_t, Context>(
num_edges_, 0, histogram_counts_data, &context_);
return true;
}
CAFFE_ENFORCE(min <= max, "Incorrect min-max computation min=", min, " max=", max);
T scaled_max = 0; // this is set in both branches
if (bin_spacing_ == "linear") {
// Let's scale the range so that the last count is 0.
scaled_max = min + (max - min) * RANGE_SCALING;
T scaled_range = (scaled_max - min);
// Avoid underflow by calculating advancement through multiplication.
for (const auto i : c10::irange(num_edges_)) {
T advancement_ratio = T(i) / num_bins_;
histogram_values_data[i] = min + advancement_ratio * scaled_range;
}
} else if (bin_spacing_ == "logarithmic") {
// First, we need to sanitize the range.
if (min < logspace_start_) {
min = logspace_start_;
}
if (max < logspace_start_) {
max = logspace_start_;
}
T linear_range = max - min;
linear_range = linear_range * RANGE_SCALING;
scaled_max = min + linear_range;
// Calculate base interval using geometric sum.
// Simply: multiplier = exp((log(max) - log(min))/N)
// Avoid underflow by delaying division and exp.
      T log_multiplier_numerator = log(scaled_max) - log(min);
// Avoid underflow by:
// - Calculating each advancement separately for each i.
for (const auto i : c10::irange(num_edges_)) {
        T advancement_ratio = T(i) / num_bins_;
        histogram_values_data[i] =
            min * exp(log_multiplier_numerator * advancement_ratio);
}
}
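    // Worked example (linear spacing): min = 0, max = 10, num_bins = 2.
    // scaled_max = 0 + (10 - 0) * 1.0001 = 10.001, so the edges are
    // {0, 5.0005, 10.001}; the last edge sits strictly above the true max,
    // which keeps the last counts entry (index num_edges_ - 1) at zero.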
math::Set<int64_t, Context>(
num_edges_, 0, histogram_counts_data, &context_);
    if (histogram_values_data[num_edges_ - 1] <= max) {
      // In case min and max are equal (or any unexpected numerical underflow
      // occurs), we may not have a final edge larger than the max.
      histogram_values_data[num_edges_ - 1] = scaled_max;
      histogram_counts_data[0] = total_count;
    } else {
for (const auto input_idx : c10::irange(InputSize())) {
const auto& x = Input(input_idx);
const int64_t N = x.numel();
const auto* x_data = x.template data<T>();
for (const auto data_idx : c10::irange(N)) {
const T val = this->abs_ ? std::abs(x_data[data_idx]) : x_data[data_idx];
const auto bisection_it = std::upper_bound(
histogram_values_data,
histogram_values_data + num_edges_,
val);
const int bisection_idx = bisection_it - histogram_values_data;
if (bisection_idx > 0 && bisection_idx < num_edges_) {
histogram_counts_data[bisection_idx - 1]++;
}
if (bisection_idx == 0) {
histogram_counts_data[0]++;
}
}
}
}
return true;
}
protected:
OUTPUT_TAGS(HISTOGRAM_VALUES, HISTOGRAM_COUNTS);
private:
int num_bins_;
int num_edges_;
std::string bin_spacing_;
float logspace_start_;
bool abs_; // automatically apply abs() on the input values
void CheckInputs() {
const auto& input_zero = Input(0);
for (const auto i : c10::irange(1, InputSize())) {
CAFFE_ENFORCE_EQ(
Input(i).dtype(),
input_zero.dtype(),
"All inputs must have the same type; expected ",
input_zero.dtype().name(),
" but got ",
Input(i).dtype().name(),
" for input ",
i);
}
}
};
} // namespace caffe2
| 6,289
| 33.371585
| 91
|
h
|
null |
pytorch-main/caffe2/operators/selu_op.h
|
#ifndef CAFFE2_OPERATORS_SELU_OP_H_
#define CAFFE2_OPERATORS_SELU_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T, class Context>
class SeluOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SeluOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
alpha_ = this->template GetSingleArgument<T>(
"alpha", 1.6732632423543772848170429916717f);
lambda_ = this->template GetSingleArgument<T>(
"scale", 1.0507009873554804934193349852946f);
    // In the paper "scale" is named "lambda", but "lambda" is a reserved
    // keyword in Python.
CAFFE_ENFORCE_GT(lambda_, 1.0);
}
bool RunOnDevice() override;
protected:
T alpha_;
T lambda_;
};
template <typename T, class Context>
class SeluGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SeluGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {
alpha_ = this->template GetSingleArgument<T>(
"alpha", 1.6732632423543772848170429916717f);
lambda_ = this->template GetSingleArgument<T>(
"scale", 1.0507009873554804934193349852946f);
CAFFE_ENFORCE_GT(lambda_, 1.0);
}
bool RunOnDevice() override;
protected:
T alpha_;
T lambda_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SELU_OP_H_
| 1,545
| 25.20339
| 73
|
h
|
null |
pytorch-main/caffe2/operators/sequence_ops.h
|
#ifndef CAFFE2_OPERATORS_SEQUENCE_OPS_H_
#define CAFFE2_OPERATORS_SEQUENCE_OPS_H_
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class GatherPaddingOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit GatherPaddingOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
startPaddingWidth_(
this->template GetSingleArgument<int>("padding_width", 1)),
endPaddingWidth_(
this->template GetSingleArgument<int>("end_padding_width", -1)) {
CAFFE_ENFORCE_GE(startPaddingWidth_, 0);
if (endPaddingWidth_ < 0) {
endPaddingWidth_ = startPaddingWidth_;
}
}
bool RunOnDevice() override {
if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {
Output(0)->Resize(std::vector<int64_t>(0));
auto output_0_data = Output(0)->template mutable_data<int64_t>();
// TODO(zhengxq): as suggested by salex@, change this to a loop.
math::Set<int64_t, Context>(
Output(0)->numel(), 0, output_0_data, &context_);
if (OutputSize() == 2) {
Output(1)->Resize(std::vector<int64_t>(0));
auto output_1_data = Output(1)->template mutable_data<int64_t>();
math::Set<int64_t, Context>(
Output(1)->numel(), 0, output_1_data, &context_);
}
return true;
}
return DispatchHelper<TensorTypes<float, double, int, int64_t, bool>>::call(
this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& in = Input(0);
CAFFE_ENFORCE_GE(in.dim(), 1);
const int32_t outer_size = in.sizes()[0];
const auto block_size = in.size_from_dim(1);
const auto pad_width = startPaddingWidth_ + endPaddingWidth_;
    // if no lengths input is provided, assume a single full-span entry
const int32_t* lengths_ptr = &outer_size;
int64_t lengths_size = 1;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_ptr = lengths.template data<int32_t>();
lengths_size = lengths.numel();
}
std::vector<int64_t> padShape(in.sizes().begin() + 1, in.sizes().end());
// output will contain accumulator over paddings
Output(0)->Resize(padShape);
T* padding_start_ptr = Output(0)->template mutable_data<T>();
math::Set<T, Context>(block_size, 0.0, padding_start_ptr, &context_);
// if no end_padding is provided, assume it's the same as start_padding
T* padding_end_ptr = padding_start_ptr;
if (OutputSize() == 2) {
Output(1)->Resize(padShape);
padding_end_ptr = Output(1)->template mutable_data<T>();
math::Set<T, Context>(block_size, 0.0, padding_end_ptr, &context_);
}
GatherPadding<T>(
outer_size,
lengths_size,
block_size,
pad_width,
in.template data<T>(),
lengths_ptr,
padding_start_ptr,
padding_end_ptr);
return true;
}
private:
template <typename T>
void GatherPadding(
const int outer_size,
const int lengths_size,
const int block_size,
const int pad_width,
const T* in_ptr,
const int* lengths_ptr,
T* padding_start_ptr,
T* padding_end_ptr);
int startPaddingWidth_;
int endPaddingWidth_;
// Scratch space required by the CUDA version
Tensor lengths_prefix_sum_buffer_{Context::GetDeviceType()};
Tensor lengths_prefix_sum_{Context::GetDeviceType()};
};
template <class Context>
class RemovePaddingOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit RemovePaddingOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
startPaddingWidth_(
this->template GetSingleArgument<int>("padding_width", 1)),
endPaddingWidth_(
this->template GetSingleArgument<int>("end_padding_width", -1)) {
CAFFE_ENFORCE_GE(startPaddingWidth_, 0);
if (endPaddingWidth_ < 0) {
endPaddingWidth_ = startPaddingWidth_;
}
}
bool RunOnDevice() override {
if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {
Output(0)->CopyFrom(Input(0), true /*async*/);
if (OutputSize() == 2) {
Output(1)->CopyFrom(Input(1), true /*async*/);
}
return true;
}
return DispatchHelper<TensorTypes<float, double, int, int64_t, bool>>::call(
this, Input(0));
}
template <typename T>
bool DoRunWithType();
private:
int startPaddingWidth_;
int endPaddingWidth_;
// Scratch space required by the CUDA version
Tensor lengths_prefix_sum_buffer_{Context::GetDeviceType()};
Tensor lengths_prefix_sum_{Context::GetDeviceType()};
};
template <class Context>
class AddPaddingOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit AddPaddingOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
startPaddingWidth_(
this->template GetSingleArgument<int>("padding_width", 1)),
endPaddingWidth_(
this->template GetSingleArgument<int>("end_padding_width", -1)) {
CAFFE_ENFORCE_GE(startPaddingWidth_, 0);
if (endPaddingWidth_ < 0) {
endPaddingWidth_ = startPaddingWidth_;
}
}
bool RunOnDevice() override {
if (startPaddingWidth_ == 0 && endPaddingWidth_ == 0) {
Output(0)->CopyFrom(Input(0), true /*async*/);
if (OutputSize() == 2) {
Output(1)->CopyFrom(Input(1), true /*async*/);
}
return true;
}
return DispatchHelper<TensorTypes<float, double, int, int64_t, bool>>::call(
this, Input(0));
}
template <typename T>
bool DoRunWithType() {
const auto& in = Input(0);
CAFFE_ENFORCE_GE(in.dim(), 1);
const int32_t outer_size = in.sizes()[0];
const auto block_size = in.size_from_dim(1);
    // if no lengths input is provided, assume a single full-span entry
const int32_t* lengths_ptr = nullptr;
int32_t lengths_size = 1;
if (InputSize() > 1) {
const auto& lengths = Input(1);
lengths_ptr = lengths.template data<int32_t>();
lengths_size = lengths.numel();
}
// fetch paddings
// input_size == 2 : pad with zeros
// input_size == 3 : start and end paddings are the same
// input_size == 4 : different start and end paddings
const T* padding_start_ptr = nullptr;
const T* padding_end_ptr = nullptr;
if (InputSize() >= 3) {
auto& padding_start = Input(2);
CAFFE_ENFORCE_EQ(block_size, padding_start.numel());
padding_start_ptr = padding_start.template data<T>();
}
if (InputSize() == 4) {
auto& padding_end = Input(3);
CAFFE_ENFORCE_EQ(block_size, padding_end.numel());
padding_end_ptr = padding_end.template data<T>();
} else {
padding_end_ptr = padding_start_ptr;
}
auto out_dims = in.sizes().vec();
out_dims[0] += (startPaddingWidth_ + endPaddingWidth_) * lengths_size;
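    // Example: in has 5 rows split by lengths = (2, 3) and
    // startPaddingWidth_ == endPaddingWidth_ == 1; the output then has
    // 5 + (1 + 1) * 2 = 9 rows (one padding row on each side of each span).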
auto* out = Output(0, std::move(out_dims), at::dtype<T>());
const auto* in_ptr = in.template data<T>();
auto* out_ptr = out->template mutable_data<T>();
return MakePadding<T>(
in_ptr,
out_ptr,
lengths_ptr,
lengths_size,
outer_size,
padding_start_ptr,
padding_end_ptr,
block_size);
}
private:
template <typename T>
bool MakePadding(
const T* in_ptr,
T* out_ptr,
const int32_t* lengths_ptr,
int32_t lengths_size,
int32_t outer_size,
const T* padding_start_ptr,
const T* padding_end_ptr,
int64_t block_size);
int startPaddingWidth_;
int endPaddingWidth_;
// Scratch space required by the CUDA version
Tensor lengths_prefix_sum_buffer_{Context::GetDeviceType()};
Tensor lengths_prefix_sum_{Context::GetDeviceType()};
};
template <class Context>
class PadEmptySamplesOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit PadEmptySamplesOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() override;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SEQUENCE_OPS_H_
| 8,264
| 30.425856
| 80
|
h
|
null |
pytorch-main/caffe2/operators/shape_op.h
|
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
namespace caffe2 {
// RecordShapeOp records the shape of the input tensor to a vector of int. You
// mostly don't need this operator explicitly, and it is mostly used in the
// autodiff process.
template <class Context>
class ShapeOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ShapeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axes_(OperatorBase ::GetRepeatedArgument<int>("axes")) {}
bool RunOnDevice() override {
auto& data = Input(DATA);
int numDims = data.dim();
int numAxes = axes_.size();
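    // Example: data of shape (2, 3, 4, 5) with axes = {0, 2} yields a 1-D
    // output holding {2, 4}; with no axes the full shape {2, 3, 4, 5} is
    // copied out.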
if (numAxes == 0) {
auto* output = Output(0, {numDims}, at::dtype<int64_t>());
int64_t* output_data = output->template mutable_data<int64_t>();
context_.CopyBytesSameDevice(
numDims * sizeof(int64_t), data.sizes().data(), output_data);
return true;
}
auto* output = Output(0, {numAxes}, at::dtype<int64_t>());
auto src = reinterpret_cast<const char*>(data.sizes().data());
auto out = reinterpret_cast<char*>(output->template mutable_data<int64_t>());
for (const auto i : c10::irange(numAxes)) {
auto axis = axes_[i];
CAFFE_ENFORCE_LT(axis, numDims, "Axis out of range");
CAFFE_ENFORCE_GE(axis, 0, "Each axis should be non-negative");
context_.CopyBytesSameDevice(
sizeof(int64_t), src + axis * sizeof(int64_t), out);
out += sizeof(int64_t);
}
return true;
}
INPUT_TAGS(DATA);
private:
vector<int> axes_;
};
} // namespace caffe2
| 1,675
| 28.928571
| 81
|
h
|
null |
pytorch-main/caffe2/operators/sinusoid_position_encoding_op.h
|
#ifndef CAFFE2_OPERATORS_SINUSOID_POSITION_ENCODING_OP_H_
#define CAFFE2_OPERATORS_SINUSOID_POSITION_ENCODING_OP_H_
#ifdef _MSC_VER
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#endif // _MSC_VER
#include <cmath>
#include "caffe2/core/operator.h"
#include "Eigen/Core"
#include "caffe2/utils/eigen_utils.h"
namespace caffe2 {
template <class Context>
class SinusoidPositionEncodingOp : public Operator<Context> {
public:
template <class... Args>
explicit SinusoidPositionEncodingOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
embedding_size_(
this->template GetSingleArgument<int>("embedding_size", 100)),
alpha_(this->template GetSingleArgument<float>("alpha", 10000)),
amplitude_(this->template GetSingleArgument<float>("amplitude", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, this->template Input<Tensor>(0, CPU));
}
template <typename Index>
bool DoRunWithType() {
auto& positions = Input(0);
CAFFE_ENFORCE_EQ(positions.dim(), 2, "POSITIONS should be a 2-D tensor");
auto shape = positions.sizes().vec();
shape.push_back(embedding_size_);
auto* output = Output(0, shape, at::dtype<float>());
int M = shape[0];
int K = shape[1];
const Index* idxs = positions.template data<Index>();
float* out = output->template mutable_data<float>();
float log_alpha = std::log(alpha_);
float max_alpha_pow =
((float)embedding_size_ - 1.0f) / (float)embedding_size_;
for (const auto i : c10::irange(M)) {
float pos = (float)idxs[i * K];
// Compute the embedding for position i, example 0 first
float* row = &out[i * K * embedding_size_];
Eigen::Map<Eigen::VectorXf> row_map(row, embedding_size_, 1);
auto row_array = row_map.array();
float log_pos = std::log(pos);
row_array.setLinSpaced(
embedding_size_, log_pos, log_pos - log_alpha * max_alpha_pow);
row_array = row_array.exp().eval();
// row_array[k] == pos / alpha^(k / embedding_size)
// Phase shift so that alternating elements are cosines
for (int k = 1; k < embedding_size_; k += 2) {
row[k] += (float)M_PI_2;
}
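      // sin(x + pi/2) == cos(x), so after this phase shift the odd-indexed
      // entries evaluate to cosines of the original arguments below.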
row_array = amplitude_ * row_array.sin().eval();
// Copy the embedding to position i in the other examples
for (const auto j : c10::irange(1, K)) {
int base = i * K * embedding_size_;
std::copy(
&out[base],
&out[base + embedding_size_],
&out[base + j * embedding_size_]);
}
}
return true;
}
protected:
int embedding_size_;
float alpha_;
float amplitude_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SINUSOID_POSITION_ENCODING_OP_H_
| 2,853
| 29.042105
| 78
|
h
|
null |
pytorch-main/caffe2/operators/slice_op.h
|
#pragma once
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <class SIndex, class Context>
bool SliceImpl(
Tensor* output,
const Tensor& data,
const Tensor& starts,
const Tensor& ends,
Context* context,
Tensor* gdata = nullptr,
const Tensor* go = nullptr) {
bool backward = output == nullptr;
auto* starts_data = starts.template data<SIndex>();
auto* ends_data = ends.template data<SIndex>();
CAFFE_ENFORCE_EQ(starts.dim(), 1);
CAFFE_ENFORCE_EQ(ends.dim(), 1);
CAFFE_ENFORCE_GE(data.dim(), starts.numel());
CAFFE_ENFORCE_EQ(starts.numel(), ends.numel());
std::vector<SIndex> starts_idx(data.dim());
std::vector<SIndex> ends_idx(data.dim());
std::vector<SIndex> dst_sizes(data.dim());
for (const auto i : c10::irange(data.dim())) {
if (i >= starts.numel()) {
starts_idx[i] = 0;
ends_idx[i] = data.size(i);
dst_sizes[i] = data.size(i);
continue;
}
if (data.size(i) > 0) {
auto start = starts_data[i];
auto end = ends_data[i];
if (start < 0) {
start = data.size(i) + 1 + start;
}
if (end < 0) {
end = data.size(i) + 1 + end;
}
if (start > data.size(i)) {
start = data.size(i);
}
if (end > data.size(i)) {
end = data.size(i);
}
CAFFE_ENFORCE_GE(start, 0);
CAFFE_ENFORCE_GE(end, 0);
CAFFE_ENFORCE_GE(end, start);
starts_idx[i] = start;
ends_idx[i] = end;
dst_sizes[i] = end - start;
} else {
starts_idx[i] = 0;
ends_idx[i] = 0;
dst_sizes[i] = 0;
}
}
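  // Example of the normalization above: data.size(i) = 10 with starts = (2,)
  // and ends = (-1,): end is normalized to 10 + 1 + (-1) = 10, so the slice
  // keeps the half-open index range [2, 10).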
if (data.numel() <= 0) {
// When the input is empty, we do not need to do copy.
if (!backward) {
output->Resize(dst_sizes);
output->raw_mutable_data(data.dtype());
} else {
gdata->ResizeLike(data);
gdata->raw_mutable_data(go->dtype());
}
return true;
}
// for now only supports slicing in 1 dimension
int dim = -1;
for (const auto i : c10::irange(data.dim())) {
if (starts_idx[i] > 0 || ends_idx[i] < data.size(i)) {
CAFFE_ENFORCE_EQ(
dim, -1, "Currently only possible to slice in 1 dimension.");
dim = i;
}
}
if (dim == -1) {
if (!backward) {
output->CopyFrom(data, true /*async*/);
} else {
gdata->CopyFrom(*go, true /*async*/);
}
return true;
}
size_t unit = std::accumulate(
data.sizes().begin() + dim + 1,
data.sizes().end(),
1,
std::multiplies<SIndex>());
size_t num_blocks = std::accumulate(
data.sizes().begin(),
data.sizes().begin() + dim,
1,
std::multiplies<SIndex>());
if (!backward) {
output->Resize(dst_sizes);
} else {
gdata->ResizeLike(data);
}
size_t itemsize = data.dtype().itemsize();
if (!backward) {
char* src_bytes = (char*)data.raw_data();
char* dst_bytes = (char*)output->raw_mutable_data(data.dtype());
size_t src_nbytes = data.nbytes();
size_t dst_nbytes = output->nbytes();
size_t src_block_size = unit * data.size(dim);
size_t dst_block_size = unit * (ends_idx[dim] - starts_idx[dim]);
size_t src_offset = unit * starts_idx[dim];
if (num_blocks == 0 || dst_block_size == 0) {
return true;
}
size_t src_block_size_bytes = itemsize * src_block_size;
size_t dst_block_size_bytes = itemsize * dst_block_size;
char* src_offset_bytes = src_bytes + itemsize * src_offset;
char* dst_offset_bytes = dst_bytes;
for (const auto i : c10::irange(num_blocks)) {
char* local_src_offset_bytes =
src_offset_bytes + i * src_block_size_bytes;
char* local_dst_offset_bytes =
dst_offset_bytes + i * dst_block_size_bytes;
TORCH_DCHECK_LE(
static_cast<void*>(local_src_offset_bytes + dst_block_size_bytes),
static_cast<void*>(src_bytes + src_nbytes));
TORCH_DCHECK_LE(
static_cast<void*>(local_dst_offset_bytes + dst_block_size_bytes),
static_cast<void*>(dst_bytes + dst_nbytes));
context->CopyItemsSameDevice(
data.dtype(),
dst_block_size,
(void*)local_src_offset_bytes,
(void*)local_dst_offset_bytes);
}
} else {
char* src_bytes = (char*)go->raw_data();
char* dst_bytes = (char*)gdata->raw_mutable_data(go->dtype());
size_t src_nbytes = go->nbytes();
size_t dst_nbytes = gdata->nbytes();
size_t src_block_size = unit * (ends_idx[dim] - starts_idx[dim]);
size_t dst_block_size = unit * data.size(dim);
size_t dst_offset = unit * starts_idx[dim];
if (num_blocks == 0 || dst_block_size == 0) {
return true;
}
size_t src_block_size_bytes = itemsize * src_block_size;
size_t dst_block_size_bytes = itemsize * dst_block_size;
char* src_offset_bytes = src_bytes;
char* dst_offset_bytes = dst_bytes + itemsize * dst_offset;
    // Zero out the gradient blob before copying, since we copy fewer items
    // than there is space for.
math::Set<char, Context>(dst_nbytes, 0, dst_bytes, context);
    // If the output tensor is empty, just return the zeroed gradient tensor.
if (!src_bytes) {
return true;
}
for (const auto i : c10::irange(num_blocks)) {
char* local_src_offset_bytes =
src_offset_bytes + i * src_block_size_bytes;
char* local_dst_offset_bytes =
dst_offset_bytes + i * dst_block_size_bytes;
TORCH_DCHECK_LE(
local_src_offset_bytes + src_block_size_bytes,
src_bytes + src_nbytes);
TORCH_DCHECK_LE(
local_dst_offset_bytes + src_block_size_bytes,
dst_bytes + dst_nbytes);
context->CopyItemsSameDevice(
go->dtype(),
src_block_size,
(void*)local_src_offset_bytes,
(void*)local_dst_offset_bytes);
}
}
return true;
}
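// A minimal sketch (illustrative; `NormalizeSliceIndexSketch` is a
// hypothetical helper, not used by the operator) of the start/end
// normalization performed above for one dimension of size `size`. Negative
// indices wrap via `size + 1 + x`, so an end of -1 means "through the last
// element"; out-of-range indices are clamped to `size`.
inline int64_t NormalizeSliceIndexSketch(int64_t x, int64_t size) {
  if (x < 0) {
    x = size + 1 + x; // e.g. x == -1 wraps to size, i.e. the full tail
  }
  if (x > size) {
    x = size; // clamp, mirroring the bounds handling in SliceImpl
  }
  return x; // SliceImpl additionally enforces x >= 0 after wrapping
}
// Example: for size == 5, starts = {1} and ends = {-1} normalize to the
// half-open range [1, 5), i.e. elements 1, 2, 3 and 4.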
template <class Context>
class SliceOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SliceOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
starts_(this->template GetRepeatedArgument<int64_t>("starts")),
ends_(this->template GetRepeatedArgument<int64_t>("ends")),
statically_inited_(false) {}
bool RunOnDevice() override {
if (InputSize() > 1) {
return DispatchHelper<TensorTypes<int, int64_t>>::call(this, Input(1));
} else {
return DoRunWithType<int64_t>();
}
}
template <typename SIndex>
bool DoRunWithType() {
if (InputSize() > 1) {
ReinitializeAndCopyFrom(&starts_host_, at::dtype<SIndex>().device(CPU), Input(1));
ReinitializeAndCopyFrom(&ends_host_, at::dtype<SIndex>().device(CPU), Input(2));
} else {
if (!statically_inited_) {
CAFFE_ENFORCE(HasArgument("starts"));
CAFFE_ENFORCE(HasArgument("ends"));
CAFFE_ENFORCE_EQ(starts_.size(), ends_.size());
ReinitializeTensor(&starts_host_, {static_cast<int64_t>(starts_.size())}, at::dtype<SIndex>().device(CPU));
ReinitializeTensor(&ends_host_, {static_cast<int64_t>(ends_.size())}, at::dtype<SIndex>().device(CPU));
memcpy(
starts_host_.template mutable_data<SIndex>(),
starts_.data(),
sizeof(SIndex) * starts_.size());
memcpy(
ends_host_.template mutable_data<SIndex>(),
ends_.data(),
sizeof(SIndex) * ends_.size());
statically_inited_ = true;
}
}
const auto& data = Input(0);
auto output = Output(0);
return SliceImpl<SIndex, Context>(
output, data, starts_host_, ends_host_, &context_);
}
C10_DISABLE_COPY_AND_ASSIGN(SliceOp);
protected:
std::vector<int64_t> starts_;
std::vector<int64_t> ends_;
bool statically_inited_;
Tensor starts_host_;
Tensor ends_host_;
};
template <class Context>
class SliceGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SliceGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
starts_(this->template GetRepeatedArgument<int64_t>("starts")),
ends_(this->template GetRepeatedArgument<int64_t>("ends")),
statically_inited_(false) {}
C10_DISABLE_COPY_AND_ASSIGN(SliceGradientOp);
bool RunOnDevice() override {
if (InputSize() == 4) {
return DispatchHelper<TensorTypes<int, int64_t>>::call(this, Input(1));
} else {
return DoRunWithType<int64_t>();
}
}
template <typename SIndex>
bool DoRunWithType() {
auto* gdata = Output(0);
auto& data = Input(0);
if (InputSize() == 4) {
ReinitializeAndCopyFrom(&starts_host_, at::dtype<SIndex>().device(CPU), Input(1));
ReinitializeAndCopyFrom(&ends_host_, at::dtype<SIndex>().device(CPU), Input(2));
auto& go = Input(3);
return SliceImpl<SIndex, Context>(
nullptr, data, starts_host_, ends_host_, &context_, gdata, &go);
} else {
if (!statically_inited_) {
CAFFE_ENFORCE(HasArgument("starts"));
CAFFE_ENFORCE(HasArgument("ends"));
CAFFE_ENFORCE_EQ(starts_.size(), ends_.size());
ReinitializeTensor(
&starts_host_, {static_cast<int64_t>(starts_.size())}, at::dtype<SIndex>().device(CPU));
ReinitializeTensor(
&ends_host_, {static_cast<int64_t>(ends_.size())}, at::dtype<SIndex>().device(CPU));
memcpy(
starts_host_.template mutable_data<SIndex>(),
starts_.data(),
sizeof(SIndex) * starts_.size());
memcpy(
ends_host_.template mutable_data<SIndex>(),
ends_.data(),
sizeof(SIndex) * ends_.size());
statically_inited_ = true;
}
auto& go = Input(1);
return SliceImpl<SIndex, Context>(
nullptr, data, starts_host_, ends_host_, &context_, gdata, &go);
}
}
private:
std::vector<int64_t> starts_;
std::vector<int64_t> ends_;
bool statically_inited_;
Tensor starts_host_;
Tensor ends_host_;
};
} // namespace caffe2
| 10,150
| 29.21131
| 115
|
h
|
null |
pytorch-main/caffe2/operators/softmax_op.h
|
#ifndef CAFFE2_OPERATORS_SOFTMAX_OP_H_
#define CAFFE2_OPERATORS_SOFTMAX_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SoftmaxOp final : public Operator<Context> {
public:
template <class... Args>
explicit SoftmaxOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axis_(this->template GetSingleArgument<int>("axis", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
int axis_;
Tensor scale_;
Tensor rowmax_;
Tensor sum_multiplier_;
};
template <typename T, class Context>
class SoftmaxGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit SoftmaxGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
axis_(this->template GetSingleArgument<int>("axis", 1)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
int axis_;
Tensor scale_;
Tensor sum_multiplier_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SOFTMAX_OP_H_
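// A minimal sketch (illustrative, not part of the operator) of the
// numerically stable softmax SoftmaxOp computes along `axis_` for a single
// row of the flattened [outer, inner] view. Mapping the per-row max to
// `rowmax_` and the per-row sum to `scale_` is an assumption based on the
// member names. Assumes a non-empty row.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> SoftmaxRowSketch(const std::vector<float>& x) {
  const float m = *std::max_element(x.begin(), x.end()); // per-row max
  std::vector<float> y(x.size());
  float sum = 0.0f;
  for (std::size_t i = 0; i < x.size(); ++i) {
    y[i] = std::exp(x[i] - m); // shift by the max for numerical stability
    sum += y[i];
  }
  for (float& v : y) {
    v /= sum; // normalize so the row sums to 1
  }
  return y;
}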
| 1,174
| 23.479167
| 66
|
h
|
null |
pytorch-main/caffe2/operators/softmax_with_loss_op.h
|
#ifndef SOFTMAX_WITH_LOSS_OP_H_
#define SOFTMAX_WITH_LOSS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class SoftmaxWithLossOp final : public Operator<Context> {
public:
template <class... Args>
explicit SoftmaxWithLossOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
scale_(this->template GetSingleArgument<float>("scale", 1.)),
label_prob_mode_(
this->template GetSingleArgument<int>("label_prob", 0)),
average_by_batch_size_(
this->template GetSingleArgument<int>("average_by_batch_size", 0)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))),
axis_(this->template GetSingleArgument<int>("axis", 1)) {
CAFFE_ENFORCE(scale_ >= 0);
CAFFE_ENFORCE_EQ(
order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
float scale_;
int label_prob_mode_;
int average_by_batch_size_;
StorageOrder order_;
int axis_;
Tensor losses_; // Per example loss
Tensor rowmax_; // per example row max
Tensor weights_; // unignored weights
Tensor sum_multiplier_; // Vector of ones for summing via dot prod
Tensor total_weight_ptr_;
// passed to a function
Tensor scratch_{Context::GetDeviceType()};
};
template <typename T, class Context>
class SoftmaxWithLossGradientOp final : public Operator<Context> {
public:
template <class... Args>
explicit SoftmaxWithLossGradientOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
scale_(this->template GetSingleArgument<float>("scale", 1.)),
label_prob_mode_(
this->template GetSingleArgument<int>("label_prob", 0)),
average_by_batch_size_(
this->template GetSingleArgument<int>("average_by_batch_size", 0)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))),
only_loss_(this->template GetSingleArgument<bool>("only_loss", false)),
axis_(this->template GetSingleArgument<int>("axis", 1)) {
CAFFE_ENFORCE(scale_ >= 0);
CAFFE_ENFORCE_EQ(
order_, StorageOrder::NCHW, "Only NCHW order is supported right now.");
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
float scale_;
int label_prob_mode_;
int average_by_batch_size_;
// not used?
Tensor sum_multiplier_{Context::GetDeviceType()};
Tensor weights_; // unignored weights
Tensor total_weight_ptr_;
StorageOrder order_;
bool only_loss_;
int axis_;
Tensor scratch_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // SOFTMAX_WITH_LOSS_OP_H_
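// A minimal sketch (illustrative) of the cross-entropy computed by
// SoftmaxWithLossOp in its two labeling modes: with label_prob == 0 the
// label is a class index and the per-example loss is
// -log(softmax(x)[label]); with label_prob == 1 the label is a full
// distribution p and the loss is -sum_i p_i * log(softmax(x)_i). The
// epsilon clamp below is our own guard against log(0), not necessarily the
// operator's exact handling.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

float CrossEntropyRowSketch(
    const std::vector<float>& probs, // one row of softmax output
    const std::vector<float>& label) { // label_prob == 1 mode
  float loss = 0.0f;
  for (std::size_t i = 0; i < probs.size(); ++i) {
    loss -= label[i] * std::log(std::max(probs[i], 1e-20f));
  }
  return loss; // the `scale` argument multiplies the reduced loss
}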
| 2,883
| 31.404494
| 79
|
h
|
null |
pytorch-main/caffe2/operators/softplus_op.h
|
#ifndef CAFFE2_OPERATORS_SOFTPLUS_OP_H_
#define CAFFE2_OPERATORS_SOFTPLUS_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T, class Context>
class SoftplusOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(SoftplusOp)
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
};
template <typename T, class Context>
class SoftplusGradientOp final : public Operator<Context> {
public:
USE_SIMPLE_CTOR_DTOR(SoftplusGradientOp)
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override;
protected:
// Input: Y, dY; Output: dX
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SOFTPLUS_OP_H_
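// A minimal sketch (illustrative) of the softplus math. Because the
// gradient op receives Y and dY (see the comment in SoftplusGradientOp),
// the backward pass can be written purely in terms of y:
//   y = log(1 + exp(x))  =>  dy/dx = exp(x) / (1 + exp(x)) = 1 - exp(-y).
// The naive forward below overflows for large x; a production kernel would
// guard that case.
#include <cmath>

float SoftplusForwardSketch(float x) {
  return std::log1p(std::exp(x)); // log(1 + exp(x))
}

float SoftplusBackwardSketch(float y, float dy) {
  return dy * (1.0f - std::exp(-y)); // dX from Y and dY only
}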
| 781
| 20.135135
| 59
|
h
|
null |
pytorch-main/caffe2/operators/softsign_op.h
|
#ifndef CAFFE2_OPERATORS_SOFTSIGN_OP_H_
#define CAFFE2_OPERATORS_SOFTSIGN_OP_H_
#include <vector>
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
struct SoftsignFunctor {
template <typename T>
bool operator()(const int N, const T* X, T* Y, Context* context) const;
};
template <class Context>
struct SoftsignGradientFunctor {
template <typename T>
bool Forward(
const std::vector<int>& X_dims,
const std::vector<int>& dY_dims,
const T* X,
const T* dY,
T* dX,
Context* context) const;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SOFTSIGN_OP_H_
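// A minimal sketch (illustrative) of the softsign math: y = x / (1 + |x|)
// with gradient dy/dx = 1 / (1 + |x|)^2, which is consistent with
// SoftsignGradientFunctor consuming X (rather than Y) alongside dY.
#include <cmath>

float SoftsignForwardSketch(float x) {
  return x / (1.0f + std::fabs(x));
}

float SoftsignBackwardSketch(float x, float dy) {
  const float d = 1.0f + std::fabs(x);
  return dy / (d * d); // dY scaled by 1 / (1 + |x|)^2
}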
| 675
| 20.125
| 73
|
h
|
null |
pytorch-main/caffe2/operators/space_batch_op.h
|
#ifndef CAFFE2_OPERATORS_SPACE_BATCH_OP_H_
#define CAFFE2_OPERATORS_SPACE_BATCH_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
template <typename Context>
void spaceToBatch(
const Tensor& input,
int pad_t,
int pad_l,
int block_size,
Tensor* output,
Context* /*context*/) {
CAFFE_ENFORCE(input.dim() == 4);
CAFFE_ENFORCE(output->dim() == 4);
const int output_batch = output->dim32(0);
const int output_depth = output->dim32(1);
const int output_height = output->dim32(2);
const int output_width = output->dim32(3);
const int input_batch = input.dim32(0);
const int input_depth = input.dim32(1);
const int input_height = input.dim32(2);
const int input_width = input.dim32(3);
for (const auto out_b : c10::irange(output_batch)) {
const int in_b = out_b % input_batch;
const int offset_w = (out_b / input_batch) % block_size;
const int offset_h = (out_b / input_batch) / block_size;
for (const auto d : c10::irange(input_depth)) {
for (const auto out_h : c10::irange(output_height)) {
const int in_h = out_h * block_size + offset_h - pad_t;
for (const auto out_w : c10::irange(output_width)) {
const int in_w = out_w * block_size + offset_w - pad_l;
const auto output_offset =
((out_b * output_depth + d) * output_height + out_h) *
output_width +
out_w;
const auto input_offset =
((in_b * input_depth + d) * input_height + in_h) * input_width +
in_w;
if (in_h >= 0 && in_w >= 0 && in_h < input_height &&
in_w < input_width) {
output->template mutable_data<float>()[output_offset] =
input.template data<float>()[input_offset];
} else {
output->template mutable_data<float>()[output_offset] = 0.0;
}
}
}
}
}
}
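// A worked shape example (illustrative): with block_size == 2, zero padding
// and an NCHW input of shape [1, 3, 4, 4], spaceToBatch writes an output of
// shape [1 * 2 * 2, 3, 4 / 2, 4 / 2] = [4, 3, 2, 2]. Each of the four
// output batches holds one (offset_h, offset_w) phase of the 2x2 sampling
// grid, and batchToSpace below inverts the mapping.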
template <typename Context>
void batchToSpace(
const Tensor& input,
int pad_t,
int pad_l,
int block_size,
Tensor* output,
Context* /*context*/) {
CAFFE_ENFORCE(input.dim() == 4);
CAFFE_ENFORCE(output->dim() == 4);
const int output_batch = output->dim32(0);
const int output_depth = output->dim32(1);
const int output_height = output->dim32(2);
const int output_width = output->dim32(3);
const int input_batch = input.dim32(0);
const int input_depth = input.dim32(1);
const int input_height = input.dim32(2);
const int input_width = input.dim32(3);
CAFFE_ENFORCE(input_depth == output_depth);
for (const auto in_b : c10::irange(input_batch)) {
const int out_b = in_b % output_batch;
const int offset_w = (in_b / output_batch) % block_size;
const int offset_h = (in_b / output_batch) / block_size;
for (const auto d : c10::irange(input_depth)) {
for (const auto in_h : c10::irange(input_height)) {
const int out_h = in_h * block_size + offset_h - pad_t;
for (const auto in_w : c10::irange(input_width)) {
const int out_w = in_w * block_size + offset_w - pad_l;
if (out_h >= 0 && out_w >= 0 && out_h < output_height &&
out_w < output_width) {
const auto output_offset =
((out_b * output_depth + d) * output_height + out_h) *
output_width +
out_w;
const auto input_offset =
((in_b * input_depth + d) * input_height + in_h) * input_width +
in_w;
output->template mutable_data<float>()[output_offset] =
input.template data<float>()[input_offset];
}
}
}
}
}
}
template <typename Context>
class SpaceBatchOpBase : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SpaceBatchOpBase(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
pad_(this->template GetSingleArgument<int>("pad", 0)),
pad_t_(this->template GetSingleArgument<int>("pad_t", pad_)),
        pad_l_(this->template GetSingleArgument<int>("pad_l", pad_)),
        pad_b_(this->template GetSingleArgument<int>("pad_b", pad_)),
        pad_r_(this->template GetSingleArgument<int>("pad_r", pad_)),
block_size_(this->template GetSingleArgument<int>("block_size", 2)),
order_(StringToStorageOrder(
this->template GetSingleArgument<string>("order", "NCHW"))) {
CAFFE_ENFORCE(order_ == StorageOrder::NCHW);
}
protected:
int pad_;
int pad_t_;
int pad_l_;
int pad_b_;
int pad_r_;
int block_size_;
StorageOrder order_;
};
template <typename Context>
class SpaceToBatchOp final : public SpaceBatchOpBase<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
using SpaceBatchOpBase<Context>::SpaceBatchOpBase;
bool RunOnDevice() override {
const auto& input = Input(0);
auto* output = Output(0);
const int batch = input.dim32(0);
const int depth = input.dim32(1);
const int height = this->pad_b_ + this->pad_t_ + input.dim32(2);
const int width = this->pad_l_ + this->pad_r_ + input.dim32(3);
CAFFE_ENFORCE(
height % this->block_size_ == 0,
"Height: ",
height,
", block size: ",
this->block_size_);
CAFFE_ENFORCE(width % this->block_size_ == 0);
const int output_batch = batch * this->block_size_ * this->block_size_;
const int output_height = height / this->block_size_;
const int output_width = width / this->block_size_;
Output(0)->Resize(output_batch, depth, output_height, output_width);
spaceToBatch<Context>(
input,
this->pad_t_,
this->pad_l_,
this->block_size_,
output,
&context_);
return true;
}
};
template <typename Context>
class BatchToSpaceOp final : public SpaceBatchOpBase<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
using SpaceBatchOpBase<Context>::SpaceBatchOpBase;
bool RunOnDevice() override {
const auto& input = Input(0);
auto* output = Output(0);
const int batch = input.dim32(0);
const int depth = input.dim32(1);
const int height = input.dim32(2);
const int width = input.dim32(3);
const int output_batch = batch / this->block_size_ / this->block_size_;
const int output_height =
height * this->block_size_ - this->pad_b_ - this->pad_t_;
const int output_width =
width * this->block_size_ - this->pad_l_ - this->pad_r_;
Output(0)->Resize(output_batch, depth, output_height, output_width);
batchToSpace<Context>(
input,
this->pad_t_,
this->pad_l_,
this->block_size_,
output,
&context_);
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SPACE_BATCH_OP_H_
| 6,899
| 31.701422
| 80
|
h
|
null |
pytorch-main/caffe2/operators/sparse_dropout_with_replacement_op.h
|
#ifndef CAFFE2_OPERATORS_SPARSE_DROPOUT_WITH_REPLACEMENT_OP_H_
#define CAFFE2_OPERATORS_SPARSE_DROPOUT_WITH_REPLACEMENT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class SparseDropoutWithReplacementOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SparseDropoutWithReplacementOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
ratio_(this->template GetSingleArgument<float>("ratio", 0.0)),
replacement_value_(
this->template GetSingleArgument<int64_t>("replacement_value", 0)) {
// It is allowed to drop all or drop none.
CAFFE_ENFORCE_GE(ratio_, 0.0, "Ratio should be a valid probability");
CAFFE_ENFORCE_LE(ratio_, 1.0, "Ratio should be a valid probability");
}
bool RunOnDevice() override;
protected:
float ratio_;
int64_t replacement_value_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SPARSE_DROPOUT_WITH_REPLACEMENT_OP_H_
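// A minimal sketch (illustrative; the .cc implementation is not shown here,
// so the segment semantics below are an assumption based on the op's
// LENGTHS-style sparse inputs) of the intended behavior: each segment of a
// (values, lengths) pair is dropped with probability `ratio` and replaced
// by the single element `replacement_value`.
#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

void SparseDropoutSketch(
    const std::vector<int64_t>& values,
    const std::vector<int>& lengths,
    float ratio,
    int64_t replacement_value,
    std::mt19937& rng,
    std::vector<int64_t>* out_values,
    std::vector<int>* out_lengths) {
  std::bernoulli_distribution drop(ratio);
  std::size_t offset = 0;
  for (const int len : lengths) {
    if (drop(rng)) {
      out_values->push_back(replacement_value); // replace the whole segment
      out_lengths->push_back(1);
    } else {
      out_values->insert(
          out_values->end(),
          values.begin() + offset,
          values.begin() + offset + len);
      out_lengths->push_back(len);
    }
    offset += len;
  }
}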
| 1,122
| 30.194444
| 80
|
h
|
null |
pytorch-main/caffe2/operators/sparse_itemwise_dropout_with_replacement_op.h
|
#ifndef CAFFE2_OPERATORS_SPARSE_ITEMWISE_DROPOUT_WITH_REPLACEMENT_OP_H_
#define CAFFE2_OPERATORS_SPARSE_ITEMWISE_DROPOUT_WITH_REPLACEMENT_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class SparseItemwiseDropoutWithReplacementOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SparseItemwiseDropoutWithReplacementOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
ratio_(this->template GetSingleArgument<float>("ratio", 0.0)),
replacement_value_(
this->template GetSingleArgument<int64_t>("replacement_value", 0)) {
// It is allowed to drop all or drop none.
CAFFE_ENFORCE_GE(ratio_, 0.0, "Ratio should be a valid probability");
CAFFE_ENFORCE_LE(ratio_, 1.0, "Ratio should be a valid probability");
}
bool RunOnDevice() override;
private:
float ratio_;
int64_t replacement_value_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SPARSE_ITEMWISE_DROPOUT_WITH_REPLACEMENT_OP_H_
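// Relative to SparseDropoutWithReplacementOp above, the itemwise variant
// applies the ratio / replacement_value decision per element rather than
// per segment (an assumption based on the op's name; the .cc implementation
// is not shown here), so segment lengths are preserved while individual
// values are replaced.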
| 1,163
| 31.333333
| 80
|
h
|
null |
pytorch-main/caffe2/operators/sparse_lp_regularizer_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class TORCH_API SparseLpRegularizerOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit SparseLpRegularizerOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
p_(this->template GetSingleArgument<float>("p", 2.0)),
reg_lambda_(
this->template GetSingleArgument<float>("reg_lambda", 1e-5)) {
CAFFE_ENFORCE(
p_ == 1.0 || p_ == 2.0,
"Sparse Lp regularizer only implemented for p=1 or p=2.");
CAFFE_ENFORCE_GT(
reg_lambda_,
0.0,
"Lambda for sparse Lp regularizer must be greater than 0.");
CAFFE_ENFORCE_LT(
reg_lambda_,
1.0,
"Lambda for sparse Lp regularizer must be less than 1.");
}
bool RunOnDevice() override;
template <typename SIndex>
bool DoRunWithType();
protected:
float p_;
float reg_lambda_;
INPUT_TAGS(PARAM, INDICES);
OUTPUT_TAGS(OUTPUT_PARAM);
};
} // namespace caffe2
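// A minimal sketch (illustrative; the .cc implementation is not shown here)
// of the two shrinkage updates the constructor admits, applied per indexed
// weight: p == 2 scales toward zero, and p == 1 soft-thresholds, i.e. the
// proximal operator of the L1 penalty.
#include <algorithm>
#include <cmath>

float LpShrinkSketch(float w, float p, float reg_lambda) {
  if (p == 2.0f) {
    return w * (1.0f - reg_lambda); // multiplicative L2 shrinkage
  }
  // p == 1: soft-thresholding
  const float mag = std::max(std::fabs(w) - reg_lambda, 0.0f);
  return std::copysign(mag, w);
}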
| 1,130
| 24.704545
| 74
|
h
|