| repo (string, 1-152, ⌀) | file (string, 14-221) | code (string, 501-25k) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
null | pytorch-main/caffe2/core/distributions_stubs.h |
#ifndef CAFFE2_CORE_DISTRIBUTIONS_STUBS_H_
#define CAFFE2_CORE_DISTRIBUTIONS_STUBS_H_
#include <c10/macros/Macros.h>
#include <random>
/**
* This file provides distributions compatible with
* ATen/core/DistributionsHelper.h but backed with the std RNG implementation
* instead of the ATen one.
*
* Caffe2 mobile builds currently do not depend on all of ATen so this is
* required to allow using the faster ATen RNG for normal builds but keep the
* build size small on mobile. RNG performance typically doesn't matter on
* mobile builds since the models are small and rarely use random
* initialization.
*/
namespace at {
namespace {
template <typename R, typename T>
struct distribution_adapter {
template <typename... Args>
C10_HOST_DEVICE inline distribution_adapter(Args... args)
: distribution_(std::forward<Args>(args)...) {}
template <typename RNG>
C10_HOST_DEVICE inline R operator()(RNG generator) {
return distribution_(*generator);
}
private:
T distribution_;
};
template <typename T>
struct uniform_int_from_to_distribution
: distribution_adapter<T, std::uniform_int_distribution<T>> {
C10_HOST_DEVICE inline uniform_int_from_to_distribution(
uint64_t range,
int64_t base)
: distribution_adapter<T, std::uniform_int_distribution<T>>(
base,
// std is inclusive, at is exclusive
base + range - 1) {}
};
template <typename T>
using uniform_real_distribution =
distribution_adapter<T, std::uniform_real_distribution<T>>;
template <typename T>
using normal_distribution =
distribution_adapter<T, std::normal_distribution<T>>;
template <typename T>
using bernoulli_distribution =
distribution_adapter<T, std::bernoulli_distribution>;
template <typename T>
using exponential_distribution =
distribution_adapter<T, std::exponential_distribution<T>>;
template <typename T>
using cauchy_distribution =
distribution_adapter<T, std::cauchy_distribution<T>>;
template <typename T>
using lognormal_distribution =
distribution_adapter<T, std::lognormal_distribution<T>>;
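// Illustrative usage sketch, not part of the original header: sampling through
// one of the adapters above with a std-based generator. The generator is
// passed as a pointer, mirroring the ATen convention of operator()(RNG);
// `example_normal_sample` is a hypothetical helper name.
inline double example_normal_sample(std::mt19937* generator) {
  // Constructor arguments are forwarded to std::normal_distribution<double>.
  normal_distribution<double> dist(/*mean=*/0.0, /*stddev=*/1.0);
  // operator() dereferences the generator and samples from the std distribution.
  return dist(generator);
}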
} // namespace
} // namespace at
#endif // CAFFE2_CORE_DISTRIBUTIONS_STUBS_H_
| 2,161 | 27.447368 | 77 | h |
null | pytorch-main/caffe2/core/event.h |
#ifndef CAFFE2_CORE_EVENT_H_
#define CAFFE2_CORE_EVENT_H_
#include <chrono>
#include <c10/core/DeviceType.h>
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
constexpr int MaxDeviceTypes =
DeviceTypeProto::PROTO_COMPILE_TIME_MAX_DEVICE_TYPES;
class Event;
enum EventStatus {
EVENT_INITIALIZED = 0,
EVENT_SCHEDULED = 1,
EVENT_SUCCESS = 2,
EVENT_FAILED = 3,
};
// For the following functions, the void* argument shall be interpreted as the
// context object corresponding to the device type associated with the
// function.
// Initializes event
typedef void (*EventCreateFunction)(const DeviceOption& option, Event*);
// Called on an event to signal that the CPU part of the operation is finished;
// optionally accepts an error message from the CPU part.
// Should be called no more than once per event.
typedef void (*EventRecordFunction)(Event*, const void*, const char*);
// Waits and returns as soon as possible in order to schedule the next operation,
// e.g. for CUDA->CUDA waits only for CPU part of CUDA op,
// for CUDA->CPU waits till the CUDA op is fully completed.
// Prepares context to synchronize device part of operation.
// Can be called concurrently from multiple threads
typedef void (*EventWaitFunction)(const Event*, void*);
// Waits till operation is fully finished,
// can be called concurrently from multiple threads
typedef void (*EventFinishFunction)(const Event*);
// Queries current status of operation,
// can be called concurrently from multiple threads
typedef EventStatus (*EventQueryFunction)(const Event*);
typedef const std::string& (*EventErrorMessageFunction)(const Event*);
typedef void (*EventSetFinishedFunction)(const Event*, const char*);
typedef void (*EventResetFunction)(Event*);
// Sets callback that is called when event is finished
typedef std::function<void()> EventCallbackFunction;
typedef void (*EventSetCallbackFunction)(Event*, EventCallbackFunction);
class TORCH_API Event {
public:
explicit Event(const DeviceOption& option)
: event_(), type_(option.device_type()), option_(option) {
CAFFE_ENFORCE_LT(type_, MaxDeviceTypes);
CAFFE_ENFORCE(event_creator_[type_]);
event_creator_[type_](option, this);
}
// Nothing needs to be done in the destructor, as the event creator should
// set the proper destruction process for the unique_ptr.
~Event() {}
void Record(
DeviceType recorder_type,
const void* context,
const char* err_msg = nullptr) {
auto recorder_index = TypeToProto(recorder_type);
CAFFE_ENFORCE_EQ(
recorder_index,
type_,
"You are trying to record with a wrong device type.");
CAFFE_ENFORCE(event_recorder_[recorder_index]);
event_recorder_[recorder_index](this, context, err_msg);
}
void Wait(DeviceType waiter_type, void* context) const {
auto waiter_index = TypeToProto(waiter_type);
CAFFE_ENFORCE(event_waiter_[waiter_index][type_]);
event_waiter_[waiter_index][type_](this, context);
}
void Finish() const {
CAFFE_ENFORCE(event_finisher_[type_]);
event_finisher_[type_](this);
}
EventStatus Query() const {
CAFFE_ENFORCE(event_querier_[type_]);
return event_querier_[type_](this);
}
const std::string& ErrorMessage() const {
CAFFE_ENFORCE(event_err_msg_getter_[type_]);
return event_err_msg_getter_[type_](this);
}
void Reset() {
CAFFE_ENFORCE(event_resetter_[type_]);
event_resetter_[type_](this);
#ifdef CAFFE2_USE_EXCEPTION_PTR
caught_exception_ = nullptr;
#endif // CAFFE2_USE_EXCEPTION_PTR
error_timestamp_ = 0;
}
const DeviceOption& GetDeviceOption() const {
return option_;
}
bool IsScheduled() const {
return Query() == EventStatus::EVENT_SCHEDULED;
}
bool IsFinished() const {
auto status = Query();
return status == EventStatus::EVENT_SUCCESS ||
status == EventStatus::EVENT_FAILED;
}
void SetFinished(const char* err_msg = nullptr) {
typedef std::chrono::high_resolution_clock clock;
error_timestamp_ = std::chrono::duration_cast<std::chrono::nanoseconds>(
clock::now().time_since_epoch())
.count();
CAFFE_ENFORCE(event_finished_setter_[type_]);
return event_finished_setter_[type_](this, err_msg);
}
bool SupportsCallback() const {
return event_callback_setter_[type_] != nullptr;
}
void SetCallback(EventCallbackFunction callback) {
CAFFE_ENFORCE(
event_callback_setter_[type_], "Event does not support callbacks");
event_callback_setter_[type_](this, callback);
}
// If parent op has succeeded, then we can run any child op;
// If parent op is in scheduled state, we need to check that:
// - child op supports async scheduling
// - there's a way to setup synchronization between async parent and
// child - both child and parent should use the same type of device,
// non-blocking synchronization between different device types is not
// supported
// If parent op is in another state (initialized or failed) then scheduling
// is not possible
bool CanSchedule(const Event& child_event, bool supports_async) const {
return CanSchedule(type_, Query(), child_event.GetType(), supports_async);
}
static bool CanSchedule(
int parent_type,
EventStatus parent_status,
int child_type,
bool child_supports_async) {
if (parent_status == EventStatus::EVENT_SUCCESS) {
return true;
}
if (parent_status == EventStatus::EVENT_SCHEDULED) {
return (parent_type == child_type) && child_supports_async;
}
return false;
}
int GetType() const {
return type_;
}
void SetFinishedWithException(const char* err_msg = nullptr) {
#ifdef CAFFE2_USE_EXCEPTION_PTR
if (!caught_exception_) {
caught_exception_ = std::current_exception();
}
CAFFE_ENFORCE(caught_exception_, "No exception found");
#else
VLOG(1) << "No support for exceptions in Event";
#endif // CAFFE2_USE_EXCEPTION_PTR
if (err_msg) {
SetFinished(err_msg);
} else {
SetFinished("Error happened during an operator run");
}
}
bool HasException() const {
#ifdef CAFFE2_USE_EXCEPTION_PTR
return (bool)caught_exception_;
#else
VLOG(1) << "No support for exceptions in Event";
return false;
#endif // CAFFE2_USE_EXCEPTION_PTR
}
int64_t ErrorTimestamp() const {
return error_timestamp_;
}
void RethrowException() const {
#ifdef CAFFE2_USE_EXCEPTION_PTR
if (caught_exception_) {
std::rethrow_exception(caught_exception_);
}
#else
VLOG(1) << "No support for exceptions in Event";
#endif // CAFFE2_USE_EXCEPTION_PTR
}
// event_ is going to be accessed by the EventCreate/Record/Wait/Finish
// functions, but one should not use it outside the own Event functionalities.
// In the future we may move it to a private member.
std::shared_ptr<void> event_;
private:
int type_;
DeviceOption option_;
#ifdef CAFFE2_USE_EXCEPTION_PTR
std::exception_ptr caught_exception_;
#endif // CAFFE2_USE_EXCEPTION_PTR
int64_t error_timestamp_{};
static EventCreateFunction event_creator_[MaxDeviceTypes];
static EventRecordFunction event_recorder_[MaxDeviceTypes];
static EventWaitFunction event_waiter_[MaxDeviceTypes][MaxDeviceTypes];
static EventFinishFunction event_finisher_[MaxDeviceTypes];
static EventQueryFunction event_querier_[MaxDeviceTypes];
static EventErrorMessageFunction event_err_msg_getter_[MaxDeviceTypes];
static EventSetFinishedFunction event_finished_setter_[MaxDeviceTypes];
static EventResetFunction event_resetter_[MaxDeviceTypes];
static EventSetCallbackFunction event_callback_setter_[MaxDeviceTypes];
template <DeviceType t>
friend struct EventCreateFunctionRegisterer;
template <DeviceType t>
friend struct EventRecordFunctionRegisterer;
template <DeviceType w, DeviceType d>
friend struct EventWaitFunctionRegisterer;
template <DeviceType t>
friend struct EventFinishFunctionRegisterer;
template <DeviceType t>
friend struct EventQueryFunctionRegisterer;
template <DeviceType t>
friend struct EventErrorMessageFunctionRegisterer;
template <DeviceType t>
friend struct EventSetFinishedFunctionRegisterer;
template <DeviceType t>
friend struct EventSetCallbackFunctionRegisterer;
template <DeviceType t>
friend struct EventResetFunctionRegisterer;
};
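// Illustrative usage sketch, not part of the original header: the typical
// lifecycle of an Event on CPU, assuming the CPU event functions have been
// registered through the REGISTER_EVENT_* macros below (see event_cpu.h).
// `ExampleCpuEventRoundTrip` is a hypothetical helper name.
inline bool ExampleCpuEventRoundTrip() {
  DeviceOption option;
  option.set_device_type(PROTO_CPU);
  Event event(option); // dispatches to the registered EventCreateFunction
  // Signal that the CPU part of the operation finished, with no error message.
  event.Record(DeviceType::CPU, /*context=*/nullptr);
  // Block until the operation is fully finished, then inspect its status.
  event.Finish();
  return event.Query() == EventStatus::EVENT_SUCCESS;
}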
template <DeviceType t>
struct EventCreateFunctionRegisterer {
explicit EventCreateFunctionRegisterer(EventCreateFunction f) {
auto d = TypeToProto(t);
Event::event_creator_[d] = f;
}
};
#define REGISTER_EVENT_CREATE_FUNCTION(t, f) \
namespace { \
static EventCreateFunctionRegisterer<t> g_event_create_##t(f); \
}
template <DeviceType t>
struct EventRecordFunctionRegisterer {
explicit EventRecordFunctionRegisterer(EventRecordFunction f) {
auto d = TypeToProto(t);
Event::event_recorder_[d] = f;
}
};
#define REGISTER_EVENT_RECORD_FUNCTION(t, f) \
namespace { \
static EventRecordFunctionRegisterer<t> g_event_record_##t(f); \
}
template <DeviceType waiter_type, DeviceType event_type>
struct EventWaitFunctionRegisterer {
explicit EventWaitFunctionRegisterer(EventWaitFunction f) {
auto waiter_index = TypeToProto(waiter_type);
auto event_index = TypeToProto(event_type);
Event::event_waiter_[waiter_index][event_index] = f;
}
};
#define REGISTER_EVENT_WAIT_FUNCTION(w, d, f) \
namespace { \
static EventWaitFunctionRegisterer<w, d> g_event_wait_##w##_##d(f); \
}
template <DeviceType t>
struct EventQueryFunctionRegisterer {
explicit EventQueryFunctionRegisterer(EventQueryFunction f) {
auto d = TypeToProto(t);
Event::event_querier_[d] = f;
}
};
#define REGISTER_EVENT_QUERY_FUNCTION(t, f) \
namespace { \
static EventQueryFunctionRegisterer<t> g_event_query_##t(f); \
}
template <DeviceType t>
struct EventErrorMessageFunctionRegisterer {
explicit EventErrorMessageFunctionRegisterer(EventErrorMessageFunction f) {
auto d = TypeToProto(t);
Event::event_err_msg_getter_[d] = f;
}
};
#define REGISTER_EVENT_ERROR_MESSAGE_FUNCTION(t, f) \
namespace { \
static EventErrorMessageFunctionRegisterer<t> g_event_err_msg_##t(f); \
}
template <DeviceType t>
struct EventSetFinishedFunctionRegisterer {
explicit EventSetFinishedFunctionRegisterer(EventSetFinishedFunction f) {
auto d = TypeToProto(t);
Event::event_finished_setter_[d] = f;
}
};
#define REGISTER_EVENT_SET_FINISHED_FUNCTION(t, f) \
namespace { \
static EventSetFinishedFunctionRegisterer<t> g_event_set_finished_##t(f); \
}
template <DeviceType t>
struct EventSetCallbackFunctionRegisterer {
explicit EventSetCallbackFunctionRegisterer(EventSetCallbackFunction f) {
auto d = TypeToProto(t);
Event::event_callback_setter_[d] = f;
}
};
#define REGISTER_EVENT_SET_CALLBACK_FUNCTION(t, f) \
namespace { \
static EventSetCallbackFunctionRegisterer<t> g_event_set_callback_##t(f); \
}
template <DeviceType t>
struct EventFinishFunctionRegisterer {
explicit EventFinishFunctionRegisterer(EventFinishFunction f) {
auto d = TypeToProto(t);
Event::event_finisher_[d] = f;
}
};
#define REGISTER_EVENT_FINISH_FUNCTION(t, f) \
namespace { \
static EventFinishFunctionRegisterer<t> g_event_finish_##t(f); \
}
template <DeviceType t>
struct EventResetFunctionRegisterer {
explicit EventResetFunctionRegisterer(EventResetFunction f) {
auto d = TypeToProto(t);
Event::event_resetter_[d] = f;
}
};
#define REGISTER_EVENT_RESET_FUNCTION(t, f) \
namespace { \
static EventResetFunctionRegisterer<t> g_event_reset_##t(f); \
}
} // namespace caffe2
#endif // CAFFE2_CORE_EVENT_H_
| 12,420 | 31.94695 | 80 | h |
null | pytorch-main/caffe2/core/event_cpu.h |
#include "caffe2/core/event.h"
#include <atomic>
#include <condition_variable>
namespace caffe2 {
struct CPUEventWrapper {
explicit CPUEventWrapper(const DeviceOption& option)
: status_(EventStatus::EVENT_INITIALIZED) {
CAFFE_ENFORCE(
option.device_type() == PROTO_CPU ||
option.device_type() == PROTO_MKLDNN ||
option.device_type() == PROTO_IDEEP,
"Expected CPU/MKLDNN/IDEEP device type");
}
~CPUEventWrapper() {}
std::mutex mutex_;
std::condition_variable cv_completed_;
std::atomic<int> status_;
std::string err_msg_;
std::vector<EventCallbackFunction> callbacks_;
};
void EventCreateCPU(const DeviceOption& option, Event* event);
void EventRecordCPU(
Event* event,
const void* /* unused */,
const char* err_msg);
void EventFinishCPU(const Event* event);
void EventWaitCPUCPU(const Event* event, void* /* context */);
EventStatus EventQueryCPU(const Event* event);
const std::string& EventErrorMessageCPU(const Event* event);
void EventSetFinishedCPU(const Event* event, const char* err_msg);
bool EventCanScheduleCPU(const Event*, const Event*);
void EventResetCPU(Event*);
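// Illustrative sketch, not definitions from this codebase: one minimal way the
// declarations above could fit together, with EventCreateCPU storing a
// CPUEventWrapper in Event::event_ and EventRecordCPU updating its status.
// The real implementations live in event_cpu.cc and may differ in detail.
//
//   void EventCreateCPU(const DeviceOption& option, Event* event) {
//     event->event_ = std::make_shared<CPUEventWrapper>(option);
//   }
//
//   void EventRecordCPU(Event* event, const void*, const char* err_msg) {
//     auto* wrapper = static_cast<CPUEventWrapper*>(event->event_.get());
//     std::unique_lock<std::mutex> lock(wrapper->mutex_);
//     if (err_msg) {
//       wrapper->err_msg_ = err_msg;
//       wrapper->status_ = EventStatus::EVENT_FAILED;
//     } else {
//       wrapper->status_ = EventStatus::EVENT_SCHEDULED;
//     }
//   }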
} // namespace caffe2
| 1,192 | 23.854167 | 66 | h |
null | pytorch-main/caffe2/core/export_c10_op_to_caffe2.h |
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/Registry.h>
#include "caffe2/core/operator.h"
// TODO Also register c10 operators on mobile
#if !defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE)
#include <ATen/core/dispatch/Dispatcher.h>
#include <ATen/core/ivalue.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/C++17.h>
#include <c10/util/Metaprogramming.h>
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include <c10/util/irange.h>
namespace caffe2 {
/**
* To make a c10 operator "C10Add" callable from caffe2 as "C2MyAddOp", just
* write
*
* To export the CPU kernel
* C10_EXPORT_C10_OP_TO_CAFFE2_CPU(C10Add, C2MyAddOp)
*
* To export the CUDA kernel
* C10_EXPORT_C10_OP_TO_CAFFE2_CUDA(C10Add, C2MyAddOp)
*
*/
namespace detail {
template <class Context>
class C10OperatorWrapper final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
C10OperatorWrapper(
const c10::OperatorHandle& op,
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
op_(op),
has_preallocated_outputs_(
op_.schema().arguments().size() != 0 &&
op_.schema().arguments().back().name() ==
detail::PREALLOCATED_OUTPUT_ARGNAME) {
AT_ASSERT(
!has_preallocated_outputs_ ||
op_.schema().arguments().back().type()->isSubtypeOf(
*OptionalType::create(ListType::ofTensors())));
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
AT_ASSERT(operator_def.output_size() == op_.schema().returns().size());
AT_ASSERT(
operator_def.input_size() + (has_preallocated_outputs_ ? 1 : 0) <=
op_.schema()
.arguments()
.size()); // '<=' because there might be caffe2 nontensor arguments
}
bool RunOnDevice() override {
// due to caching the stack_, concurrent calling is not allowed.
// TODO thread_local might fix this
std::lock_guard<std::mutex> lock(mutex_);
pushInputs_();
callKernel_();
popOutputs_();
return true;
}
private:
void pushInputs_() {
AT_ASSERT(stack_.size() == 0);
stack_.reserve(
op_.schema().arguments().size() + (has_preallocated_outputs_ ? 1 : 0));
size_t input_tensor_index = 0;
for (const auto& argument : op_.schema().arguments()) {
if (argument.name() == detail::PREALLOCATED_OUTPUT_ARGNAME) {
// note: if detail::PREALLOCATED_OUTPUT_ARGNAME was at the end of the
// argument list, then has_preallocated_outputs_ would be true.
AT_ASSERTM(
has_preallocated_outputs_,
"Error in caffe2->c10 wrapper: Operator schema has a parameter named ",
detail::PREALLOCATED_OUTPUT_ARGNAME,
", but it's not at the end of the argument list");
AT_ASSERTM(
argument.type()->isSubtypeOf(
*OptionalType::create(ListType::ofTensors())),
"Error in caffe2->c10 wrapper: Operator schema has a parameter named ",
detail::PREALLOCATED_OUTPUT_ARGNAME,
", but it's not of type TensorList?");
stack_.emplace_back(preallocated_outputs_());
} else if (argument.type()->isSubtypeOf(*TensorType::get())) {
AT_ASSERTM(
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
input_tensor_index < InputSize(),
"Error in caffe2->c10 wrapper: Too few tensor arguments given (",
InputSize(),
"), operator schema expected more.");
stack_.emplace_back(at::Tensor(Input(input_tensor_index++)));
} else if (argument.type()->isSubtypeOf(*OptionalType::ofTensor())) {
if (input_tensor_index < InputSize()) {
stack_.emplace_back(at::Tensor(Input(input_tensor_index++)));
} else {
stack_.emplace_back(IValue());
}
} else if (argument.type()->isSubtypeOf(*ListType::ofTensors())) {
AT_ASSERTM(
input_tensor_index == 0,
"Error in caffe2->c10 wrapper: Schema can only have either one or more Tensor inputs or one TensorList input.");
stack_.emplace_back(array_inputs_());
input_tensor_index = InputSize();
} else {
stack_.emplace_back(get_nontensor_argument_(argument));
}
}
AT_ASSERTM(
input_tensor_index == InputSize(),
"Error in caffe2->c10 wrapper: Number of caffe2 operator inputs (",
InputSize(),
") doesn't match number of tensor arguments (",
input_tensor_index,
") in the c10 operator schema.");
}
void callKernel_() {
AT_ASSERT(stack_.size() == op_.schema().arguments().size());
op_.callBoxed(&stack_);
}
void popOutputs_() {
AT_ASSERT(stack_.size() == op_.schema().returns().size());
for (const auto i : c10::irange(op_.schema().returns().size())) {
OperatorBase::SetOutputTensor(i, Tensor(std::move(stack_[i]).toTensor()));
}
stack_.clear();
}
c10::List<at::Tensor> array_inputs_() {
c10::List<at::Tensor> result;
result.reserve(InputSize());
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto i : c10::irange(InputSize())) {
result.emplace_back(Input(i));
}
return result;
}
c10::List<at::Tensor> preallocated_outputs_() {
c10::List<at::Tensor> result;
result.reserve(OutputSize());
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (const auto i : c10::irange(OutputSize())) {
result.emplace_back(OperatorBase::OutputTensorOrUndefined(i));
}
return result;
}
IValue get_nontensor_argument_(const c10::Argument& argument) {
if (argument.type()->isSubtypeOf(*IntType::get())) {
return get_nontensor_argument_<int>(
argument.name(), argument.default_value());
} else if (argument.type()->isSubtypeOf(*FloatType::get())) {
return get_nontensor_argument_<double>(
argument.name(), argument.default_value());
} else if (argument.type()->isSubtypeOf(*BoolType::get())) {
return get_nontensor_argument_<bool>(
argument.name(), argument.default_value());
} else {
// TODO Support more types
AT_ERROR(
"Error in caffe2->c10 wrapper: Unsupported argument type ",
argument.type()->str(),
" in c10 operator schema");
}
}
template <class T>
IValue get_nontensor_argument_(
const std::string& name,
const c10::optional<IValue>& default_value) {
if (default_value.has_value()) {
return this->template GetSingleArgument<T>(name, default_value->to<T>());
} else {
TORCH_CHECK(
this->template HasSingleArgumentOfType<T>(name),
"Error in caffe2->c10 wrapper: Expected argument '",
name,
"' missing or wrong type.");
return this->template GetSingleArgument<T>(name, 0);
}
}
c10::OperatorHandle op_;
// has_preallocated_outputs_ is true iff the operator schema has a last
// argument that is a TensorList with a name equal to
// detail::PREALLOCATED_OUTPUT_ARGNAME. This argument is then used to pass
// in preallocated output tensors to the caffe2 operator.
bool has_preallocated_outputs_;
// this is stored as a member here to avoid having to re-allocate a stack
// for each call. Between kernel calls, stack_.size() == 0, but capacity
// should not need to be grown anymore after the first call.
std::vector<IValue> stack_;
std::mutex mutex_;
};
template <class Context>
inline std::function<
std::unique_ptr<OperatorBase>(const OperatorDef&, Workspace*)>
createC10OperatorWrapper(const c10::OperatorName& op_name) {
return [op_name](const OperatorDef& op_def, Workspace* ws) {
auto op_handle =
c10::Dispatcher::singleton().findSchema(op_name);
AT_ASSERTM(
op_handle.has_value(),
"Tried to register c10 operator ",
op_name.name,
".",
op_name.overload_name,
" with caffe2, but didn't find the c10 operator.");
return std::make_unique<C10OperatorWrapper<Context>>(
*op_handle, op_def, ws);
};
}
} // namespace detail
} // namespace caffe2
#define C10_EXPORT_C10_OP_TO_CAFFE2_CPU( \
OperatorName, Name) \
REGISTER_CPU_OPERATOR_CREATOR( \
Name, \
::caffe2::detail::createC10OperatorWrapper<CPUContext>( \
{OperatorName, ""}))
#define C10_EXPORT_C10_OP_TO_CAFFE2_CUDA( \
OperatorName, Name) \
REGISTER_CUDA_OPERATOR_CREATOR( \
Name, \
::caffe2::detail::createC10OperatorWrapper<CUDAContext>( \
{OperatorName, ""}))
#define C10_EXPORT_C10_OP_TO_CAFFE2_HIP( \
OperatorName, Name) \
REGISTER_HIP_OPERATOR_CREATOR( \
Name, \
::caffe2::detail::createC10OperatorWrapper<HIPContext>( \
{OperatorName, ""}))
#else
#define C10_EXPORT_C10_OP_TO_CAFFE2_CPU( \
OperatorName, Name)
#define C10_EXPORT_C10_OP_TO_CAFFE2_CUDA( \
OperatorName, Name)
#define C10_EXPORT_C10_OP_TO_CAFFE2_HIP( \
OperatorName, Name)
#endif
| 9,539 | 35.136364 | 124 | h |
null | pytorch-main/caffe2/core/export_caffe2_op_to_c10.h |
#pragma once
#include <c10/macros/Macros.h>
#if defined(EXPOSE_C2_OPS) || \
!defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE)
#include <ATen/core/dispatch/OperatorOptions.h>
#include <ATen/core/function_schema.h>
#include <ATen/core/grad_mode.h>
#include <ATen/core/op_registration/op_registration.h>
#include <c10/core/CompileTimeFunctionPointer.h>
#include <c10/util/irange.h>
#include <torch/csrc/jit/frontend/function_schema_parser.h>
#include <torch/library.h>
#include <caffe2/core/tensor.h>
#include <vector>
namespace caffe2 {
namespace detail {
constexpr const char* PREALLOCATED_OUTPUT_ARGNAME =
"_caffe2_preallocated_outputs";
using _CallCaffe2OpFunc = std::vector<caffe2::Tensor>(
const c10::FunctionSchema& schema,
std::vector<c10::IValue> &&inputs,
std::vector<caffe2::Tensor> &&outputs);
template <class Caffe2Operator>
inline std::vector<caffe2::Tensor> _call_caffe2_op(
const c10::FunctionSchema& schema,
std::vector<c10::IValue> &&inputs,
std::vector<caffe2::Tensor> &&outputs) {
Caffe2Operator op(schema, std::move(inputs), std::move(outputs), -1);
op.Run(-1);
return std::move(op).move_output_tensors();
}
// This function is inline in the hope that compilers optimizing for speed will
// inline it into call_caffe2_op_from_c10, allowing call_op to be inlined and
// avoiding the function pointer indirection, while compilers optimizing for
// binary size will keep it a separate function instead of inlining it into
// a template and will reuse the binary code of this function between ops.
// We measured and confirmed that the binary size of the Instagram iOS app is
// reduced when having _call_caffe2_op_from_c10 separate from the templated
// call_caffe2_op_from_c10.
inline void _call_caffe2_op_from_c10(
c10::Stack* stack,
const c10::FunctionSchema& schema,
_CallCaffe2OpFunc* call_op) {
// precondition: on the stack, there's one IValue for each argument of the
// c10 schema. The last argument is an optional tensor list that
// (if not ivalue::None) contains a preallocated output tensor for each
// operator output.
// As an invariant, we don't want any autograd gradients to be tracked in
// Caffe2 operators.
at::NoGradGuard guard;
AT_ASSERT(
schema.arguments().size() != 0 &&
schema.arguments().back().type()->isSubtypeOf(
*OptionalType::create(ListType::ofTensors())));
IValue preallocated_outputs = torch::jit::pop(*stack);
const size_t num_inputs = schema.arguments().size() -
1; // -1 because the last argument is the list of preallocated tensors
c10::List<at::Tensor> outputs;
if (preallocated_outputs.isNone()) {
// either the schema doesn't support preallocated outputs or it does but
// they haven't been passed in. Pass a list of uninitialized tensors to
// the caffe2 operator as preallocated outputs.
outputs.resize(schema.returns().size());
} else {
AT_ASSERT(preallocated_outputs.isTensorList());
outputs = std::move(preallocated_outputs).toTensorList();
}
// TODO Avoid vector allocation. One idea would be to keep the std::vector
// instances in the cache.
std::vector<IValue> inputs = torch::jit::pop(*stack, num_inputs);
// Convert outputs to caffe2::Tensor
const size_t num_outputs = outputs.size();
std::vector<caffe2::Tensor> outputs_c2(num_outputs);
for (auto i : c10::irange(num_outputs)) {
outputs_c2[i] = caffe2::Tensor(outputs.extract(i));
}
outputs_c2 = (*call_op)(schema, std::move(inputs), std::move(outputs_c2));
TORCH_INTERNAL_ASSERT(num_outputs == outputs_c2.size());
bool return_tensor_list = false;
if (schema.returns().size() == 1) {
auto type = schema.returns()[0].type();
if (c10::ListTypePtr list_type = type->cast<c10::ListType>()) {
if (list_type->getElementType()->kind() == c10::TypeKind::TensorType) {
return_tensor_list = true;
}
}
}
if (return_tensor_list) {
for (const auto i : c10::irange(num_outputs)) {
outputs.set(i, at::Tensor(std::move(outputs_c2[i])));
}
torch::jit::push(*stack, outputs);
} else {
for (const auto i : c10::irange(num_outputs)) {
torch::jit::push(*stack, at::Tensor(std::move(outputs_c2[i])));
}
}
// postcondition: All inputs are cleared from the stack, there's now one
// IValue for each output which holds the result. This
// might reuse one of the preallocated tensors but doesn't have
// to.
}
template <const c10::FunctionSchema& (*Schema)(), class Caffe2Operator>
void call_caffe2_op_from_c10(
const c10::OperatorHandle& /*opHandle*/,
c10::Stack* stack) {
_call_caffe2_op_from_c10(stack, Schema(), &_call_caffe2_op<Caffe2Operator>);
}
inline FunctionSchema make_function_schema_for_c10(
const char* schema_str,
c10::optional<c10::AliasAnalysisKind> optional_alias_analysis_kind) {
#if !defined(EXPOSE_C2_OPS) && \
(defined(CAFFE2_IS_XPLAT_BUILD) || defined(C10_MOBILE))
throw std::logic_error(
"We don't support registering c10 ops on mobile yet because the function schema parser isn't present in the mobile build.");
#else
c10::FunctionSchema parsed_schema = torch::jit::parseSchema(schema_str);
std::vector<c10::Argument> arguments = parsed_schema.arguments();
arguments.emplace_back(
PREALLOCATED_OUTPUT_ARGNAME,
c10::OptionalType::create(c10::ListType::ofTensors()),
nullopt,
IValue());
auto schema = FunctionSchema(
parsed_schema.name(),
parsed_schema.overload_name(),
std::move(arguments),
parsed_schema.returns(),
parsed_schema.is_vararg(),
parsed_schema.is_varret());
if (optional_alias_analysis_kind) {
schema.setAliasAnalysis(*optional_alias_analysis_kind);
}
return schema;
#endif
}
} // namespace detail
} // namespace caffe2
/**
* To register a caffe2 operator caffe2::MyOperator with the c10 dispatcher,
* call:
*
* In caffe2/operators/MyOperator.h:
*
* > C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(C10MyOperator)
* >     // C10MyOperator is the name used by c10 for this operator
*
* In caffe2/operators/MyOperator.cc
*
* > C10_EXPORT_CAFFE2_OP_TO_C10_CPU (
* > C10MyOperator,
* > "_caffe2::C10MyOperator(Tensor input1, int argument2, float argument3)
* >        -> (Tensor output1, Tensor output2)"
* >    caffe2::MyOperator<caffe2::CPUContext> // This is the caffe2 operator
* >                                           // class template
* > )
*
* In caffe2/operators/MyOperator.cu
*
* > C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(C10MyOperator ,
* caffe2::MyOperator<caffe2::CUDAContext>)
*
* Notes:
* - all macros must be defined in the top level namespace, not in namespace
* caffe2.
* - all operators must call C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10 and
* C10_EXPORT_CAFFE2_OP_TO_C10_CPU .
* - calling C10_EXPORT_CAFFE2_OP_TO_C10_CUDA is optional and can be omitted if
* you don't want to expose the operator for CUDA operations.
* - caffe2 arguments must come after caffe2 inputs, in other words, any tensor
* inputs must precede any non-tensor inputs.
*
* More complex use cases:
* - If your operator has a variable number of input tensors, make the first (!)
* input an input of type TensorList. There must be no other tensor inputs.
*/
#define C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(OperatorName) \
namespace caffe2 { \
namespace _c10_ops { \
TORCH_API const FunctionSchema& schema_##OperatorName(); \
} \
}
#define C10_EXPORT_CAFFE2_OP_TO_C10_SCHEMA_ONLY( \
OperatorName, OperatorSchema, OptionalAliasAnalysisKind) \
/* Register the op schema with the c10 dispatcher */ \
namespace caffe2 { \
namespace _c10_ops { \
C10_EXPORT const FunctionSchema& schema_##OperatorName() { \
static const FunctionSchema schema = \
::caffe2::detail::make_function_schema_for_c10( \
OperatorSchema, OptionalAliasAnalysisKind); \
return schema; \
} \
TORCH_LIBRARY_FRAGMENT(_caffe2, m) { \
m.def(::caffe2::detail::make_function_schema_for_c10( \
OperatorSchema, OptionalAliasAnalysisKind)); \
} \
} \
}
#define C10_EXPORT_CAFFE2_OP_TO_C10_CPU_KERNEL_ONLY( \
OperatorName, OperatorClass) \
/* Register call_caffe2_op_from_c10 as a kernel with the c10 dispatcher */ \
TORCH_LIBRARY_IMPL(_caffe2, CPU, m) { \
m.impl( \
"_caffe2::" #OperatorName, \
torch::CppFunction::makeFromBoxedFunction< \
::caffe2::detail::call_caffe2_op_from_c10< \
::caffe2::_c10_ops::schema_##OperatorName, \
OperatorClass>>()); \
}
#define C10_EXPORT_CAFFE2_OP_TO_C10_CPU( \
OperatorName, OperatorSchema, OperatorClass) \
C10_EXPORT_CAFFE2_OP_TO_C10_SCHEMA_ONLY( \
OperatorName, OperatorSchema, c10::nullopt) \
C10_EXPORT_CAFFE2_OP_TO_C10_CPU_KERNEL_ONLY(OperatorName, OperatorClass)
#define C10_EXPORT_CAFFE2_OP_TO_C10_CPU_WITH_ALIAS_ANALYSIS( \
OperatorName, OperatorSchema, OperatorClass, OptionalAliasAnalysisKind) \
C10_EXPORT_CAFFE2_OP_TO_C10_SCHEMA_ONLY( \
OperatorName, OperatorSchema, OptionalAliasAnalysisKind) \
C10_EXPORT_CAFFE2_OP_TO_C10_CPU_KERNEL_ONLY(OperatorName, OperatorClass)
#define C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(OperatorName, OperatorClass) \
/* Register call_caffe2_op_from_c10 as a kernel with the c10 dispatcher */ \
TORCH_LIBRARY_IMPL(_caffe2, CUDA, m) { \
m.impl( \
"_caffe2::" #OperatorName, \
torch::CppFunction::makeFromBoxedFunction< \
::caffe2::detail::call_caffe2_op_from_c10< \
::caffe2::_c10_ops::schema_##OperatorName, \
OperatorClass>>()); \
}
// You should never manually call the C10_EXPORT_CAFFE2_OP_TO_C10_HIP macro.
// The C10_EXPORT_CAFFE2_OP_TO_C10_CUDA macro from above will be automatically
// rewritten to C10_EXPORT_CAFFE2_OP_TO_C10_HIP by hipify.
#define C10_EXPORT_CAFFE2_OP_TO_C10_HIP(OperatorName, OperatorClass) \
/* Register call_caffe2_op_from_c10 as a kernel with the c10 dispatcher */ \
TORCH_LIBRARY_IMPL(_caffe2, HIP, m) { \
m.impl( \
"_caffe2::" #OperatorName, \
torch::CppFunction::makeFromBoxedFunction< \
::caffe2::detail::call_caffe2_op_from_c10< \
::caffe2::_c10_ops::schema_##OperatorName, \
OperatorClass>>()); \
}
#else
// Don't use c10 dispatcher on mobile because of binary size
#define C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(OperatorName)
#define C10_EXPORT_CAFFE2_OP_TO_C10_SCHEMA_ONLY( \
OperatorName, OperatorSchema, OptionalAliasAnalysisKind)
#define C10_EXPORT_CAFFE2_OP_TO_C10_CPU_KERNEL_ONLY(OperatorName, OperatorClass)
#define C10_EXPORT_CAFFE2_OP_TO_C10_CPU( \
OperatorName, OperatorSchema, OperatorClass)
#define C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(OperatorName, OperatorClass)
#define C10_EXPORT_CAFFE2_OP_TO_C10_HIP(OperatorName, OperatorClass)
#endif
| 12,349 | 42.639576 | 130 | h |
null | pytorch-main/caffe2/core/graph.h |
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"
#include "caffe2/utils/string_utils.h"
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
namespace caffe2 {
namespace transform {
/**
* Graph representation of an operator.
*/
struct TORCH_API Node {
public:
// Empty constructor for resize
Node() {}
// Alternate constructor
Node(
const OperatorDef& op,
bool active,
std::map<int, std::vector<string>> parents,
std::map<int, std::vector<string>> children)
: op(op), active(active), parents(parents), children(children) {}
// The OperatorDef which this node represents.
OperatorDef op;
// Keeps track of if an operator has been deleted through a transformation.
bool active = true;
// Stores a pair (idx, blob_list),
// idx = index of the child
// blob_list = a list of strings, containing the blobs that connect the nodes
std::map<int, std::vector<string>> parents;
std::map<int, std::vector<string>> children;
};
/**
* Graph representation of a Netdef.
*/
struct TORCH_API Graph {
public:
/**
* Given a subgraph, gets all of the parents of the subgraph, as well as
* their associated blob names. Sorted by blob names.
*
* <string, int> := (name of blob writing into subgraph,
* index of node that writes into subgraph using that blob)
*/
const std::vector<std::pair<string, int>> GetSubgraphInput(
const std::vector<int>& subgraph);
/**
* Given a subgraph, gets all of the children of the subgraph, as well as
* their associated blob names. Sorted by blob names.
*
* <string, int> := (name of blob reading from subgraph,
* index of node that reads from subgraph using that blob)
*/
const std::vector<std::pair<string, int>> GetSubgraphOutput(
const std::vector<int>& subgraph);
/**
* Graph generation.
* Given a netdef, returns a Graph.
*
* Each node represents an operator.
* An edge exists between two nodes if the parent op writes to a blob which
* is an input of the child op, with no other op writing to that blob in
* between in the execution order.
*
* Time Complexity: O(E), where E is the number of blobs
*/
explicit Graph(const NetDef& net_def);
/**
* Generates a NetDef Representation for the current graph.
* Nodes are visited in topological order, which is proper Opdef ordering.
* TODO(benz):
* There exist conflicts with repeated blob names, where topological sorting
* is not sufficient for correct netdef representation, unless blobs are
* renamed.
* For example, if after a transformation we have operator ancestry:
* A --> B --> C, and also A --> D --> E, where B -> C and D -> E uses the
* same blob name, then A, B, D, E, C is a correct topological ordering,
* but D will write to the blob that C reads from, instead of B.
* We currently believe that there will always be ambiguity unless blobs are
* renamed.
* This is solved by performing SSA on all transformed blob names.
*/
NetDef GetNetDef();
/**
* Deactivate a subgraph, and get rid of all edges into this subgraph.
*/
void DeactivateSubgraph(std::vector<int> subgraph);
size_t size() const {
return nodes_.size();
}
void push_node(const Node& new_node) {
return nodes_.push_back(new_node);
}
void resize_nodes(size_t new_size) {
nodes_.resize(new_size);
}
// Index safe, less verbose way to access nodes
inline const Node& node(size_t idx) const {
return nodes_.at(idx);
}
inline Node& node(size_t idx) {
return nodes_.at(idx);
}
inline bool is_node_active(size_t idx) {
return node(idx).active;
}
inline const std::set<string>& external_input() const {
return external_input_;
}
inline const std::set<string>& external_output() const {
return external_output_;
}
private:
const std::vector<std::pair<string, int>> GetSubgraphPerimeterHelper(
bool from_children,
const std::vector<int>& match);
// Stores the netdef representation. Is updated upon calls to GetNetDef.
NetDef netdef_;
// Stores which blobs the graph reads from, and writes to.
std::set<string> external_input_;
std::set<string> external_output_;
// Keeps track of all the Operators currently within graph, even if inactive.
std::vector<Node> nodes_;
};
} // namespace transform
// Adds an operator def to a netdef.
// Returns a pointer to the added OperatorDef, in case you want to set anything
// extra (such as device_option).
TORCH_API OperatorDef* AddOp(
NetDef* netdef_ptr,
string op_type,
std::vector<string> inputs,
std::vector<string> outputs);
/**
* This allows for the use of * and | to match operator types,
* engines, or any other property that is represented by strings.
*
* For example, if we wanted to match an operator to Conv or FC, we can give:
* "Conv|FC" as the type() of that op.
*/
TORCH_API bool MatchStrings(string p, string s);
/**
* This ensures that each named arg that exists in the pattern also exists in
* g_op and is equal in value.
*/
TORCH_API bool MatchArguments(const OperatorDef& p_op, const OperatorDef& g_op);
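// Illustrative usage sketch, not part of the original header: building a tiny
// NetDef with AddOp and checking the resulting op against a type pattern with
// MatchStrings. The blob names and `ExampleBuildAndMatch` are hypothetical.
inline bool ExampleBuildAndMatch() {
  NetDef net;
  OperatorDef* op = AddOp(&net, "FC", {"X", "W", "b"}, {"Y"});
  // The returned pointer can be used to set extras such as the device option.
  op->mutable_device_option()->set_device_type(PROTO_CPU);
  // "Conv|FC" matches either operator type, so this returns true.
  return MatchStrings("Conv|FC", op->type());
}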
} // namespace caffe2
| 5,258 | 28.216667 | 80 | h |
null | pytorch-main/caffe2/core/init.h |
#ifndef CAFFE2_CORE_INIT_H_
#define CAFFE2_CORE_INIT_H_
#include "caffe2/core/common.h"
#include "caffe2/core/flags.h"
#include "caffe2/core/logging.h"
namespace caffe2 {
namespace internal {
class TORCH_API Caffe2InitializeRegistry {
public:
typedef bool (*InitFunction)(int*, char***);
// Registry() is defined in .cpp file to make registration work across
// multiple shared libraries loaded with RTLD_LOCAL
static Caffe2InitializeRegistry* Registry();
void Register(
InitFunction function,
bool run_early,
const char* description,
const char* name = nullptr) {
if (name) {
named_functions_[name] = function;
}
if (run_early) {
// Disallow registration after GlobalInit of early init functions
CAFFE_ENFORCE(!early_init_functions_run_yet_);
early_init_functions_.emplace_back(function, description);
} else {
if (init_functions_run_yet_) {
// Run immediately, since GlobalInit already ran. This should be
// rare but we want to allow it in some cases.
LOG(WARNING) << "Running init function after GlobalInit: "
<< description;
// TODO(orionr): Consider removing argc and argv for non-early
// registration. Unfortunately that would require a new InitFunction
// typedef, so not making the change right now.
//
// Note that init doesn't receive argc and argv, so the function
// might fail and we want to raise an error in that case.
int argc = 0;
char** argv = nullptr;
bool success = (function)(&argc, &argv);
CAFFE_ENFORCE(success);
} else {
// Wait until GlobalInit to run
init_functions_.emplace_back(function, description);
}
}
}
bool RunRegisteredEarlyInitFunctions(int* pargc, char*** pargv) {
CAFFE_ENFORCE(!early_init_functions_run_yet_);
early_init_functions_run_yet_ = true;
return RunRegisteredInitFunctionsInternal(
early_init_functions_, pargc, pargv);
}
bool RunRegisteredInitFunctions(int* pargc, char*** pargv) {
CAFFE_ENFORCE(!init_functions_run_yet_);
init_functions_run_yet_ = true;
return RunRegisteredInitFunctionsInternal(init_functions_, pargc, pargv);
}
bool RunNamedFunction(const char* name, int* pargc, char*** pargv) {
if (named_functions_.count(name)) {
return named_functions_[name](pargc, pargv);
}
return false;
}
private:
// Run all registered initialization functions. This has to be called AFTER
// all static initialization is finished and main() has started, since we are
// using logging.
bool RunRegisteredInitFunctionsInternal(
vector<std::pair<InitFunction, const char*>>& functions,
int* pargc, char*** pargv) {
for (const auto& init_pair : functions) {
VLOG(1) << "Running init function: " << init_pair.second;
if (!(*init_pair.first)(pargc, pargv)) {
LOG(ERROR) << "Initialization function failed.";
return false;
}
}
return true;
}
Caffe2InitializeRegistry() {}
vector<std::pair<InitFunction, const char*> > early_init_functions_;
vector<std::pair<InitFunction, const char*> > init_functions_;
std::unordered_map<std::string, InitFunction> named_functions_;
bool early_init_functions_run_yet_ = false;
bool init_functions_run_yet_ = false;
};
} // namespace internal
TORCH_API bool unsafeRunCaffe2InitFunction(
const char* name,
int* pargc = nullptr,
char*** pargv = nullptr);
class TORCH_API InitRegisterer {
public:
InitRegisterer(
internal::Caffe2InitializeRegistry::InitFunction function,
bool run_early,
const char* description,
const char* name = nullptr) {
internal::Caffe2InitializeRegistry::Registry()->Register(
function, run_early, description, name);
}
};
#define REGISTER_CAFFE2_INIT_FUNCTION(name, function, description) \
namespace { \
::caffe2::InitRegisterer \
g_caffe2_initregisterer_##name(function, false, description, #name); \
} // namespace
#define REGISTER_CAFFE2_EARLY_INIT_FUNCTION(name, function, description) \
namespace { \
::caffe2::InitRegisterer \
g_caffe2_initregisterer_##name(function, true, description, #name); \
} // namespace
/**
* @brief Determine whether GlobalInit has already been run
*/
TORCH_API bool GlobalInitAlreadyRun();
class TORCH_API GlobalInitIsCalledGuard {
public:
GlobalInitIsCalledGuard() {
if (!GlobalInitAlreadyRun()) {
LOG(WARNING)
<< "Caffe2 GlobalInit should be run before any other API calls.";
}
}
};
/**
* @brief Initialize the global environment of caffe2.
*
* Caffe2 uses a registration pattern for initialization functions. Custom
* initialization functions should take the signature
* bool (*func)(int*, char***)
* where the pointers to argc and argv are passed in. Caffe2 then runs the
* initialization in three phases:
* (1) Functions registered with REGISTER_CAFFE2_EARLY_INIT_FUNCTION. Note that
* since it is possible the logger is not initialized yet, any logging in
* such early init functions may not be printed correctly.
* (2) Parses Caffe-specific commandline flags, and initializes caffe logging.
* (3) Functions registered with REGISTER_CAFFE2_INIT_FUNCTION.
* If there is something wrong at any stage, the function returns false. If
* the global initialization has already been run, the function returns false
* as well.
*
* GlobalInit is re-entrant safe; a re-entrant call will no-op and exit.
*
* GlobalInit is safe to call multiple times but not idempotent;
* successive calls will parse flags and re-set caffe2 logging levels from
* flags as needed, but NOT re-run early init and init functions.
*
* GlobalInit is also thread-safe and can be called concurrently.
*/
TORCH_API bool GlobalInit(int* pargc, char*** argv);
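// Illustrative sketch with hypothetical names, not part of the original
// header: registering a custom init function that runs during phase (3) of
// GlobalInit. This would normally live in a .cc file rather than a header.
//
//   namespace {
//   bool SetUpExampleState(int* /*pargc*/, char*** /*pargv*/) {
//     // ... allocate caches, query hardware, etc. ...
//     return true; // returning false makes GlobalInit fail
//   }
//   } // namespace
//   REGISTER_CAFFE2_INIT_FUNCTION(
//       set_up_example_state,
//       &SetUpExampleState,
//       "Sets up hypothetical example state");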
/**
* @brief Initialize the global environment without command line arguments
*
* This is a version of the GlobalInit where no argument is passed in.
* On mobile devices, use this global init, since command line options
* cannot be passed to caffe2 there.
*/
TORCH_API bool GlobalInit();
} // namespace caffe2
#endif // CAFFE2_CORE_INIT_H_
| 6,496 | 35.094444 | 80 | h |
null | pytorch-main/caffe2/core/memonger.h |
#ifndef CAFFE2_CORE_MEMONGER_H_
#define CAFFE2_CORE_MEMONGER_H_
#include <unordered_set>
#include "caffe2/core/common.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
// op schema check
TORCH_API void run_schema_check(const NetDef& net);
namespace memonger {
TORCH_API NetDef optimize_inference_net(
const NetDef& net,
const std::set<string>& static_blobs);
TORCH_API NetDef compute_blob_recycling_for_dag(
const NetDef& net,
const std::vector<string>& heads,
const std::vector<int>& op_indices,
const std::unordered_set<string>& shareable_blob_names,
const string& namescope,
const std::unordered_set<string>& dont_share_blob_names,
const std::unordered_map<string, vector<int>>& blob_shapes);
} // memonger
} // caffe2
#endif
| 817 | 23.058824 | 64 | h |
null | pytorch-main/caffe2/core/module.h |
/**
* A global dictionary that holds information about what Caffe2 modules have
* been loaded in the current runtime, and also utility functions to load
* modules.
*/
#ifndef CAFFE2_CORE_MODULE_H_
#define CAFFE2_CORE_MODULE_H_
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <memory>
#include <mutex>
#include "caffe2/core/common.h"
#include <c10/util/typeid.h>
namespace caffe2 {
/**
* A module schema that can be used to store specific information about
* different modules. Currently, we only store the name and a simple
* description of what this module does.
*/
class TORCH_API ModuleSchema {
public:
ModuleSchema(const char* name, const char* description);
};
/**
* @brief Current Modules present in the Caffe2 runtime.
* Returns:
* map: a map of modules and (optionally) their description. The key is the
* module name, and the value is the description for that module. The
* module name is recommended to be the part that constitutes the trunk
* of the dynamic library: for example, a module called
* libcaffe2_db_rocksdb.so should have the name "caffe2_db_rocksdb". The
* reason we do not use "lib" is because it's somewhat redundant, and
* the reason we do not include ".so" is for cross-platform compatibility
* on platforms like macOS.
*/
TORCH_API const CaffeMap<string, const ModuleSchema*>& CurrentModules();
/**
* @brief Checks whether a module is already present in the current binary.
*/
TORCH_API bool HasModule(const string& name);
/**
* @brief Load a module.
* Inputs:
* name: a module name or a path name.
* It is recommended that you use the name of the module, and leave the
* full path option to only experimental modules.
* filename: (optional) a filename that serves as a hint to load the module.
*/
TORCH_API void LoadModule(const string& name, const string& filename="");
#define CAFFE2_MODULE(name, description) \
extern "C" { \
bool gCaffe2ModuleSanityCheck##name() { return true; } \
} \
namespace { \
static ::caffe2::ModuleSchema module_schema_##name(#name, description); \
}
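// Illustrative sketch with a hypothetical module, not part of the original
// header: a loadable module declares itself in its own .cc file with
//
//   CAFFE2_MODULE(caffe2_db_rocksdb, "A RocksDB-backed caffe2 database module.");
//
// and can then be checked for and loaded at runtime with
//
//   if (!caffe2::HasModule("caffe2_db_rocksdb")) {
//     caffe2::LoadModule("caffe2_db_rocksdb");
//   }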
} // namespace caffe2
#endif // CAFFE2_CORE_MODULE_H_
| 2,473 | 33.361111 | 79 | h |
null | pytorch-main/caffe2/core/net.h |
#ifndef CAFFE2_CORE_NET_H_
#define CAFFE2_CORE_NET_H_
#include <atomic>
#include <climits>
#include <cstddef>
#include <thread> // NOLINT
#include <typeinfo>
#include <unordered_map>
#include <vector>
#include "c10/core/thread_pool.h"
#include "c10/util/Registry.h"
#include "caffe2/core/blob.h"
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/observer.h"
#include "caffe2/core/operator_schema.h"
#include "caffe2/core/tensor.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/simple_queue.h"
C10_DECLARE_string(caffe2_override_executor);
namespace caffe2 {
class NetBase;
typedef ObserverBase<NetBase> NetObserver;
typedef std::function<std::unique_ptr<NetObserver>(NetBase*)>
NetObserverCreator;
class OperatorBase;
class Workspace;
// Net is a thin struct that owns all the operators together with the operator
// contexts.
class TORCH_API NetBase : public Observable<NetBase> {
public:
NetBase(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);
virtual ~NetBase() noexcept {}
virtual bool SupportsAsync() = 0;
inline const vector<const Event*>& events() const {
return events_;
}
virtual void Wait() {
// by default just wait till all events are finished
for (const auto& event : events_) {
event->Finish();
}
}
virtual bool Run() {
if (!RunAsync()) {
LOG(ERROR) << "Failed to execute async run";
return false;
}
Wait();
return handleRunError();
}
virtual bool RunAsync();
virtual void Cancel();
/* Benchmarks a network for one individual run so that we can feed new
* inputs on additional calls.
* This function returns the number of microseconds spent
* during the benchmark
*/
virtual float TEST_Benchmark_One_Run();
/**
* Benchmarks a network.
*
* This function returns a vector of float recording the number of milli-
* seconds spent during the benchmark. The 0-th item is the time spent per
* each network run, and if a net instantiation supports run_individual,
* the remainder of the vector returns the number of milliseconds spent per
* operator.
*/
virtual vector<float> TEST_Benchmark(
const int /*warmup_runs*/,
const int /*main_runs*/,
const bool /*run_individual*/);
inline const vector<string>& external_output() const {
return external_output_;
}
inline const vector<string>& external_input() const {
return external_input_;
}
/* Used to attach Observers to operators of a Net
*
* Returns pointers to objects owned with unique_ptrs.
* Use with caution.
*/
virtual vector<OperatorBase*> GetOperators() const = 0;
const string& Name() const {
return name_;
}
inline const NetDef& debug_def() const {
CAFFE_ENFORCE(has_debug_def(), "net_def was null!");
return *net_def_;
}
inline bool has_debug_def() const {
return net_def_ != nullptr;
}
protected:
virtual bool DoRunAsync() {
CAFFE_THROW("Not implemented");
};
virtual bool handleRunError() {
for (const Event* event : events_) {
if (event->Query() != EventStatus::EVENT_SUCCESS) {
CAFFE_THROW(event->ErrorMessage());
}
}
return true;
}
vector<string> external_input_;
vector<string> external_output_;
string name_;
vector<const Event*> events_;
std::shared_ptr<const NetDef> net_def_;
C10_DISABLE_COPY_AND_ASSIGN(NetBase);
};
class TORCH_API ExecutorHelper {
public:
ExecutorHelper() {}
virtual TaskThreadPoolBase* GetPool(const DeviceOption& option) const;
virtual std::vector<OperatorBase*> GetOperators() const;
virtual int GetNumWorkers() const;
virtual ~ExecutorHelper() {}
};
C10_DECLARE_REGISTRY(
NetRegistry,
NetBase,
const std::shared_ptr<const NetDef>&,
Workspace*);
#define REGISTER_NET_CREATOR(key, ...) \
C10_REGISTER_CREATOR(NetRegistry, key, __VA_ARGS__)
#define REGISTER_NET(name, ...) \
C10_REGISTER_CLASS(NetRegistry, name, __VA_ARGS__)
/**
* @brief Creates a network, accessing / creating blobs in the given workspace.
*
* Note that this is different from Workspace::CreateNet. The latter adds the
* created net object to the workspace's net map, while this function returns
* a standalone net object.
*/
TORCH_API unique_ptr<NetBase> CreateNet(const NetDef& net_def, Workspace* ws);
TORCH_API unique_ptr<NetBase> CreateNet(
const std::shared_ptr<const NetDef>& net_def,
Workspace* ws);
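// Illustrative usage sketch with a hypothetical setup, not part of the
// original header: create a standalone net from a NetDef and run it. Unlike
// Workspace::CreateNet, the returned net is not added to the workspace's map.
//
//   Workspace ws;   // defined in caffe2/core/workspace.h
//   NetDef net_def; // ... populate ops and feed external inputs into ws ...
//   std::unique_ptr<NetBase> net = CreateNet(net_def, &ws);
//   if (net && net->Run()) {
//     // outputs are now available as blobs in ws
//   }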
TORCH_API void AddGlobalNetObserverCreator(NetObserverCreator creator);
TORCH_API void ClearGlobalNetObservers();
} // namespace caffe2
#endif // CAFFE2_CORE_NET_H_
| 4,634 | 25.335227 | 79 | h |
null | pytorch-main/caffe2/core/net_async_base.h |
#ifndef CAFFE2_CORE_NET_ASYNC_BASE_H_
#define CAFFE2_CORE_NET_ASYNC_BASE_H_
#include <c10/macros/Macros.h>
#include "c10/core/thread_pool.h"
#include "c10/util/Registry.h"
#include "caffe2/core/common.h"
#include "caffe2/core/net.h"
#include "caffe2/core/net_dag_utils.h"
#include "caffe2/core/prof_dag_counters.h"
#include "caffe2/core/stats.h"
#include "caffe2/core/timer.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/proto/prof_dag.pb.h"
#include "caffe2/utils/proto_utils.h"
C10_DECLARE_int(caffe2_streams_per_gpu);
C10_DECLARE_int(caffe2_net_async_max_gpus);
C10_DECLARE_int(caffe2_net_async_max_numa_nodes);
C10_DECLARE_int(caffe2_net_async_thread_pool_size);
C10_DECLARE_bool(caffe2_net_async_check_stream_status);
C10_DECLARE_bool(caffe2_net_async_use_single_pool);
C10_DECLARE_bool(caffe2_net_async_use_per_net_pools);
C10_DECLARE_bool(caffe2_net_async_run_root_tasks_inline);
C10_DECLARE_bool(caffe2_net_async_profile_operators);
namespace caffe2 {
class AsyncNetExecutorHelper;
namespace tracing {
class Tracer;
}
struct ExecutionOptions {
explicit ExecutionOptions(const std::shared_ptr<const NetDef>& net_def);
// number of gpu streams per gpu per cpu thread
int streams_per_gpu_ = 1;
// ops synchronization options
bool finish_chain_ = false;
bool always_schedule_child_ = false;
// try to pick gpu stream that is not busy
bool check_stream_status_ = false;
// use single thread pool for all devices
bool use_single_pool_ = false;
// use per net instances thread pools instead of global ones
bool use_per_net_pools_ = false;
// whether RunAsync is blocking
bool is_blocking_ = false;
// prof_dag counters reporting
bool report_stats_ = false;
// immediately run children tasks inline whenever possible
bool use_dfs_scheduling_ = false;
// run net's root tasks in RunAsync thread instead of in thread pool
bool run_root_tasks_inline_ = false;
};
struct TORCH_API AsyncNetCancelled : public std::exception {
const char* what() const noexcept override {
return "Cancelled";
}
};
class TORCH_API AsyncNetBase : public NetBase {
public:
AsyncNetBase(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);
~AsyncNetBase() override;
bool SupportsAsync() override {
return true;
}
vector<OperatorBase*> GetOperators() const override {
return operators_;
}
bool RunAsync() override;
const dag_utils::ExecutionChains& TEST_execution_chains() const {
return execution_chains_;
}
ProfDAGProtos GetOperatorStats() const;
ProfDAGProtos GetPerOperatorCost() const;
ProfDAGReport GetProfReport() const;
protected:
bool canSchedule(
int chain_id,
const std::vector<EventStatus>* status = nullptr,
bool* parent_failed = nullptr);
bool canSchedule(int parent_id, int child_id);
int tasksNum() const;
Event& event(int task_id) const;
EventStatus query(int task_id) const;
const std::vector<int>& children(int task_id) const;
const std::vector<int>& parents(int task_id) const;
int updateParentCount(int child_id);
int getParentCount(int child_id);
bool testAndSetScheduled(int task_id);
int numOps(int task_id) const;
int firstTaskOpId(int task_id) const;
int lastTaskOpId(int task_id) const;
const OperatorBase* firstTaskOp(int task_id) const;
const OperatorBase* lastTaskOp(int task_id) const;
OperatorBase* firstTaskOp(int task_id);
OperatorBase* lastTaskOp(int task_id);
void asyncWait(
int task_id,
int stream_id,
const std::vector<int>& wait_task_ids) const;
bool run(int task_id, int stream_id) noexcept;
int stream(int task_id);
TaskThreadPoolBase* pool(const DeviceOption& device_option);
TaskThreadPoolBase* pool();
void finishTasks(const std::unordered_set<int>& task_ids);
void finalizeEvents();
bool isStreamFree(int task_id, int stream_id) const;
virtual void reset();
bool handleRunError() override;
// Operator/task graph
std::vector<OperatorBase*> operators_;
std::vector<dag_utils::OperatorNode> operator_nodes_;
std::vector<std::vector<int>> chains_;
std::vector<dag_utils::OpGraphNode> chain_nodes_; // chains' parents/children
dag_utils::ExecutionChains execution_chains_; // for testing
// Pools and streams
std::mutex pools_mutex_;
// first int key - device id, second - pool size, one pool per (device, size)
typedef std::unordered_map<
int,
std::unordered_map<int, std::shared_ptr<TaskThreadPoolBase>>>
PoolsMap;
PoolsMap cpu_pools_;
PoolsMap gpu_pools_;
static std::vector<int>& getStreamCounters();
int num_workers_;
// Exception/error handling
void handleChainError(
int task_id,
OperatorBase* op,
const char* err_msg,
bool save_exception = false) noexcept;
std::atomic<bool> success_;
// Tracing
std::shared_ptr<tracing::Tracer> tracer_;
// execution mode flags
ExecutionOptions options_;
ProfDAGCounters counters_;
C10_DISABLE_COPY_AND_ASSIGN(AsyncNetBase);
private:
TaskThreadPoolBase*
poolGetter(PoolsMap& pools, int device_type, int device_id, int pool_size);
std::unique_ptr<AsyncNetExecutorHelper> helper_;
friend class AsyncNetExecutorHelper;
friend class tracing::Tracer;
};
class AsyncNetExecutorHelper : public ExecutorHelper {
public:
explicit AsyncNetExecutorHelper(AsyncNetBase* net) : net_(net) {}
TaskThreadPoolBase* GetPool(const DeviceOption& option) const override {
return net_->pool(option);
}
private:
AsyncNetBase* net_;
};
template <class TaskThreadPoolImpl, int device_type>
std::shared_ptr<TaskThreadPoolBase>
GetAsyncNetThreadPool(int device_id, int pool_size, bool create_new) {
static std::unordered_map<
int,
std::unordered_map<int, std::weak_ptr<TaskThreadPoolBase>>>
pools;
static std::mutex pool_mutex;
const auto& device_type_name = DeviceTypeName(device_type);
if (pool_size <= 0) {
if (FLAGS_caffe2_net_async_thread_pool_size > 0) {
pool_size = FLAGS_caffe2_net_async_thread_pool_size;
LOG(INFO) << "Using default " << device_type_name
<< " pool size: " << pool_size << "; device id: " << device_id;
} else {
auto num_cores = std::thread::hardware_concurrency();
CAFFE_ENFORCE(num_cores > 0, "Failed to get number of CPU cores");
LOG(INFO) << "Using estimated " << device_type_name
<< " pool size: " << num_cores << "; device id: " << device_id;
pool_size = num_cores;
}
} else {
LOG(INFO) << "Using specified " << device_type_name
<< " pool size: " << pool_size << "; device id: " << device_id;
}
if (create_new) {
LOG(INFO) << "Created new " << device_type_name
<< " pool, size: " << pool_size << "; device id: " << device_id;
return std::make_shared<TaskThreadPoolImpl>(pool_size, device_id);
} else {
std::lock_guard<std::mutex> lock(pool_mutex);
auto shared_pool = pools[device_id][pool_size].lock();
if (!shared_pool) {
LOG(INFO) << "Created shared " << device_type_name
<< " pool, size: " << pool_size << "; device id: " << device_id;
shared_pool = std::make_shared<TaskThreadPoolImpl>(pool_size, device_id);
pools[device_id][pool_size] = shared_pool;
}
return shared_pool;
}
}
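// A minimal usage sketch (MyTaskThreadPool is hypothetical and is assumed to
// be a TaskThreadPoolBase implementation constructible from
// (pool_size, device_id)); a pool_size of 0 falls back to the flag/estimate
// logic above:
//
//   auto pool = GetAsyncNetThreadPool<MyTaskThreadPool, PROTO_CPU>(
//       /* device_id */ 0, /* pool_size */ 0, /* create_new */ false);
//   pool->run([]() { /* task body */ });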
} // namespace caffe2
#endif // CAFFE2_CORE_NET_ASYNC_BASE_H_
| 7,397
| 30.084034
| 80
|
h
|
null |
pytorch-main/caffe2/core/net_async_scheduling.h
|
#ifndef CAFFE2_CORE_NET_ASYNC_SCHEDULING_H_
#define CAFFE2_CORE_NET_ASYNC_SCHEDULING_H_
#include "caffe2/core/net_async_base.h"
namespace caffe2 {
class TORCH_API AsyncSchedulingNet : public AsyncNetBase {
public:
AsyncSchedulingNet(
const std::shared_ptr<const NetDef>& net_def,
Workspace* ws);
~AsyncSchedulingNet() override;
void Wait() override;
void Cancel() override;
protected:
bool RunAsync() override;
void pollAndSchedule(int task_id);
void schedule(int task_id, bool run_inline = false) noexcept;
void reset() override;
virtual void finishRun();
void parentCallback(int parent_id);
bool isInlineTask(int parent_id, int child_id) const;
void CancelAndFinishAsyncTasks();
std::mutex running_mutex_;
std::condition_variable running_cv_;
std::atomic<bool> running_;
std::atomic<int> processed_tasks_num_;
C10_DISABLE_COPY_AND_ASSIGN(AsyncSchedulingNet);
};
} // namespace caffe2
#endif // CAFFE2_CORE_NET_ASYNC_SCHEDULING_H_
| 993
| 22.116279
| 63
|
h
|
null |
pytorch-main/caffe2/core/net_async_task_future.h
|
#ifndef CAFFE2_NET_ASYNC_TASK_FUTURE_H
#define CAFFE2_NET_ASYNC_TASK_FUTURE_H
#include <atomic>
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
namespace caffe2 {
// Represents the state of AsyncTask execution, which can be queried with
// IsCompleted/IsFailed. Callbacks are supported through SetCallback and
// are called upon the future's completion.
class AsyncTaskFuture {
public:
AsyncTaskFuture();
// Creates a future completed when all given futures are completed
explicit AsyncTaskFuture(const std::vector<AsyncTaskFuture*>& futures);
~AsyncTaskFuture();
AsyncTaskFuture(const AsyncTaskFuture&) = delete;
AsyncTaskFuture& operator=(const AsyncTaskFuture&) = delete;
bool IsCompleted() const;
bool IsFailed() const;
std::string ErrorMessage() const;
void Wait() const;
void SetCallback(std::function<void(const AsyncTaskFuture*)> callback);
void SetCompleted(const char* err_msg = nullptr);
void ResetState();
private:
mutable std::mutex mutex_;
mutable std::condition_variable cv_completed_;
std::atomic<bool> completed_;
std::atomic<bool> failed_;
std::string err_msg_;
std::vector<std::function<void(const AsyncTaskFuture*)>> callbacks_;
struct ParentCounter {
explicit ParentCounter(int init_parent_count)
: init_parent_count_(init_parent_count),
parent_count(init_parent_count),
parent_failed(false) {}
void Reset() {
std::unique_lock<std::mutex> lock(err_mutex);
parent_count = init_parent_count_;
parent_failed = false;
err_msg = "";
}
const int init_parent_count_;
std::atomic<int> parent_count;
std::mutex err_mutex;
std::atomic<bool> parent_failed;
std::string err_msg;
};
std::unique_ptr<ParentCounter> parent_counter_;
};
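// A minimal usage sketch, assuming the caller splits work between a worker
// thread (which completes the future) and a scheduler (which waits on it):
//
//   AsyncTaskFuture future;
//   future.SetCallback([](const AsyncTaskFuture* f) {
//     if (f->IsFailed()) {
//       LOG(ERROR) << "task failed: " << f->ErrorMessage();
//     }
//   });
//   // ... on the worker thread, once the task has finished:
//   future.SetCompleted(/* err_msg */ nullptr);
//   // ... on the scheduler side:
//   future.Wait();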
} // namespace caffe2
#endif // CAFFE2_NET_ASYNC_TASK_FUTURE_H
| 1,925
| 24.012987
| 73
|
h
|
null |
pytorch-main/caffe2/core/net_async_task_graph.h
|
#ifndef CAFFE2_NET_ASYNC_TASK_GRAPH_H
#define CAFFE2_NET_ASYNC_TASK_GRAPH_H
#include "caffe2/core/net_async_base.h"
#include "caffe2/core/net_async_task.h"
#include "caffe2/core/net_async_task_future.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
// AsyncTaskGraph represents an execution of a net; it owns the tasks and
// associated futures, sets up future callbacks and propagates errors.
// Usage steps:
// - Add graph nodes and edges through CreateNode/AddDependency;
// - Freeze the graph (FreezeGraph); after freezing, a future
//   can be obtained using GetFuture;
// - Schedule execution of the graph through ExecuteGraph; after each
//   execution Reset must be called to prepare the graph for the next run
//   (a commented usage sketch follows the AsyncTaskGraph class below).
class AsyncTaskGraphBase {
public:
virtual bool CreateNode(
int node_id,
const std::vector<OperatorBase*>& ops) = 0;
virtual bool AddDependency(
int child_node_id,
const std::vector<int>& parent_node_ids) = 0;
virtual void FreezeGraph() = 0;
virtual AsyncTaskFuture* ExecuteGraph() = 0;
virtual AsyncTaskFuture* GetFuture() = 0;
virtual void Reset() = 0;
virtual ~AsyncTaskGraphBase() noexcept {}
};
class AsyncTaskGraph : public AsyncTaskGraphBase {
public:
AsyncTaskGraph(ExecutorHelper* helper, const ExecutionOptions& options);
bool CreateNode(int node_id, const std::vector<OperatorBase*>& ops) override;
bool AddDependency(int child_node_id, const std::vector<int>& parent_node_ids)
override;
void FreezeGraph() override;
AsyncTaskFuture* ExecuteGraph() override;
AsyncTaskFuture* GetFuture() override;
void Reset() override;
private:
// used to, e.g., get access to executor's thread pools
// TODO: pass tracer and counters through ExecutorHelper
ExecutorHelper* helper_;
ExecutionOptions options_;
bool frozen_;
std::unordered_map<int, std::unique_ptr<AsyncTask>> nodes_;
std::unordered_map<int, std::unordered_set<int>> parents_;
std::unordered_map<int, std::unordered_set<int>> children_;
std::vector<std::unique_ptr<AsyncTaskFuture>> edge_futures_;
std::vector<AsyncTask*> root_tasks_;
std::unique_ptr<AsyncTaskFuture> run_future_;
};
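// A minimal usage sketch following the steps listed above; `helper`,
// `options`, `op_a` and `op_b` are hypothetical:
//
//   AsyncTaskGraph graph(helper, options);
//   graph.CreateNode(0, {op_a});
//   graph.CreateNode(1, {op_b});
//   graph.AddDependency(1, {0}); // op_b runs after op_a
//   graph.FreezeGraph();
//   AsyncTaskFuture* run_future = graph.ExecuteGraph();
//   run_future->Wait();
//   graph.Reset(); // prepare the graph for the next run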
} // namespace caffe2
#endif // CAFFE2_NET_ASYNC_TASK_GRAPH_H
| 2,253
| 27.531646
| 80
|
h
|
null |
pytorch-main/caffe2/core/net_async_tracing.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_CORE_NET_ASYNC_TRACING_H_
#define CAFFE2_CORE_NET_ASYNC_TRACING_H_
#include "caffe2/core/common.h"
#include "caffe2/core/net_async_base.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/timer.h"
C10_DECLARE_string(caffe2_net_async_tracing_filepath);
C10_DECLARE_string(caffe2_net_async_names_to_trace);
C10_DECLARE_int(caffe2_net_async_tracing_nth);
namespace caffe2 {
namespace tracing {
struct TORCH_API TracerEvent {
int op_id_ = -1;
int task_id_ = -1;
int stream_id_ = -1;
const char* name_ = nullptr;
const char* category_ = nullptr;
  long timestamp_ = -1;
bool is_beginning_ = false;
long thread_label_ = -1;
std::thread::id tid_;
int iter_ = -1;
};
enum TracingField {
TRACE_OP,
TRACE_TASK,
TRACE_STREAM,
TRACE_THREAD,
TRACE_NAME,
TRACE_CATEGORY,
TRACE_ITER,
};
enum class TracingMode {
EVERY_K_ITERATIONS,
GLOBAL_TIMESLICE,
};
struct TracingConfig {
TracingMode mode{TracingMode::EVERY_K_ITERATIONS};
std::string filepath{"/tmp"};
// for TracingMode::EVERY_K_ITERATIONS
int64_t trace_every_nth_batch = 100;
int64_t dump_every_nth_batch = 10000;
// for TracingMode::GLOBAL_TIMESLICE
int64_t trace_every_n_ms = 2 * 60 * 1000; // 2min
int64_t trace_for_n_ms = 1000; // 1sec
};
class TORCH_API Tracer {
public:
Tracer(
const NetBase* net,
const std::string& net_name,
TracingConfig = TracingConfig{});
void recordEvent(const TracerEvent& event);
std::string opTraceName(const OperatorBase* op);
std::string opBlobsInfo(const OperatorBase& op);
std::string serializeEvent(const TracerEvent& event);
void linearizeEvents();
void renameThreads();
void setEnabled(bool enabled);
bool isEnabled() const;
const TracingConfig& config() {
return config_;
}
int bumpIter();
int getIter();
int bumpDumpingIter();
// Dump the tracing result to file with given suffix, and then
// clear current events.
void dumpTracingResultAndClearEvents(const std::string& file_suffix);
virtual ~Tracer();
private:
const NetBase* net_ = nullptr;
std::string filename_;
std::vector<TracerEvent> events_;
std::mutex tracer_mutex_;
bool enabled_ = false;
Timer timer_;
int iter_;
int dumping_iter_;
TracingConfig config_;
friend class TracerGuard;
};
class TORCH_API TracerGuard {
public:
// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
TracerGuard() {}
void init(Tracer* tracer);
void addArgument();
void addArgument(TracingField field, const char* value);
void addArgument(TracingField field, int value);
template <typename T, typename... Args>
void addArgument(TracingField field, const T& value, const Args&... args) {
addArgument(field, value);
addArgument(args...);
}
void recordEventStart();
virtual ~TracerGuard();
static TracerGuard* getCurrentTracerGuard();
void disable();
private:
bool enabled_ = false;
TracerEvent event_;
Tracer* tracer_;
};
// Extract the shard id from a name of the form "...shard:123...".
// Return -1 if no shard is found.
TORCH_API int extractShardId(const std::string& name);
// Check if the net name is white-listed for tracing (specified via a command
// line flag)
TORCH_API bool isTraceableNetName(const std::string& net_name);
TORCH_API std::shared_ptr<Tracer> create(
const NetBase* net,
const std::string& net_name);
TORCH_API bool startIter(const std::shared_ptr<Tracer>& tracer);
} // namespace tracing
#define TRACE_NAME_CONCATENATE(s1, s2) s1##s2
#define TRACE_ANONYMOUS_NAME(str) TRACE_NAME_CONCATENATE(str, __LINE__)
#define TRACE_EVENT_INIT(...) \
TRACE_ANONYMOUS_NAME(trace_guard).init(tracer_.get()); \
TRACE_ANONYMOUS_NAME(trace_guard).addArgument(__VA_ARGS__); \
TRACE_ANONYMOUS_NAME(trace_guard).recordEventStart();
// Supposed to be used only once per scope in AsyncNetBase-derived nets
#define TRACE_EVENT(...) \
tracing::TracerGuard TRACE_ANONYMOUS_NAME(trace_guard); \
if (tracer_ && tracer_->isEnabled()) { \
TRACE_EVENT_INIT(__VA_ARGS__) \
}
#define TRACE_EVENT_IF(cond, ...) \
tracing::TracerGuard TRACE_ANONYMOUS_NAME(trace_guard); \
if (tracer_ && tracer_->isEnabled() && (cond)) { \
TRACE_EVENT_INIT(__VA_ARGS__) \
}
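// A minimal usage sketch: inside a member function of an AsyncNetBase-derived
// net (which owns `tracer_`), a task launch might be annotated as below;
// `task_id` and `stream_id` are hypothetical local variables:
//
//   TRACE_EVENT(
//       tracing::TRACE_TASK, task_id,
//       tracing::TRACE_STREAM, stream_id);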
} // namespace caffe2
#endif // CAFFE2_CORE_NET_ASYNC_TRACING_H_
| 5,093
| 26.836066
| 77
|
h
|
null |
pytorch-main/caffe2/core/net_dag_utils.h
|
#ifndef CAFFE2_CORE_NET_DAG_UTILS_H_
#define CAFFE2_CORE_NET_DAG_UTILS_H_
#include <atomic>
#include <climits>
#include <cstddef>
#include <thread> // NOLINT
#include <typeinfo>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "c10/util/Registry.h"
#include "caffe2/core/blob.h"
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/net.h"
#include "caffe2/core/observer.h"
#include "caffe2/core/operator_schema.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/simple_queue.h"
namespace caffe2 {
namespace dag_utils {
struct OperatorNode {
unique_ptr<OperatorBase> operator_;
vector<int> children_;
vector<int> parents_;
std::atomic<int> runtime_parent_count_;
bool is_chain_start_ = false;
std::atomic_flag scheduled_ = ATOMIC_FLAG_INIT;
};
struct OpGraphNode {
vector<int> children_;
vector<int> parents_;
int visited_inputs = 0;
int num_orig_parents;
};
using ExecutionChains = std::unordered_map<int, std::vector<int>>;
C10_EXPORT ExecutionChains computeChains(std::vector<OperatorNode>& orig_nodes);
// Instead of breaking down the DAG into chains, we partition it into clusters
// of sync ops and individual async ops. This is useful for the distributed
// inference case where we have both sync and async CPU ops. Note that we have
// to sync each async op individually instead of putting it into a chain and
// syncing the chain's tail as we do for GPU ops, because CPU async ops are
// typically RPC calls and are not guaranteed to be linearized at the remote
// site.
C10_EXPORT ExecutionChains computeGroups(std::vector<OperatorNode>& orig_nodes);
C10_EXPORT ExecutionChains singleChains(std::vector<OperatorNode>& nodes);
C10_EXPORT std::vector<OperatorNode> prepareOperatorNodes(
const std::shared_ptr<const NetDef>& net_def,
Workspace* ws);
std::vector<OpGraphNode> prepareChainGraphNodes(
const std::vector<dag_utils::OperatorNode>& operator_nodes,
const std::vector<std::vector<int>>& execution_chains);
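// A minimal usage sketch of the chain computation, assuming `net_def` is a
// std::shared_ptr<const NetDef> and `ws` is a Workspace* owned by the caller:
//
//   auto nodes = dag_utils::prepareOperatorNodes(net_def, ws);
//   dag_utils::ExecutionChains chains = dag_utils::computeChains(nodes);
//   for (const auto& kv : chains) {
//     LOG(INFO) << "chain starting at op " << kv.first << " has "
//               << kv.second.size() << " ops";
//   }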
} // namespace dag_utils
} // namespace caffe2
#endif // CAFFE2_CORE_NET_DAG_UTILS_H_
| 2,146
| 29.671429
| 80
|
h
|
null |
pytorch-main/caffe2/core/net_parallel.h
|
#ifndef CAFFE2_CORE_NET_PARALLEL_H
#define CAFFE2_CORE_NET_PARALLEL_H
#include "caffe2/core/net_async_base.h"
#include "caffe2/core/net_async_task_graph.h"
C10_DECLARE_string(caffe2_task_graph_engine);
namespace caffe2 {
class ParallelNetExecutorHelper;
class TORCH_API ParallelNet : public NetBase {
public:
ParallelNet(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);
bool RunAsync() override;
void Wait() override;
bool SupportsAsync() override;
std::vector<OperatorBase*> GetOperators() const override;
TaskThreadPoolBase* Pool(const DeviceOption& device_option);
protected:
bool handleRunError() override;
virtual void finishRun();
virtual void reset();
ExecutionOptions options_;
int num_workers_;
std::unique_ptr<ParallelNetExecutorHelper> helper_;
std::shared_ptr<AsyncTaskGraphBase> task_graph_;
AsyncTaskFuture* run_future_;
std::vector<dag_utils::OperatorNode> operator_nodes_;
std::vector<OperatorBase*> operators_;
std::mutex pools_mutex_;
typedef std::unordered_map<
int,
std::unordered_map<int, std::shared_ptr<TaskThreadPoolBase>>>
PoolsMap;
PoolsMap cpu_pools_;
PoolsMap gpu_pools_;
TaskThreadPoolBase*
poolGetter(PoolsMap& pools, int device_type, int device_id, int pool_size);
friend class ParallelNetExecutorHelper;
C10_DISABLE_COPY_AND_ASSIGN(ParallelNet);
};
C10_DECLARE_SHARED_REGISTRY(
TaskGraphRegistry,
AsyncTaskGraphBase,
ExecutorHelper*,
const ExecutionOptions&);
std::shared_ptr<AsyncTaskGraphBase> GetAsyncTaskGraph(
ExecutorHelper* helper,
const ExecutionOptions& options);
class ParallelNetExecutorHelper : public ExecutorHelper {
public:
explicit ParallelNetExecutorHelper(ParallelNet* net) : net_(net) {}
TaskThreadPoolBase* GetPool(const DeviceOption& option) const override {
return net_->Pool(option);
}
std::vector<OperatorBase*> GetOperators() const override {
return net_->GetOperators();
}
int GetNumWorkers() const override {
return net_->num_workers_;
}
private:
ParallelNet* net_;
};
} // namespace caffe2
#endif // CAFFE2_CORE_NET_PARALLEL_H
| 2,144
| 23.94186
| 77
|
h
|
null |
pytorch-main/caffe2/core/net_simple.h
|
#ifndef CAFFE2_CORE_NET_SIMPLE_H_
#define CAFFE2_CORE_NET_SIMPLE_H_
#include <vector>
#include "c10/util/Registry.h"
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/net.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
struct IndividualMetrics {
public:
explicit IndividualMetrics(const std::vector<OperatorBase*>& operators)
: main_runs_(0), operators_(operators) {
const auto num_ops = operators_.size();
time_per_op.resize(num_ops, 0.0);
}
// run ops while collecting profiling results
void RunOpsWithProfiling();
// print out profiling results
void PrintOperatorProfilingResults();
const vector<float>& GetTimePerOp() {
return time_per_op;
}
float setup_time{0.0};
float memory_alloc_time{0.0};
float memory_dealloc_time{0.0};
float output_dealloc_time{0.0};
private:
int main_runs_;
const std::vector<OperatorBase*>& operators_;
vector<float> time_per_op;
vector<uint64_t> flops_per_op;
vector<uint64_t> memory_bytes_read_per_op;
vector<uint64_t> memory_bytes_written_per_op;
vector<uint64_t> param_bytes_per_op;
CaffeMap<string, int> num_ops_per_op_type_;
CaffeMap<string, float> time_per_op_type;
CaffeMap<string, float> flops_per_op_type;
CaffeMap<string, float> memory_bytes_read_per_op_type;
CaffeMap<string, float> memory_bytes_written_per_op_type;
CaffeMap<string, float> param_bytes_per_op_type;
};
// This is the very basic structure you need to run a network - all it
// does is simply run everything in sequence. If you want fancier control,
// such as DAG-like execution, check out the other net implementations.
class TORCH_API SimpleNet : public NetBase {
public:
SimpleNet(const std::shared_ptr<const NetDef>& net_def, Workspace* ws);
bool SupportsAsync() override {
return false;
}
vector<float> TEST_Benchmark(
const int warmup_runs,
const int main_runs,
const bool run_individual) override;
/*
* This returns a list of pointers to objects stored in unique_ptrs.
* Used by Observers.
*
* Think carefully before using.
*/
vector<OperatorBase*> GetOperators() const override {
vector<OperatorBase*> op_list;
for (auto& op : operators_) {
op_list.push_back(op.get());
}
return op_list;
}
protected:
bool Run() override;
bool RunAsync() override;
vector<unique_ptr<OperatorBase>> operators_;
C10_DISABLE_COPY_AND_ASSIGN(SimpleNet);
};
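// A minimal usage sketch, assuming `net_def_proto` is a valid NetDef whose
// external inputs already exist in the workspace:
//
//   Workspace ws;
//   auto net_def = std::make_shared<const NetDef>(net_def_proto);
//   SimpleNet net(net_def, &ws);
//   // Benchmark: 1 warmup run, 10 timed runs, with per-operator timings.
//   vector<float> per_op_ms = net.TEST_Benchmark(1, 10, true);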
} // namespace caffe2
#endif // CAFFE2_CORE_NET_SIMPLE_H_
| 2,606
| 25.876289
| 79
|
h
|
null |
pytorch-main/caffe2/core/net_simple_refcount.h
|
#ifndef CAFFE2_CORE_NET_SIMPLE_REFCOUNT_H_
#define CAFFE2_CORE_NET_SIMPLE_REFCOUNT_H_
#include <vector>
#include "c10/util/Registry.h"
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/net.h"
#include "caffe2/core/net_simple.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
namespace caffe2 {
// SimpleRefCountNet is an implementation that adds an additional abstraction
// on top of SimpleNet: it tracks all the tensors and, for those that are
// considered internal/temporary, deletes them once their refcount drops to
// zero.
// In the context of a simple static run, this can be carried out during
// construction time: we will do a pass through the network and track what
// blobs we need to do reset on, after the execution of every op.
//
// To identify which blob is considered temporary, we employ the following
// strategy: any blob that is
// (1) consumed but not produced by ops in the net, or
// (2) produced but not consumed by ops in the net, or
// (3) is marked as external_output in the protobuf
// will NOT be considered temporary.
//
// In the long run, we should design proper functional interfaces so that
// nets are less imperative and more functional.
//
// Also, for now, SimpleRefCountNet should only be used for benchmarking
// purposes and not for production use, since it is not going to provide a
// real performance gain, and is implicitly incompatible with the contract
// that earlier Nets expose - that all intermediate blobs are visible to the
// users.
class SimpleRefCountNet final : public SimpleNet {
public:
SimpleRefCountNet(
const std::shared_ptr<const NetDef>& net_def,
Workspace* ws);
protected:
bool Run() override;
using SimpleNet::operators_;
private:
// The list of blobs to delete when each operator finishes its run.
// This will be populated during construction time.
vector<vector<Blob*>> delete_list_;
C10_DISABLE_COPY_AND_ASSIGN(SimpleRefCountNet);
};
} // namespace caffe2
#endif // CAFFE2_CORE_NET_SIMPLE_REFCOUNT_H_
| 2,097
| 33.966667
| 80
|
h
|
null |
pytorch-main/caffe2/core/observer.h
|
#pragma once
#include <memory>
#include <unordered_set>
#include "caffe2/core/logging.h"
namespace caffe2 {
/**
 * Use this to implement an Observer using the Observer Pattern template.
*/
template <class T>
class ObserverBase {
public:
explicit ObserverBase(T* subject) : subject_(subject) {}
virtual void Start() {}
virtual void Stop() {}
virtual std::string debugInfo() {
return "Not implemented.";
}
virtual ~ObserverBase() noexcept {};
T* subject() const {
return subject_;
}
virtual std::unique_ptr<ObserverBase<T>> rnnCopy(T* subject, int rnn_order)
const {
return nullptr;
};
protected:
T* subject_;
};
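// A minimal sketch of a concrete observer; TimingObserver is hypothetical and
// assumes NetBase (caffe2/core/net.h) as the subject type and Timer
// (caffe2/core/timer.h) as the clock:
//
//   class TimingObserver final : public ObserverBase<NetBase> {
//    public:
//     using ObserverBase<NetBase>::ObserverBase;
//     void Start() override { timer_.Start(); }
//     void Stop() override {
//       LOG(INFO) << "net run took " << timer_.MilliSeconds() << " ms";
//     }
//    private:
//     Timer timer_;
//   };
//
//   // Attach to an observable net:
//   net->AttachObserver(std::make_unique<TimingObserver>(net));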
/**
* Inherit to make your class observable.
*/
template <class T>
class Observable {
public:
Observable() = default;
Observable(Observable&&) = default;
Observable& operator =(Observable&&) = default;
virtual ~Observable() = default;
C10_DISABLE_COPY_AND_ASSIGN(Observable);
using Observer = ObserverBase<T>;
  /* Returns a pointer to the observer after addition. */
const Observer* AttachObserver(std::unique_ptr<Observer> observer) {
CAFFE_ENFORCE(observer, "Couldn't attach a null observer.");
std::unordered_set<const Observer*> observers;
for (auto& ob : observers_list_) {
observers.insert(ob.get());
}
const auto* observer_ptr = observer.get();
if (observers.count(observer_ptr)) {
return observer_ptr;
}
observers_list_.push_back(std::move(observer));
UpdateCache();
return observer_ptr;
}
/**
   * Returns a unique_ptr to the removed observer. If not found, returns
   * nullptr.
*/
std::unique_ptr<Observer> DetachObserver(const Observer* observer_ptr) {
for (auto it = observers_list_.begin(); it != observers_list_.end(); ++it) {
if (it->get() == observer_ptr) {
auto res = std::move(*it);
observers_list_.erase(it);
UpdateCache();
return res;
}
}
return nullptr;
}
virtual size_t NumObservers() {
return num_observers_;
}
private:
inline static void StartObserver(Observer* observer) {
try {
observer->Start();
} catch (const std::exception& e) {
LOG(ERROR) << "Exception from observer: " << e.what();
} catch (...) {
LOG(ERROR) << "Exception from observer: unknown";
}
}
inline static void StopObserver(Observer* observer) {
try {
observer->Stop();
} catch (const std::exception& e) {
LOG(ERROR) << "Exception from observer: " << e.what();
} catch (...) {
LOG(ERROR) << "Exception from observer: unknown";
}
}
void UpdateCache() {
num_observers_ = observers_list_.size();
if (num_observers_ != 1) {
// we cannot take advantage of the cache
return;
}
observer_cache_ = observers_list_[0].get();
}
public:
void StartAllObservers() {
// do not access observers_list_ unless necessary
if (num_observers_ == 0) {
return;
} else if (num_observers_ == 1) {
StartObserver(observer_cache_);
} else {
for (auto& observer : observers_list_) {
StartObserver(observer.get());
}
}
}
void StopAllObservers() {
// do not access observers_list_ unless necessary
if (num_observers_ == 0) {
return;
} else if (num_observers_ == 1) {
StopObserver(observer_cache_);
} else {
for (auto& observer : observers_list_) {
StopObserver(observer.get());
}
}
}
private:
  // a cached observer pointer for fast iteration;
// ideally, inside StartAllObservers and StopAllObservers,
// we should never access observers_list_
Observer* observer_cache_;
size_t num_observers_ = 0;
protected:
std::vector<std::unique_ptr<Observer>> observers_list_;
};
} // namespace caffe2
| 3,809
| 22.090909
| 80
|
h
|
null |
pytorch-main/caffe2/core/operator_gradient.h
|
#ifndef CAFFE2_CORE_OPERATOR_GRADIENT_H_
#define CAFFE2_CORE_OPERATOR_GRADIENT_H_
#include "c10/util/Registry.h"
#include "caffe2/core/operator_schema.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"
namespace caffe2 {
/* @brief A struct that abstracts on top of dense and sparse blobs.
*
 * For a dense blob, its gradient name should be written into dense_, and for
 * a sparse blob, its gradient name should be written into indices_ for
 * the sparse indices and values_ for the values.
*/
struct TORCH_API GradientWrapper {
string dense_;
string indices_;
string values_;
inline bool IsDense() const {
return (dense_.size() != 0);
}
inline bool IsSparse() const {
return (indices_.size() != 0 || values_.size() != 0);
}
inline bool IsEmpty() const {
return (!IsDense() && !IsSparse());
}
};
/**
* A struct that holds the gradient operators and related gradient maps.
*/
struct TORCH_API GradientOpsMeta {
vector<OperatorDef> ops_;
vector<GradientWrapper> g_input_;
GradientOpsMeta() {}
GradientOpsMeta(
const vector<OperatorDef>& ops,
const vector<GradientWrapper>& v)
: ops_(ops), g_input_(v) {}
};
class TORCH_API GradientMakerBase {
public:
GradientMakerBase(
const OperatorDef& def,
const vector<GradientWrapper>& g_output)
: def_(def), g_output_(g_output), g_input_(def.input_size()){};
virtual ~GradientMakerBase() {}
virtual bool CopyDeviceOption() const {
return true;
}
virtual bool CopyEngine() const {
return true;
}
virtual bool CopyArguments() const {
return true;
}
virtual void VerifyOp() const {
auto* schema = OpSchemaRegistry::Schema(def_.type());
if (schema) {
CAFFE_ENFORCE(
schema->Verify(def_),
"(GradientMaker) Operator def did not pass schema checking: ",
ProtoDebugString(def_));
}
}
/**
* @brief Returns the gradient ops meta.
*
   * If your gradient op generator only uses standard input and output
   * manipulations, you can simply implement GetGradientDefs(), which
   * returns vector<OperatorDef>. In it, you can call GI, GI_I and GI_V,
   * which will automatically create the gradient registration for you.
*
* If you need to do custom gradient name registration, overload this
* function directly.
*/
virtual GradientOpsMeta Get() {
VerifyOp();
vector<OperatorDef> new_defs = GetGradientDefs();
for (auto& opdef : new_defs) {
opdef.set_is_gradient_op(true);
}
return GradientOpsMeta(new_defs, g_input_);
};
const OperatorDef& Def() const {
return def_;
}
protected:
virtual vector<OperatorDef> GetGradientDefs() {
CAFFE_NOT_IMPLEMENTED;
}
// Helper functions to return names for the gradient computation.
// I(idx), O(idx): return the input and output names.
// GO(idx): return the name of the gradient for output idx.
// GI(idx), GI_I(idx), GI_V(idx): return the name of the gradient for
// input idx, and also registers that name into the gradient
// registry to be returned.
string I(const int i) {
CAFFE_ENFORCE((i >= 0) && (i < def_.input().size()));
return def_.input(i);
}
string O(const int i) {
CAFFE_ENFORCE((i >= 0) && (i < def_.output().size()));
return def_.output(i);
}
string GI(const int i) {
CAFFE_ENFORCE(
!g_input_.at(i).IsSparse(),
"Input ",
def_.input(i),
" already set to sparse.");
g_input_.at(i).dense_ = GradientName(def_.input(i));
return GradientName(def_.input(i));
}
string GI_I(const int i) {
CAFFE_ENFORCE(
!g_input_.at(i).IsDense(),
"Input ",
def_.input(i),
" already set to dense.");
g_input_.at(i).indices_ = GradientSliceIndices(def_.input(i));
return GradientSliceIndices(def_.input(i));
}
string GI_V(const int i) {
CAFFE_ENFORCE(
!g_input_.at(i).IsDense(),
"Input ",
def_.input(i),
" already set to dense.");
g_input_.at(i).values_ = GradientSliceValues(def_.input(i));
return GradientSliceValues(def_.input(i));
}
string GO(const int i) {
CAFFE_ENFORCE(
g_output_.at(i).IsDense(),
"Gradient of output ",
def_.output(i),
(g_output_.at(i).IsSparse() ? " is sparse (expected dense)."
: " is not provided!"));
return g_output_.at(i).dense_;
}
string GO_I(const int i) {
CAFFE_ENFORCE(
g_output_.at(i).IsSparse(),
"Gradient of output ",
def_.output(i),
(g_output_.at(i).IsDense() ? " is dense (expected sparse)."
: " is not provided!"));
return g_output_.at(i).indices_;
}
string GO_V(const int i) {
CAFFE_ENFORCE(
g_output_.at(i).IsSparse(),
"Gradient of output ",
def_.output(i),
(g_output_.at(i).IsDense() ? " is dense (expected sparse)."
: " is not provided!"));
return g_output_.at(i).values_;
}
const GradientWrapper& GradOut(int i) {
return g_output_.at(i);
}
// Function to add a gradient pair to map.
void SetDense(const int i, const string& name) {
CAFFE_ENFORCE(
!g_input_.at(i).IsSparse(),
"Input ",
def_.input(i),
" already set to sparse.");
g_input_.at(i).dense_ = name;
}
void SetSparse(const int i, const string& indices, const string& values) {
CAFFE_ENFORCE(
!g_input_.at(i).IsDense(),
"Input ",
def_.input(i),
" already set to dense.");
g_input_.at(i).indices_ = indices;
g_input_.at(i).values_ = values;
}
/**
   * @brief A helper function to allow one to create a single operator
   * def, which is usually the case for many simple operators.
*/
template <class... Args>
inline static vector<OperatorDef> SingleGradientDef(const Args&... args) {
return vector<OperatorDef>{CreateOperatorDef(args...)};
}
public:
/**
   * Returns a map from gradient blob names to the parameters they are the
   * gradients of.
*/
static CaffeMap<string, string> MatchGradsToParams(const OperatorDef& op) {
// NOTE: how to go beyond string-matching?
CaffeMap<string, string> m;
for (auto& out : op.output()) {
if (IsGradientBlob(out)) {
m[out] = out.substr(0, out.length() - 5);
}
}
return m;
}
private:
// Utility functions for gradient name computation. We don't expose them
// in order to discourage the use of such names explicitly.
static string GradientName(const string& name) {
return name + "_grad";
}
static bool IsGradientBlob(const string& name) {
return name.length() > 5 && name.find("_grad") == name.length() - 5;
}
static string GradientNameToParam(const string& name) {
CHECK(IsGradientBlob(name));
return name.substr(0, name.length() - 5);
}
static string GradientSliceIndices(const string& name) {
return name + "_grad_indices";
}
static string GradientSliceValues(const string& name) {
return name + "_grad_values";
}
protected:
// We make the member variables protected in case someone wants to write
// a fully custom Get() function.
const OperatorDef& def_;
const vector<GradientWrapper>& g_output_;
vector<GradientWrapper> g_input_;
};
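// A minimal sketch of a typical gradient maker built on the helpers above;
// "MyOp" and "MyOpGradient" are hypothetical operator names:
//
//   class GetMyOpGradient : public GradientMakerBase {
//     using GradientMakerBase::GradientMakerBase;
//     vector<OperatorDef> GetGradientDefs() override {
//       return SingleGradientDef(
//           "MyOpGradient", "",
//           vector<string>{I(0), GO(0)},
//           vector<string>{GI(0)});
//     }
//   };
//   REGISTER_GRADIENT(MyOp, GetMyOpGradient);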
/**
* @brief A helper class to indicate that the operator does not need gradient
* computation.
*
* Use the macro NO_GRADIENT to register operators that do not have gradients.
 * Note that this is different from SHOULD_NOT_DO_GRADIENT: the latter means
* that the gradient computation should not flow through it at all, and throws
* an error if it is called.
*/
class TORCH_API NoGradient : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
vector<OperatorDef> GetGradientDefs() override {
return vector<OperatorDef>();
}
};
/**
* @brief A helper class to indicate that the operator should have no gradient.
*
* This is used when the operator definition is designed to not have a gradient.
* Calling a gradient on this operator def will cause Caffe2 to quit.
*/
struct ThrowInTheTowelIfGradientIsCalled : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
GradientOpsMeta Get() override {
CAFFE_THROW("One should not call gradient for operator ", def_.type(), ".");
}
};
/**
* @brief A helper class to indicate that the gradient mechanism is not ready.
*
* This should only be used sparsely when the gradient does exist, but we have
* not implemented it yet and are using this as a lazy excuse. Eventually, a
* gradient operator should be implemented.
*/
struct GradientNotImplementedYet : public GradientMakerBase {
using GradientMakerBase::GradientMakerBase;
GradientOpsMeta Get() override {
CAFFE_THROW(
"Operator ",
def_.type(),
" should have a gradient but is not implemented yet.");
}
};
C10_DECLARE_REGISTRY(
GradientRegistry,
GradientMakerBase,
const OperatorDef&,
const vector<GradientWrapper>&);
#ifdef CAFFE2_NO_GRADIENT_OPS
#define REGISTER_GRADIENT(name, ...) /* No gradients. */
#define REGISTER_GRADIENT_STR(str_name, ...) /* No gradients. */
#else
#define REGISTER_GRADIENT(name, ...) \
C10_REGISTER_CLASS(GradientRegistry, name, __VA_ARGS__)
#define REGISTER_GRADIENT_STR(str_name, ...) \
C10_REGISTER_TYPED_CLASS(GradientRegistry, str_name, __VA_ARGS__)
#endif
// NO_GRADIENT means that the operator does not need any gradient computation.
#define NO_GRADIENT(name) REGISTER_GRADIENT(name, NoGradient)
// SHOULD_NOT_DO_GRADIENT means that the operator is not designed to have
// gradient operators. If you attempt to call the gradient, a log fatal will
// occur.
#define SHOULD_NOT_DO_GRADIENT(name) \
REGISTER_GRADIENT(name, ThrowInTheTowelIfGradientIsCalled)
#define GRADIENT_NOT_IMPLEMENTED_YET(name) \
REGISTER_GRADIENT(name, GradientNotImplementedYet)
/**
* @brief Gets the GradientOpsMeta for the given operator def.
*/
TORCH_API GradientOpsMeta GetGradientForOp(
const OperatorDef& def,
const vector<GradientWrapper>& g_output);
} // namespace caffe2
#endif // CAFFE2_CORE_OPERATOR_GRADIENT_H_
| 10,222
| 29.245562
| 80
|
h
|
null |
pytorch-main/caffe2/core/operator_schema.h
|
#ifndef CAFFE2_CORE_OPERATOR_SCHEMA_H_
#define CAFFE2_CORE_OPERATOR_SCHEMA_H_
#include <climits>
#include <functional>
#include <initializer_list>
#include <ostream>
#include <set>
#include <unordered_map>
#include <vector>
#include <c10/util/irange.h>
#include <c10/util/Registry.h>
#include <caffe2/core/common.h>
#include <caffe2/core/logging.h>
#include <caffe2/core/types.h>
#include <caffe2/proto/caffe2_pb.h>
#include <caffe2/utils/filler.h>
#include <caffe2/utils/proto_utils.h>
namespace caffe2 {
// A const value returned by OpSchema::CalculateOutput() if the number of
// outputs cannot be determined.
constexpr int kCannotComputeNumOutputs = -1;
/**
* @brief A class to record the schema of an op.
*
* OpSchema records the common interface of an op specified by its name. This
* is optional for each operator implemented in Caffe2 but is strongly
* recommended.
*
* To register an OpSchema, one can use the macro OPERATOR_SCHEMA(name) and
 * then append the various functions in the class. For example, the schema for
 * an op that takes two inputs, produces one output, and whose first input and
 * output may share memory (in-place) can be written as
*
* OPERATOR_SCHEMA(name)
* .NumInputs(2).NumOutputs(1).AllowInplace({{0, 0}});
*/
class TORCH_API OpSchema {
public:
OpSchema() : OpSchema("unknown", "unknown", 0) {}
OpSchema(const string& type, const string& file, const int line);
/**
* @brief Returns the file that the op schema is registered from.
*/
inline const string& file() const {
return file_;
}
/**
* @brief Returns the line in file that the op schema is registered from.
*/
inline int line() const {
return line_;
}
/**
* @brief Returns the docstring of the op schema.
*/
inline const char* doc() const {
return doc_.empty() ? nullptr : doc_.c_str();
}
/**
* @brief Verifies if an operator definition protobuf matches the pattern
* specified in the schema.
*/
bool Verify(const OperatorDef& def) const;
// Functions to set the property of the operator schemas.
// Sets the number of inputs, either a fixed number or a min and a max.
/**
* @brief A single input.
*/
OpSchema& NumInputs(int n);
/**
* @brief Input could be in range [min, max], inclusive.
*/
OpSchema& NumInputs(int min, int max);
/**
* @brief Input could be one of the values specified in allowed_input_nums.
*/
OpSchema& NumInputs(set<int> allowed_input_nums);
/**
* @brief Input is checked with a specified function.
*/
OpSchema& NumInputs(std::function<bool(int)> func);
// Sets the number of outputs, either a fixed number, a min and a max,
// or a function that takes in the input number and produces an output
// number. Use only one function in the set below.
/**
* @brief A single output.
*/
OpSchema& NumOutputs(int n);
/**
* @brief Output could be in range [min, max], inclusive.
*/
OpSchema& NumOutputs(int min, int max);
/**
* @brief Output could be one of the values specified in allowed_output_nums.
*/
OpSchema& NumOutputs(set<int> allowed_output_nums);
/**
* @brief Output is checked with a specified function.
*/
OpSchema& NumOutputs(std::function<bool(int)> func);
/**
* @brief Relationship between inputs and outputs is checked with a specified
* function.
*/
OpSchema& NumInputsOutputs(std::function<bool(int, int)> func);
  // Set the function that can calculate the number of outputs based on the
  // number of inputs. Use only one function in the set below.
/**
* @brief Set the output calculator to a user-defined function.
*/
OpSchema& OutputCalculator(std::function<int(int)> calc);
/**
* @brief Set the number of outputs to be the same as the number of inputs.
*/
OpSchema& SameNumberOfOutput();
// Sets the rule to allow optional in-place operation.
OpSchema& AllowInplace(std::function<bool(int, int)> inplace);
OpSchema& AllowInplace(set<std::pair<int, int>> inplace);
OpSchema& AllowOneToOneInplace();
// Sets the rule to enforce in-place operation.
OpSchema& EnforceInplace(std::function<bool(int, int)> inplace);
OpSchema& EnforceInplace(set<std::pair<int, int>> inplace);
OpSchema& EnforceOneToOneInplace();
// Functions to deal with type and shape inference. Basically, this registers
// a function that takes in an OperatorDef and a series of input type and
// shape specified by TensorProto objects (whose data fields are empty), and
// produces a series of output type and shape.
typedef std::function<
vector<TensorShape>(const OperatorDef&, const vector<TensorShape>&)>
TensorInferenceFunctionType;
/**
* @brief Sets the tensor inference function, which is a std::function object
* defined in operator_schema.h.
*/
OpSchema& TensorInferenceFunction(TensorInferenceFunctionType function);
/**
   * A wrapper that makes a tensor inference function return unknown
   * shapes for all outputs if any one of the inputs has an unknown shape
*/
static TensorInferenceFunctionType NeedsAllInputShapes(
TensorInferenceFunctionType f);
/**
* @brief Sets the corresponding onnx schema name
*/
OpSchema& InheritOnnxSchema(const std::string& onnx_schema_name);
/**
* @brief Shortcut to InheritOnnxSchema(type_)
*/
OpSchema& InheritOnnxSchema() {
return InheritOnnxSchema(type_);
}
/**
* @brief Sets the tensor inference function to produce the same output as
* the input.
*/
OpSchema& IdenticalTypeAndShape();
OpSchema& IdenticalTypeAndShapeOfInput(int idx);
OpSchema& IdenticalTypeAndShapeOfInputDim(int idx, int dim);
OpSchema& IdenticalTypeAndShapeOfMultipleInputs(const vector<int>& indices);
OpSchema& ScalarType(::caffe2::TensorProto_DataType dt);
/**
* @brief A function to allow one to infer the type and shape from the op
* schema.
*/
inline vector<TensorShape> InferTensor(
const OperatorDef& def,
const vector<TensorShape>& input_type_shape) const {
CAFFE_ENFORCE(
Verify(def),
"(InferTensor) Operator def did not pass schema checking: ",
ProtoDebugString(def));
return tensor_inference_function_(def, input_type_shape);
}
/*
* @brief A struct to store various cost information about
* an operator such as FLOPs, total memory use and parameters.
*/
struct Cost {
uint64_t flops{0}; // Floating point operations.
uint64_t bytes_read{0}; // Total memory read.
uint64_t bytes_written{0}; // Total memory written.
uint64_t params_bytes{0}; // Memory read for parameters.
};
/**
* @brief Registers a function that takes in an OperatorDef
* and a series of input shapes and returns the total "cost"
   * required to run the operator, returned as a Cost struct by value.
*/
typedef std::function<
struct Cost(const OperatorDef&, const vector<TensorShape>&)>
CostInferenceFunctionType;
/**
* @brief Register the Cost inference function.
*/
OpSchema& CostInferenceFunction(CostInferenceFunctionType function);
#if 0 // def _MSC_VER
/**
* @brief Register the Cost inference function via a pointer.
*/
template <typename T,
typename = std::enable_if<
                std::is_same<CostInferenceFunctionType&&, T>::value
            >::type>
inline OpSchema& CostInferenceFunction(T func) {
// Note: This is here in order to resolve an MSVC compiler issue: it
// does not automatically convert a function pointer to a std::function,
// and needs an explicit conversion.
return CostInferenceFunction(CostInferenceFunctionType(func));
}
#endif // _MSC_VER
bool HasCostInferenceFunction() const {
return !!cost_inference_function_;
}
inline struct Cost InferCost(
const OperatorDef& def,
const vector<TensorShape>& input_tensor_shape) const {
CAFFE_ENFORCE(
cost_inference_function_, "Cost inference function not defined.");
return (*cost_inference_function_)(def, input_tensor_shape);
}
// Functions to do documentation for the operator schema.
OpSchema& SetDoc(const string& doc);
struct Argument {
Argument(const char* name, const char* description, bool required)
: name_{name}, description_{description}, required_{required} {}
const char* name() const {
return name_;
}
const char* description() const {
return description_;
}
bool is_required() const {
return required_;
}
private:
const char* name_;
const char* description_;
const bool required_;
};
OpSchema&
Arg(const char* name, const char* description, bool required = false);
#define DECLARE_STANDARD_ARG(name, str) \
static const char* Arg_##name; \
OpSchema& Arg##name(const char* description);
DECLARE_STANDARD_ARG(IsTest, is_test)
#undef DECLARE_STANDARD_ARG
OpSchema& Input(const int n, const char* name, const char* description);
OpSchema& Output(const int n, const char* name, const char* description);
// Calls the passed function with `this` as an argument. Useful for
// adding docs for templated/macro ops.
OpSchema& FillUsing(std::function<void(OpSchema&)> populator);
// Remove from documentation
OpSchema& Private();
// This op can pass data across devices
OpSchema& InputsCanCrossDevices();
/**
* @brief A function to allow one to get the number of outputs based on the
* number of inputs, if this schema supports it.
*/
int CalculateOutput(int num_input) const;
const std::string& onnx_schema() const {
return onnx_schema_;
}
int min_input() const {
return min_input_;
}
int max_input() const {
return max_input_;
}
int min_output() const {
return min_output_;
}
int max_output() const {
return max_output_;
}
bool num_inputs_allowed(int x) const {
return num_inputs_allowed_(x);
}
bool num_outputs_allowed(int x) const {
return num_outputs_allowed_(x);
}
bool num_inputs_outputs_allowed(int x, int y) const {
return num_inputs_outputs_allowed_(x, y);
}
int inf() const {
return std::numeric_limits<int>::max();
}
bool inplace_enforced(int x, int y) const {
return inplace_enforced_(x, y);
}
TORCH_API friend std::ostream& operator<<(
std::ostream& out,
const OpSchema& schema);
const std::vector<Argument>& args() const {
return args_;
}
const std::vector<std::pair<const char*, const char*>>& input_desc() const {
return input_desc_;
}
const std::vector<std::pair<const char*, const char*>>& output_desc() const {
return output_desc_;
}
bool private_op() {
return private_;
}
bool inputs_can_cross_devices() const {
return inputs_can_cross_devices_;
}
/**
* @brief Returns the required device location of inputs and outputs.
*/
using DeviceInferenceFunctionType = std::function<
std::pair<std::vector<DeviceOption>, std::vector<DeviceOption>>(
const OperatorDef& def)>;
OpSchema& DeviceInferenceFunction(DeviceInferenceFunctionType function);
/**
* @brief Infer required device location of an op's inputs and outputs
*/
inline std::pair<std::vector<DeviceOption>, std::vector<DeviceOption>>
InferDevice(const OperatorDef& def) const {
return device_inference_function_(def);
}
  // This helper builds a sparse input with values, keys, weights and lengths;
// e.g.:
// values = [1, 2, 3, 2, 4, 6, 7, 3, 6]
// keys = [0, 1, 4, 0, 1, 2, 5, 1, 2]
// weights = [1, 2, 3, 4, 5, 6, 7, 8, 9]
// \_____/ \________/ \__/
// lengths = [3, 4, 2]
OpSchema& WeightedValueKeyLengthInputFillers(
size_t value_index,
size_t key_index,
size_t length_index,
size_t weight_index);
  // This helper builds a sparse input with values, keys and lengths;
// e.g.:
// values = [1, 2, 3, 2, 4, 6, 7, 3, 6]
// keys = [0, 1, 4, 0, 1, 2, 5, 1, 2]
// \_____/ \________/ \__/
// lengths = [3, 4, 2]
OpSchema& ValueKeyLengthInputFillers(
size_t value_index,
size_t key_index,
size_t length_index);
  // This helper builds a sparse input with values and lengths; e.g.:
// values = [1, 2, 3, 2, 4, 6, 7, 3, 6]
// \_____/ \________/ \__/
// lengths = [3, 4, 2]
OpSchema& ValueLengthInputFillers(size_t value_index, size_t length_index);
OpSchema& DisallowInputFillers();
std::vector<TensorFiller> InputFillers(
const std::vector<std::vector<int64_t>>& shapes) const;
private:
std::vector<TensorFiller> SupplyDenseFillers(
const std::vector<std::vector<int64_t>>& shapes);
private:
string type_;
string file_;
string doc_;
string onnx_schema_;
std::vector<Argument> args_{};
std::vector<std::pair<const char*, const char*>> input_desc_{};
std::vector<std::pair<const char*, const char*>> output_desc_{};
int line_ = 0;
int min_input_ = 0;
int max_input_ = std::numeric_limits<int>::max();
int min_output_ = 0;
int max_output_ = std::numeric_limits<int>::max();
bool private_ = false;
bool inputs_can_cross_devices_ = false;
std::function<bool(int)> num_inputs_allowed_ = [](int) { return true; };
std::function<bool(int)> num_outputs_allowed_ = [](int) { return true; };
std::function<bool(int, int)> num_inputs_outputs_allowed_ = [](int, int) {
return true;
};
std::function<int(int)> calculate_output_;
  // By default, any in-place operation is neither allowed nor enforced.
std::function<bool(int, int)> inplace_allowed_ = [](int, int) {
return false;
};
std::function<bool(int, int)> inplace_enforced_ = [](int, int) {
return false;
};
TensorInferenceFunctionType tensor_inference_function_;
std::unique_ptr<CostInferenceFunctionType> cost_inference_function_ = nullptr;
DeviceInferenceFunctionType device_inference_function_;
std::function<std::vector<TensorFiller>(
const std::vector<std::vector<int64_t>>&)>
filler_supplier_ =
[this](const std::vector<std::vector<int64_t>>& shapes) {
return SupplyDenseFillers(shapes);
};
};
/**
* @brief A registry to hold all the operator schemas.
*/
class TORCH_API OpSchemaRegistry {
public:
static OpSchema&
NewSchema(const string& key, const string& file, const int line);
static const OpSchema* Schema(const string& key) {
auto& m = map();
auto it = m.find(key);
if (it != m.end()) {
return &it->second;
} else {
return nullptr;
}
}
private:
// OpSchemaRegistry should not need to be instantiated.
OpSchemaRegistry() = delete;
/**
* @brief Returns the underlying string to OpSchema map.
*
* You should not manually manipulate the map object returned. Instead, use
* the macros defined such as OPERATOR_SCHEMA to register your operator
* schema.
*
* We wrap it inside a function to avoid the static initialization order
* fiasco.
*/
static CaffeMap<string, OpSchema>& map();
};
// Helper function for creating a simple TensorShape with dimensions and type
template <typename T_I = int>
inline TensorShape CreateTensorShape(
vector<T_I> dims,
::caffe2::TensorProto_DataType dt) {
TensorShape ts;
for (T_I d : dims) {
ts.add_dims(d);
}
ts.set_data_type(dt);
return ts;
}
// Helper function
inline vector<int64_t> GetDimsVector(const TensorShape& shape) {
vector<int64_t> dims;
for (auto d : shape.dims()) {
dims.push_back(d);
}
return dims;
}
// Helper function
inline uint64_t nElemFromDim(const TensorShape& X, int dim = 0) {
CAFFE_ENFORCE_GE(dim, 0, "Invalid maximum index specified");
uint64_t nElem = 1;
for (const auto i : c10::irange(dim, X.dims_size())) {
nElem *= X.dims(i);
}
return nElem;
}
// Helper function
inline uint64_t nElemBetweenDim(const TensorShape& X, int start, int stop) {
CAFFE_ENFORCE_GE(start, 0, "Invalid maximum index specified");
CAFFE_ENFORCE_LE(stop, X.dims_size(), "Invalid maximum index specified");
uint64_t nElem = 1;
for (const auto i : c10::irange(start, stop)) {
nElem *= X.dims(i);
}
return nElem;
}
// Helper function for infer op inputs and outputs device information.
inline std::pair<std::vector<DeviceOption>, std::vector<DeviceOption>>
InferOpInputOutputDevice(const OperatorDef& op) {
auto op_schema = OpSchemaRegistry::Schema(op.type());
if (op_schema) {
// op_schema found
return op_schema->InferDevice(op);
} else {
// No schema for op.type registered
auto temp_schema = OpSchema();
return temp_schema.InferDevice(op);
}
}
template <uint64_t OpsPerPoint>
OpSchema::Cost PointwiseCostInference(
const OperatorDef& /* unused */,
const vector<TensorShape>& inputs) {
struct OpSchema::Cost c;
const TensorShape X = inputs[0];
uint64_t nElemX = nElemFromDim(X);
uint64_t nElemRead = 0;
for (const auto i : c10::irange(inputs.size())) {
nElemRead += nElemFromDim(inputs[i]);
}
c.flops = nElemX * OpsPerPoint;
auto const& X_element_size_byte =
DataTypeToTypeMeta(X.data_type()).itemsize();
c.bytes_read = nElemRead * X_element_size_byte;
c.bytes_written = nElemX * X_element_size_byte;
return c;
}
} // namespace caffe2
#if defined(_MSC_VER)
#define EXPORT_IF_NOT_MSVC
#else
#define EXPORT_IF_NOT_MSVC C10_EXPORT
#endif
#ifndef CAFFE2_NO_OPERATOR_SCHEMA
#define OPERATOR_SCHEMA(name) \
EXPORT_IF_NOT_MSVC void CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name(){}; \
static OpSchema* C10_ANONYMOUS_VARIABLE(name) CAFFE2_UNUSED = \
&OpSchemaRegistry::NewSchema(#name, __FILE__, __LINE__)
#else // CAFFE2_NO_OPERATOR_SCHEMA
#define OPERATOR_SCHEMA(name) \
EXPORT_IF_NOT_MSVC void CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name(){}; \
static OpSchema* C10_ANONYMOUS_VARIABLE(name) CAFFE2_UNUSED = \
1 ? nullptr : &OpSchemaRegistry::NewSchema(#name, __FILE__, __LINE__)
#endif // CAFFE2_NO_OPERATOR_SCHEMA
#ifdef CAFFE2_NO_GRADIENT_OPS
#define GRADIENT_OPERATOR_SCHEMA(name) \
EXPORT_IF_NOT_MSVC void CAFFE2_PLEASE_ADD_OPERATOR_SCHEMA_FOR_##name(){}; \
static OpSchema* C10_ANONYMOUS_VARIABLE(name) CAFFE2_UNUSED = \
1 ? nullptr : &OpSchemaRegistry::NewSchema(#name, __FILE__, __LINE__)
#else
#define GRADIENT_OPERATOR_SCHEMA(name) OPERATOR_SCHEMA(name)
#endif
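// A fuller registration sketch than the minimal example in the OpSchema class
// comment; "MyRelu" is a hypothetical operator name:
//
//   OPERATOR_SCHEMA(MyRelu)
//       .NumInputs(1)
//       .NumOutputs(1)
//       .AllowInplace({{0, 0}})
//       .IdenticalTypeAndShape()
//       .CostInferenceFunction(PointwiseCostInference<2>)
//       .Input(0, "X", "input tensor")
//       .Output(0, "Y", "output tensor")
//       .SetDoc("Applies a hypothetical elementwise op.");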
#endif // CAFFE2_CORE_OPERATOR_SCHEMA_H_
| 18,533
| 29.23491
| 80
|
h
|
null |
pytorch-main/caffe2/core/prof_dag_counters.h
|
#ifndef PROF_DAG_COUNTERS_H
#define PROF_DAG_COUNTERS_H
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/timer.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/proto/prof_dag.pb.h"
#include <unordered_map>
namespace caffe2 {
class ProfDAGStats {
public:
ProfDAGStats() : sum_(0.0), sqrsum_(0.0), cnt_(0) {}
explicit ProfDAGStats(float time_ms)
: sum_(time_ms), sqrsum_(time_ms * time_ms), cnt_(1) {}
ProfDAGStats& operator+=(const ProfDAGStats& rhs) {
sum_ += rhs.sum_;
sqrsum_ += rhs.sqrsum_;
cnt_ += rhs.cnt_;
return *this;
}
std::pair<float, float> computeMoments() const {
CAFFE_ENFORCE_GT(cnt_, 0U);
float mean = sum_ / cnt_;
float stddev = std::sqrt(std::abs(sqrsum_ / cnt_ - mean * mean));
return {mean, stddev};
}
float sum() const {
return sum_;
}
float sqrsum() const {
return sqrsum_;
}
size_t cnt() const {
return cnt_;
}
private:
float sum_;
float sqrsum_;
size_t cnt_;
};
class ProfDAGReport {
public:
friend class ProfDAGCounters;
  // Collects the execution time per operator type
ProfDAGProtos GetOperatorStats() const;
  // Collects the execution time of each operator; the output is
  // formatted as a map: (netName__opIndex__opType, cost)
ProfDAGProtos GetPerOperatorCost() const;
ProfDAGReport& operator+=(const ProfDAGReport& rhs);
void PrintStats();
private:
ProfDAGProto statsProto(
const std::string& name,
const ProfDAGStats& stats,
const std::vector<std::string>& op_extra_info) const;
bool hasStats() const;
std::vector<std::string> op_types_;
std::vector<std::vector<std::string>> op_extra_info_;
std::string net_name_;
int num_runs_;
// Cumulative stats per operator instance of the net
std::vector<ProfDAGStats> time_per_op_total_;
// Cumulative stats per unique operator type
CaffeMap<std::string, ProfDAGStats> time_per_op_type_total_;
CaffeMap<std::string, ProfDAGStats> times_per_run_per_type_total_;
ProfDAGStats runtime_stats_;
};
/**
* A simple wrapper around prof_dag's counters
*/
class ProfDAGCounters {
public:
explicit ProfDAGCounters(const std::shared_ptr<const NetDef>& net_def);
// ReportRunStart/End are called at the beginning and at the end of
// each net's run
void ReportRunStart();
void ReportRunEnd();
void AddPerOpStartTime(size_t op_id);
void AddPerOpEndTime(size_t op_id);
void AddPerOpAsyncEndTime(size_t op_id);
ProfDAGReport GetReport() const;
private:
Timer timer_;
std::vector<float> op_start_times_run_;
std::vector<float> op_end_times_run_;
std::vector<float> op_async_end_times_run_;
ProfDAGReport report_;
};
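// A minimal usage sketch: the executor drives the counters around each run;
// the single op id 0 is hypothetical:
//
//   ProfDAGCounters counters(net_def);
//   counters.ReportRunStart();
//   counters.AddPerOpStartTime(0);
//   // ... run op 0 ...
//   counters.AddPerOpEndTime(0);
//   counters.ReportRunEnd();
//   counters.GetReport().PrintStats();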
} // namespace caffe2
#endif
| 2,751
| 21.933333
| 73
|
h
|
null |
pytorch-main/caffe2/core/qtensor.h
|
#ifndef CAFFE2_CORE_QTENSOR_H_
#define CAFFE2_CORE_QTENSOR_H_
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/tensor.h"
#include <c10/util/accumulate.h>
#include <c10/util/irange.h>
#include <c10/util/typeid.h>
#include <algorithm>
#include <climits>
#include <cstddef>
#include <vector>
namespace caffe2 {
template <class Context>
class C10_EXPORT QTensor {
public:
QTensor() {}
virtual ~QTensor() {}
/**
* @brief Creates a quantized tensor of the given dimension.
*
* Note that the actual data allocation is not going to be carried out until
* the first time mutable_data() is called.
*
* The underlying storage of the quantized tensor interleaves elements
* by bit depth.
*
* Labeled memory for tensor of size 6, precision 3
* [ E1[0] E2[0] E3[0] E4[0] E5[0] E6[0] ] // Least significant Bits
* [ E1[1] E2[1] E3[1] E4[1] E5[1] E6[1] ]
* [ E1[2] E2[2] E3[2] E4[2] E5[2] E6[2] ]
*
   * In the case of sign bits (see the signbit constructor argument), an extra bit
* per element is added:
*
* Labeled memory for tensor of size 6, precision 3, sign bit enabled
* [ E1[0] E2[0] E3[0] E4[0] E5[0] E6[0] ]
* [ E1[1] E2[1] E3[1] E4[1] E5[1] E6[1] ]
* [ E1[2] E2[2] E3[2] E4[2] E5[2] E6[2] ]
* [ E1[s] E2[s] E3[s] E4[s] E5[s] E6[s] ]
* Where 's' is 1 if E is negative
*
* The reason for this layout is the ability to efficiently multiply
* many low precision integers as a sum of popcnt(A & B) * 1 << bit.
* Explained here: https://arxiv.org/abs/1606.06160
*/
// TODO: changing at::ArrayRef<int> to at::ArrayRef<int64_t>?
explicit QTensor(
at::ArrayRef<int> dims,
const unsigned char precision,
const bool signbit = false)
: precision_(precision), signed_(signbit) {
Resize(dims);
}
void Resize(at::ArrayRef<int> dim_source) {
if (dims_ != dim_source) {
const auto source_size = c10::multiply_integers(dim_source);
if (static_cast<size_t>(source_size * (precision_ + signed_)) > capacity_) {
data_ptr_.clear();
capacity_ = 0;
}
dims_ = dim_source.vec();
size_ = source_size;
}
}
void
SetBitAtIndex(const unsigned char bit, const size_t index, const bool value) {
// Get the mutable data at bit depth `bit`.
unsigned char* d = mutable_data();
CAFFE_ENFORCE(
bit < precision_ + signed_,
"Attempted to a set a bit that is not allocated.");
CAFFE_ENFORCE(bit * aligned_size() < capacity_);
auto idx = (aligned_size() * bit) / CHAR_BIT;
d = &d[idx];
idx = index / CHAR_BIT;
auto shift = CHAR_BIT - (index % CHAR_BIT) - 1;
if (value) {
d[idx] |= 1 << shift;
} else {
d[idx] &= ~(1 << shift);
}
}
bool GetBitAtIndex(const unsigned char bit, const size_t index) const {
// Get the data at bit depth `bit`
const unsigned char* d = data();
auto idx = (aligned_size() * bit) / CHAR_BIT;
d = &d[idx];
idx = index / CHAR_BIT;
auto shift = CHAR_BIT - (index % CHAR_BIT) - 1;
return d[idx] & (1 << shift);
}
void SetPrecision(const unsigned char precision) {
precision_ = precision;
data_ptr_.clear();
}
void SetSigned(const bool make_signed = true) {
signed_ = make_signed;
data_ptr_.clear();
}
void SetScale(const double scale) {
scale_ = scale;
}
void SetBias(const double bias) {
bias_ = bias;
}
unsigned char* mutable_data() {
if (!data_ptr_) {
data_ptr_ = Context::New(nbytes());
capacity_ = nbytes() * CHAR_BIT;
}
CAFFE_ENFORCE(capacity_ == nbytes() * CHAR_BIT);
return static_cast<unsigned char*>(data_ptr_.get());
}
inline const unsigned char* data() const {
return static_cast<unsigned char*>(data_ptr_.get());
}
inline size_t size() const {
return size_;
}
inline unsigned char alignment() const {
return alignment_;
}
inline unsigned char precision() const {
return precision_;
}
inline at::ArrayRef<int> sizes() const {
return dims_;
}
// TODO: deprecate?
inline at::ArrayRef<int> dims() const {
return dims_;
}
inline bool is_signed() const {
return signed_;
}
/**
* Returns the number of dimensions of the data.
*/
inline int ndim() const {
return dims_.size();
}
inline size_t aligned_size() const {
return alignment_ * ((size_ + alignment_ - 1) / alignment_);
}
inline size_t nbytes() const {
return (aligned_size() * (precision_ + signed_)) / CHAR_BIT;
}
inline double scale() const {
return scale_;
}
inline double bias() const {
return bias_;
}
/**
* Returns the i-th dimension of the qtensor in int.
*/
inline int dim32(const int i) const {
TORCH_DCHECK_LT(i, static_cast<int>(dims_.size())) << "Exceeding ndim limit " << dims_.size();
TORCH_DCHECK_GE(i, 0) << "Cannot have negative index";
CAFFE_ENFORCE_LT(dims_[i], std::numeric_limits<int>::max());
return static_cast<int>(dims_[i]);
}
/**
* Returns the 'canonical' version of a (usually) user-specified axis,
* allowing for negative indexing (e.g., -1 for the last axis).
*
* @param axis_index the axis index.
* If 0 <= index < ndim(), return index.
* If -ndim <= index <= -1, return (ndim() - (-index)),
* e.g., the last axis index (ndim() - 1) if index == -1,
* the second to last if index == -2, etc.
* Dies on out of range index.
*/
inline int canonical_axis_index(int axis_index) const {
CAFFE_ENFORCE_GE(axis_index, -ndim());
CAFFE_ENFORCE_LT(axis_index, ndim());
if (axis_index < 0) {
return axis_index + ndim();
}
return axis_index;
}
/**
* Return product of all dimensions starting from K.
*/
inline int64_t size_from_dim(int k) const {
int64_t r = 1;
for (const auto i : c10::irange(k, dims_.size())) {
r *= dims_[i];
}
return r;
}
/**
* Product of all dims up to.
*/
inline int64_t size_to_dim(int k) const {
CAFFE_ENFORCE(k < dims_.size());
int64_t r = 1;
for (const auto i : c10::irange(k)) {
r *= dims_[i];
}
return r;
}
protected:
std::vector<int> dims_;
size_t size_ = 0;
// Precision in bits.
unsigned char precision_ = CHAR_BIT;
// Bit alignment.
unsigned char alignment_ = CHAR_BIT;
// Allocated data.
at::DataPtr data_ptr_;
// value = scale_ * (x + bias_)
double scale_;
double bias_;
bool signed_ = false;
// Capacity in bits.
size_t capacity_ = 0;
};
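// Usage sketch (illustrative only; the variable names and the CPUContext
// choice are assumptions, not taken from this header):
//   QTensor<CPUContext> q({2, 4}, /*precision=*/4, /*signbit=*/false);
//   q.SetScale(0.5);
//   q.SetBias(0.0);
//   // Bits are stored plane-by-plane, one plane per bit of precision.
//   q.SetBitAtIndex(/*bit=*/0, /*index=*/3, /*value=*/true);
//   bool b = q.GetBitAtIndex(/*bit=*/0, /*index=*/3);  // true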
} // namespace caffe2
#endif // CAFFE2_CORE_QTENSOR_H_
| 6,657
| 24.412214
| 98
|
h
|
null |
pytorch-main/caffe2/core/qtensor_serialization.h
|
#ifndef CAFFE2_CORE_QTENSOR_SERIALIZATION_H_
#define CAFFE2_CORE_QTENSOR_SERIALIZATION_H_
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/qtensor.h"
namespace caffe2 {
constexpr auto kQTensorBlobQType = "QTensor";
template <class Context>
class QTensorSerializer : public BlobSerializerBase {
public:
QTensorSerializer() : context_() {}
~QTensorSerializer() override {}
/**
* Serializes a Blob. Note that this blob has to contain QTensor<Context>.
*/
void Serialize(
const void* pointer,
TypeMeta typeMeta,
const string& name,
SerializationAcceptor acceptor) override;
private:
Context context_;
};
template <class Context>
class QTensorDeserializer : public BlobDeserializerBase {
public:
void Deserialize(const BlobProto& proto, Blob* blob) override;
void Deserialize(const QTensorProto& proto, QTensor<Context>* tensor);
};
template <class Context>
void QTensorSerializer<Context>::Serialize(
const void* pointer,
TypeMeta typeMeta,
const string& name,
BlobSerializerBase::SerializationAcceptor acceptor) {
CAFFE_ENFORCE(typeMeta.Match<QTensor<Context>>());
const auto& qtensor = *static_cast<const QTensor<Context>*>(pointer);
BlobProto blob_proto;
blob_proto.set_name(name);
blob_proto.set_type(kQTensorBlobQType);
QTensorProto& proto = *blob_proto.mutable_qtensor();
proto.set_name(name);
for (const auto i : c10::irange(qtensor.ndim())) {
proto.add_dims(qtensor.dim32(i));
}
proto.set_precision(qtensor.precision());
proto.set_scale(qtensor.scale());
proto.set_bias(qtensor.bias());
proto.set_is_signed(qtensor.is_signed());
detail::CopyToProtoWithCast(
qtensor.nbytes(), qtensor.data(), proto.mutable_data(), &this->context_);
acceptor(name, SerializeBlobProtoAsString_EnforceCheck(blob_proto));
}
template <class Context>
void QTensorDeserializer<Context>::Deserialize(
const BlobProto& blob_proto,
Blob* blob) {
Deserialize(blob_proto.qtensor(), blob->GetMutable<QTensor<Context>>());
}
template <class Context>
void QTensorDeserializer<Context>::Deserialize(
const QTensorProto& proto,
QTensor<Context>* qtensor) {
Context context{};
vector<int> dims;
for (const int d : proto.dims()) {
dims.push_back(d);
}
qtensor->Resize(dims);
qtensor->SetPrecision(proto.precision());
qtensor->SetScale(proto.scale());
qtensor->SetBias(proto.bias());
qtensor->SetSigned(proto.is_signed());
detail::CopyFromProtoWithCast(
qtensor->nbytes(), proto.data(), qtensor->mutable_data(), &context);
}
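// Usage sketch (illustrative; assumes a CPUContext-backed QTensor and a
// caller-provided acceptor):
//   QTensor<CPUContext> qtensor;
//   qtensor.Resize(std::vector<int>{8});
//   QTensorSerializer<CPUContext> serializer;
//   serializer.Serialize(
//       &qtensor,
//       TypeMeta::Make<QTensor<CPUContext>>(),
//       "my_qtensor",
//       [](const std::string& name, const std::string& data) { /* store */ });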
} // namespace caffe2
#endif // CAFFE2_CORE_QTENSOR_SERIALIZATION_H_
| 2,641
| 28.355556
| 79
|
h
|
null |
pytorch-main/caffe2/core/scope_guard.h
|
/**
* Copyright 2016 Facebook
* @author Tudor Bosman (tudorb@fb.com)
*/
#pragma once
#include <cstddef>
#include <functional>
#include <new>
#include <type_traits>
#include <utility>
namespace caffe2 {
// Copied from folly/ScopeGuard.h
namespace detail {
class ScopeGuardImplBase {
public:
void dismiss() noexcept {
dismissed_ = true;
}
protected:
ScopeGuardImplBase() noexcept : dismissed_(false) {}
static ScopeGuardImplBase makeEmptyScopeGuard() noexcept {
return ScopeGuardImplBase{};
}
template <typename T>
static const T& asConst(const T& t) noexcept {
return t;
}
bool dismissed_;
};
template <typename FunctionType>
class ScopeGuardImpl : public ScopeGuardImplBase {
public:
explicit ScopeGuardImpl(FunctionType& fn) noexcept(
std::is_nothrow_copy_constructible<FunctionType>::value)
: ScopeGuardImpl(
asConst(fn),
makeFailsafe(std::is_nothrow_copy_constructible<FunctionType>{},
&fn)) {}
explicit ScopeGuardImpl(const FunctionType& fn) noexcept(
std::is_nothrow_copy_constructible<FunctionType>::value)
: ScopeGuardImpl(
fn,
makeFailsafe(std::is_nothrow_copy_constructible<FunctionType>{},
&fn)) {}
explicit ScopeGuardImpl(FunctionType&& fn) noexcept(
std::is_nothrow_move_constructible<FunctionType>::value)
: ScopeGuardImpl(
std::move_if_noexcept(fn),
makeFailsafe(std::is_nothrow_move_constructible<FunctionType>{},
&fn)) {}
ScopeGuardImpl(ScopeGuardImpl&& other) noexcept(
std::is_nothrow_move_constructible<FunctionType>::value)
: function_(std::move_if_noexcept(other.function_)) {
// If the above line attempts a copy and the copy throws, other is
// left owning the cleanup action and will execute it (or not) depending
// on the value of other.dismissed_. The following lines only execute
// if the move/copy succeeded, in which case *this assumes ownership of
// the cleanup action and dismisses other.
dismissed_ = other.dismissed_;
other.dismissed_ = true;
}
~ScopeGuardImpl() noexcept {
if (!dismissed_) {
execute();
}
}
private:
static ScopeGuardImplBase makeFailsafe(std::true_type, const void*) noexcept {
return makeEmptyScopeGuard();
}
template <typename Fn>
static auto makeFailsafe(std::false_type, Fn* fn) noexcept
-> ScopeGuardImpl<decltype(std::ref(*fn))> {
return ScopeGuardImpl<decltype(std::ref(*fn))>{std::ref(*fn)};
}
template <typename Fn>
explicit ScopeGuardImpl(Fn&& fn, ScopeGuardImplBase&& failsafe)
: ScopeGuardImplBase{}, function_(std::forward<Fn>(fn)) {
failsafe.dismiss();
}
void* operator new(std::size_t) = delete;
void execute() noexcept { function_(); }
FunctionType function_;
};
template <typename F>
using ScopeGuardImplDecay = ScopeGuardImpl<typename std::decay<F>::type>;
} // namespace detail
/**
* ScopeGuard is a general implementation of the "Initialization is
* Resource Acquisition" idiom. Basically, it guarantees that a function
* is executed upon leaving the current scope unless otherwise told.
*
* The MakeGuard() function is used to create a new ScopeGuard object.
* It can be instantiated with a lambda function, a std::function<void()>,
* a functor, or a void(*)() function pointer.
*
*
* Usage example: Add a friend to memory iff it is also added to the db.
*
* void User::addFriend(User& newFriend) {
* // add the friend to memory
* friends_.push_back(&newFriend);
*
* // If the db insertion that follows fails, we should
* // remove it from memory.
* auto guard = MakeGuard([&] { friends_.pop_back(); });
*
* // this will throw an exception upon error, which
* // makes the ScopeGuard execute UserCont::pop_back()
* // once the Guard's destructor is called.
* db_->addFriend(GetName(), newFriend.GetName());
*
* // an exception was not thrown, so don't execute
* // the Guard.
* guard.dismiss();
* }
*
* Examine ScopeGuardTest.cpp for some more sample usage.
*
* Stolen from:
* Andrei's and Petru Marginean's CUJ article:
* http://drdobbs.com/184403758
* and the loki library:
* http://loki-lib.sourceforge.net/index.php?n=Idioms.ScopeGuardPointer
* and triendl.kj article:
* http://www.codeproject.com/KB/cpp/scope_guard.aspx
*/
template <typename F>
detail::ScopeGuardImplDecay<F> MakeGuard(F&& f) noexcept(
noexcept(detail::ScopeGuardImplDecay<F>(static_cast<F&&>(f)))) {
return detail::ScopeGuardImplDecay<F>(static_cast<F&&>(f));
}
} // namespace caffe2
| 4,675
| 28.408805
| 80
|
h
|
null |
pytorch-main/caffe2/core/static_tracepoint_elfx86.h
|
#pragma once
// Default constraint for the probe arguments as operands.
#ifndef CAFFE_SDT_ARG_CONSTRAINT
#define CAFFE_SDT_ARG_CONSTRAINT "nor"
#endif
// Instruction to emit for the probe.
#define CAFFE_SDT_NOP nop
// Note section properties.
#define CAFFE_SDT_NOTE_NAME "stapsdt"
#define CAFFE_SDT_NOTE_TYPE 3
// Size of address depending on platform.
#ifdef __LP64__
#define CAFFE_SDT_ASM_ADDR .8byte
#else
#define CAFFE_SDT_ASM_ADDR .4byte
#endif
// Assembler helper Macros.
#define CAFFE_SDT_S(x) #x
#define CAFFE_SDT_ASM_1(x) CAFFE_SDT_S(x) "\n"
#define CAFFE_SDT_ASM_2(a, b) CAFFE_SDT_S(a) "," CAFFE_SDT_S(b) "\n"
#define CAFFE_SDT_ASM_3(a, b, c) CAFFE_SDT_S(a) "," CAFFE_SDT_S(b) "," \
CAFFE_SDT_S(c) "\n"
#define CAFFE_SDT_ASM_STRING(x) CAFFE_SDT_ASM_1(.asciz CAFFE_SDT_S(x))
// Helper to determine the size of an argument.
#define CAFFE_SDT_ISARRAY(x) (__builtin_classify_type(x) == 14)
#define CAFFE_SDT_ARGSIZE(x) (CAFFE_SDT_ISARRAY(x) ? sizeof(void*) : sizeof(x))
// Format of each probe arguments as operand.
// Size of the argument tagged with CAFFE_SDT_Sn, with "n" constraint.
// Value of the argument tagged with CAFFE_SDT_An, with configured constraint.
#define CAFFE_SDT_ARG(n, x) \
[CAFFE_SDT_S##n] "n" ((size_t)CAFFE_SDT_ARGSIZE(x)), \
[CAFFE_SDT_A##n] CAFFE_SDT_ARG_CONSTRAINT (x)
// Templates to append arguments as operands.
#define CAFFE_SDT_OPERANDS_0() [__sdt_dummy] "g" (0)
#define CAFFE_SDT_OPERANDS_1(_1) CAFFE_SDT_ARG(1, _1)
#define CAFFE_SDT_OPERANDS_2(_1, _2) \
CAFFE_SDT_OPERANDS_1(_1), CAFFE_SDT_ARG(2, _2)
#define CAFFE_SDT_OPERANDS_3(_1, _2, _3) \
CAFFE_SDT_OPERANDS_2(_1, _2), CAFFE_SDT_ARG(3, _3)
#define CAFFE_SDT_OPERANDS_4(_1, _2, _3, _4) \
CAFFE_SDT_OPERANDS_3(_1, _2, _3), CAFFE_SDT_ARG(4, _4)
#define CAFFE_SDT_OPERANDS_5(_1, _2, _3, _4, _5) \
CAFFE_SDT_OPERANDS_4(_1, _2, _3, _4), CAFFE_SDT_ARG(5, _5)
#define CAFFE_SDT_OPERANDS_6(_1, _2, _3, _4, _5, _6) \
CAFFE_SDT_OPERANDS_5(_1, _2, _3, _4, _5), CAFFE_SDT_ARG(6, _6)
#define CAFFE_SDT_OPERANDS_7(_1, _2, _3, _4, _5, _6, _7) \
CAFFE_SDT_OPERANDS_6(_1, _2, _3, _4, _5, _6), CAFFE_SDT_ARG(7, _7)
#define CAFFE_SDT_OPERANDS_8(_1, _2, _3, _4, _5, _6, _7, _8) \
CAFFE_SDT_OPERANDS_7(_1, _2, _3, _4, _5, _6, _7), CAFFE_SDT_ARG(8, _8)
// Templates to reference the arguments from operands in note section.
#define CAFFE_SDT_ARGFMT(no) %n[CAFFE_SDT_S##no]@%[CAFFE_SDT_A##no]
#define CAFFE_SDT_ARG_TEMPLATE_0 /*No arguments*/
#define CAFFE_SDT_ARG_TEMPLATE_1 CAFFE_SDT_ARGFMT(1)
#define CAFFE_SDT_ARG_TEMPLATE_2 CAFFE_SDT_ARG_TEMPLATE_1 CAFFE_SDT_ARGFMT(2)
#define CAFFE_SDT_ARG_TEMPLATE_3 CAFFE_SDT_ARG_TEMPLATE_2 CAFFE_SDT_ARGFMT(3)
#define CAFFE_SDT_ARG_TEMPLATE_4 CAFFE_SDT_ARG_TEMPLATE_3 CAFFE_SDT_ARGFMT(4)
#define CAFFE_SDT_ARG_TEMPLATE_5 CAFFE_SDT_ARG_TEMPLATE_4 CAFFE_SDT_ARGFMT(5)
#define CAFFE_SDT_ARG_TEMPLATE_6 CAFFE_SDT_ARG_TEMPLATE_5 CAFFE_SDT_ARGFMT(6)
#define CAFFE_SDT_ARG_TEMPLATE_7 CAFFE_SDT_ARG_TEMPLATE_6 CAFFE_SDT_ARGFMT(7)
#define CAFFE_SDT_ARG_TEMPLATE_8 CAFFE_SDT_ARG_TEMPLATE_7 CAFFE_SDT_ARGFMT(8)
// Structure of note section for the probe.
#define CAFFE_SDT_NOTE_CONTENT(provider, name, arg_template) \
CAFFE_SDT_ASM_1(990: CAFFE_SDT_NOP) \
CAFFE_SDT_ASM_3( .pushsection .note.stapsdt,"","note") \
CAFFE_SDT_ASM_1( .balign 4) \
CAFFE_SDT_ASM_3( .4byte 992f-991f, 994f-993f, CAFFE_SDT_NOTE_TYPE) \
CAFFE_SDT_ASM_1(991: .asciz CAFFE_SDT_NOTE_NAME) \
CAFFE_SDT_ASM_1(992: .balign 4) \
CAFFE_SDT_ASM_1(993: CAFFE_SDT_ASM_ADDR 990b) \
CAFFE_SDT_ASM_1( CAFFE_SDT_ASM_ADDR 0) /*Reserved for Semaphore address*/\
CAFFE_SDT_ASM_1( CAFFE_SDT_ASM_ADDR 0) /*Reserved for Semaphore name*/ \
CAFFE_SDT_ASM_STRING(provider) \
CAFFE_SDT_ASM_STRING(name) \
CAFFE_SDT_ASM_STRING(arg_template) \
CAFFE_SDT_ASM_1(994: .balign 4) \
CAFFE_SDT_ASM_1( .popsection)
// Main probe Macro.
#define CAFFE_SDT_PROBE(provider, name, n, arglist) \
__asm__ __volatile__ ( \
CAFFE_SDT_NOTE_CONTENT(provider, name, CAFFE_SDT_ARG_TEMPLATE_##n) \
:: CAFFE_SDT_OPERANDS_##n arglist \
) \
// Helper Macros to handle variadic arguments.
#define CAFFE_SDT_NARG_(_0, _1, _2, _3, _4, _5, _6, _7, _8, N, ...) N
#define CAFFE_SDT_NARG(...) \
CAFFE_SDT_NARG_(__VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#define CAFFE_SDT_PROBE_N(provider, name, N, ...) \
CAFFE_SDT_PROBE(provider, name, N, (__VA_ARGS__))
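// Usage sketch (illustrative): emitting a probe with an explicit argument
// count. In practice the CAFFE_SDT(...) wrapper (see static_tracepoint.h,
// used by stats.h) computes the count automatically.
//   int x = 1, y = 2;
//   CAFFE_SDT_PROBE_N(caffe2, my_probe, 2, x, y);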
| 5,555
| 54.009901
| 80
|
h
|
null |
pytorch-main/caffe2/core/stats.h
|
#pragma once
#include <atomic>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>
#include "caffe2/core/logging.h"
#include "caffe2/core/static_tracepoint.h"
namespace caffe2 {
class TORCH_API StatValue {
std::atomic<int64_t> v_{0};
public:
int64_t increment(int64_t inc) {
return v_ += inc;
}
int64_t reset(int64_t value = 0) {
return v_.exchange(value);
}
int64_t get() const {
return v_.load();
}
};
struct TORCH_API ExportedStatValue {
std::string key;
int64_t value;
std::chrono::time_point<std::chrono::high_resolution_clock> ts;
};
/**
* @brief Holds names and values of counters exported from a StatRegistry.
*/
using ExportedStatList = std::vector<ExportedStatValue>;
using ExportedStatMap = std::unordered_map<std::string, int64_t>;
TORCH_API ExportedStatMap toMap(const ExportedStatList& stats);
/**
* @brief Holds a map of atomic counters keyed by name.
*
* The StatRegistry singleton, accessed through StatRegistry::get(), holds
* counters registered through the macro CAFFE_EXPORTED_STAT. Example of usage:
*
* struct MyCaffeClass {
* MyCaffeClass(const std::string& instanceName): stats_(instanceName) {}
* void run(int numRuns) {
* try {
* CAFFE_EVENT(stats_, num_runs, numRuns);
* tryRun(numRuns);
* CAFFE_EVENT(stats_, num_successes);
* } catch (std::exception& e) {
* CAFFE_EVENT(stats_, num_failures, 1, "arg_to_usdt", e.what());
* }
* CAFFE_EVENT(stats_, usdt_only, 1, "arg_to_usdt");
* }
* private:
* struct MyStats {
* CAFFE_STAT_CTOR(MyStats);
* CAFFE_EXPORTED_STAT(num_runs);
* CAFFE_EXPORTED_STAT(num_successes);
* CAFFE_EXPORTED_STAT(num_failures);
* CAFFE_STAT(usdt_only);
* } stats_;
* };
*
* int main() {
* MyCaffeClass a("first");
* MyCaffeClass b("second");
* for (const auto i : c10::irange(10)) {
* a.run(10);
* b.run(5);
* }
* ExportedStatList finalStats;
* StatRegistry::get().publish(finalStats);
* }
*
* For every new instance of MyCaffeClass, a new counter is created with
 * the instance name as prefix. Every time run() is called, the corresponding
 * counter will be incremented by the given value, or by 1 if no value is provided.
*
* Counter values can then be exported into an ExportedStatList. In the
 * example above, assuming "tryRun" never throws, `finalStats` will be
* populated as follows:
*
* first/num_runs 100
* first/num_successes 10
* first/num_failures 0
* second/num_runs 50
* second/num_successes 10
* second/num_failures 0
*
* The event usdt_only is not present in ExportedStatList because it is declared
* as CAFFE_STAT, which does not create a counter.
*
* Additionally, for each call to CAFFE_EVENT, a USDT probe is generated.
* The probe will be set up with the following arguments:
* - Probe name: field name (e.g. "num_runs")
* - Arg #0: instance name (e.g. "first", "second")
* - Arg #1: For CAFFE_EXPORTED_STAT, value of the updated counter
* For CAFFE_STAT, -1 since no counter is available
* - Args ...: Arguments passed to CAFFE_EVENT, including update value
* when provided.
*
* It is also possible to create additional StatRegistry instances beyond
* the singleton. These instances are not automatically populated with
* CAFFE_EVENT. Instead, they can be populated from an ExportedStatList
* structure by calling StatRegistry::update().
*
*/
class TORCH_API StatRegistry {
std::mutex mutex_;
std::unordered_map<std::string, std::unique_ptr<StatValue>> stats_;
public:
/**
* Retrieve the singleton StatRegistry, which gets populated
* through the CAFFE_EVENT macro.
*/
static StatRegistry& get();
/**
* Add a new counter with given name. If a counter for this name already
* exists, returns a pointer to it.
*/
StatValue* add(const std::string& name);
/**
* Populate an ExportedStatList with current counter values.
* If `reset` is true, resets all counters to zero. It is guaranteed that no
* count is lost.
*/
void publish(ExportedStatList& exported, bool reset = false);
ExportedStatList publish(bool reset = false) {
ExportedStatList stats;
publish(stats, reset);
return stats;
}
/**
* Update values of counters contained in the given ExportedStatList to
* the values provided, creating counters that don't exist.
*/
void update(const ExportedStatList& data);
~StatRegistry();
};
struct TORCH_API Stat {
std::string groupName;
std::string name;
Stat(const std::string& gn, const std::string& n) : groupName(gn), name(n) {}
template <typename... Unused>
int64_t increment(Unused...) {
return -1;
}
};
class TORCH_API ExportedStat : public Stat {
StatValue* value_;
public:
ExportedStat(const std::string& gn, const std::string& n)
: Stat(gn, n), value_(StatRegistry::get().add(gn + "/" + n)) {}
int64_t increment(int64_t value = 1) {
return value_->increment(value);
}
template <typename T, typename Unused1, typename... Unused>
int64_t increment(T value, Unused1, Unused...) {
return increment(value);
}
};
class TORCH_API AvgExportedStat : public ExportedStat {
private:
ExportedStat count_;
public:
AvgExportedStat(const std::string& gn, const std::string& n)
: ExportedStat(gn, n + "/sum"), count_(gn, n + "/count") {}
int64_t increment(int64_t value = 1) {
count_.increment();
return ExportedStat::increment(value);
}
template <typename T, typename Unused1, typename... Unused>
int64_t increment(T value, Unused1, Unused...) {
return increment(value);
}
};
class TORCH_API StdDevExportedStat : public ExportedStat {
// Uses an offset (first_) to remove issue of cancellation
// Variance is then (sumsqoffset_ - (sumoffset_^2) / count_) / (count_ - 1)
private:
ExportedStat count_;
ExportedStat sumsqoffset_;
ExportedStat sumoffset_;
std::atomic<int64_t> first_{std::numeric_limits<int64_t>::min()};
int64_t const_min_{std::numeric_limits<int64_t>::min()};
public:
StdDevExportedStat(const std::string& gn, const std::string& n)
: ExportedStat(gn, n + "/sum"),
count_(gn, n + "/count"),
sumsqoffset_(gn, n + "/sumsqoffset"),
sumoffset_(gn, n + "/sumoffset") {}
int64_t increment(int64_t value = 1) {
first_.compare_exchange_strong(const_min_, value);
int64_t offset_value = first_.load();
int64_t orig_value = value;
value -= offset_value;
count_.increment();
sumsqoffset_.increment(value * value);
sumoffset_.increment(value);
return ExportedStat::increment(orig_value);
}
template <typename T, typename Unused1, typename... Unused>
int64_t increment(T value, Unused1, Unused...) {
return increment(value);
}
};
class TORCH_API DetailedExportedStat : public ExportedStat {
private:
std::vector<ExportedStat> details_;
public:
DetailedExportedStat(const std::string& gn, const std::string& n)
: ExportedStat(gn, n) {}
void setDetails(const std::vector<std::string>& detailNames) {
details_.clear();
for (const auto& detailName : detailNames) {
details_.emplace_back(groupName, name + "/" + detailName);
}
}
template <typename T, typename... Unused>
int64_t increment(T value, size_t detailIndex, Unused...) {
if (detailIndex < details_.size()) {
details_[detailIndex].increment(value);
}
return ExportedStat::increment(value);
}
};
class TORCH_API StaticStat : public Stat {
private:
StatValue* value_;
public:
StaticStat(const std::string& groupName, const std::string& name)
: Stat(groupName, name),
value_(StatRegistry::get().add(groupName + "/" + name)) {}
int64_t increment(int64_t value = 1) {
return value_->reset(value);
}
template <typename T, typename Unused1, typename... Unused>
int64_t increment(T value, Unused1, Unused...) {
return increment(value);
}
};
namespace detail {
template <class T>
struct _ScopeGuard {
T f_;
std::chrono::high_resolution_clock::time_point start_;
explicit _ScopeGuard(T f)
: f_(f), start_(std::chrono::high_resolution_clock::now()) {}
~_ScopeGuard() {
using namespace std::chrono;
auto duration = high_resolution_clock::now() - start_;
int64_t nanos = duration_cast<nanoseconds>(duration).count();
f_(nanos);
}
// Using implicit cast to bool so that it can be used in an 'if' condition
// within CAFFE_DURATION macro below.
/* implicit */ operator bool() {
return true;
}
};
template <class T>
_ScopeGuard<T> ScopeGuard(T f) {
return _ScopeGuard<T>(f);
}
} // namespace detail
#define CAFFE_STAT_CTOR(ClassName) \
ClassName(std::string name) : groupName(name) {} \
std::string groupName
#define CAFFE_EXPORTED_STAT(name) \
ExportedStat name { \
groupName, #name \
}
#define CAFFE_AVG_EXPORTED_STAT(name) \
AvgExportedStat name { \
groupName, #name \
}
#define CAFFE_STDDEV_EXPORTED_STAT(name) \
StdDevExportedStat name { \
groupName, #name \
}
#define CAFFE_DETAILED_EXPORTED_STAT(name) \
DetailedExportedStat name { \
groupName, #name \
}
#define CAFFE_STAT(name) \
Stat name { \
groupName, #name \
}
#define CAFFE_STATIC_STAT(name) \
StaticStat name { \
groupName, #name \
}
#define CAFFE_EVENT(stats, field, ...) \
{ \
auto __caffe_event_value_ = stats.field.increment(__VA_ARGS__); \
CAFFE_SDT( \
field, \
stats.field.groupName.c_str(), \
__caffe_event_value_, \
##__VA_ARGS__); \
(void)__caffe_event_value_; \
}
#define CAFFE_DURATION(stats, field, ...) \
if (auto g = ::caffe2::detail::ScopeGuard([&](int64_t nanos) { \
CAFFE_EVENT(stats, field, nanos, ##__VA_ARGS__); \
}))
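// Usage sketch (illustrative; assumes a stats struct declared with
// CAFFE_STAT_CTOR and a hypothetical CAFFE_AVG_EXPORTED_STAT(run_time_ns)
// field):
//   CAFFE_DURATION(stats_, run_time_ns) {
//     DoExpensiveWork();
//   }
// The guarded block is timed and the elapsed nanoseconds are reported via
// CAFFE_EVENT when the scope guard is destroyed.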
} // namespace caffe2
| 10,443
| 28.091922
| 80
|
h
|
null |
pytorch-main/caffe2/core/storage.h
|
#ifndef CAFFE2_CORE_STORAGE_H_
#define CAFFE2_CORE_STORAGE_H_
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <sstream>
#include <type_traits>
#include <typeinfo>
#include <vector>
#include "caffe2/core/allocator.h"
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/flags.h"
#include "caffe2/core/logging.h"
#include <c10/util/typeid.h>
#include <c10/core/Allocator.h>
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/core/Storage.h>
#include <c10/core/StorageImpl.h>
namespace caffe2 {
using StorageImpl = at::StorageImpl;
using Storage = at::Storage;
} // namespace caffe2
#endif // CAFFE2_CORE_STORAGE_H_
| 733
| 20.588235
| 36
|
h
|
null |
pytorch-main/caffe2/core/tensor.h
|
#ifndef CAFFE2_CORE_TENSOR_H_
#define CAFFE2_CORE_TENSOR_H_
#include <c10/macros/Macros.h>
#include "caffe2/core/storage.h"
#include <c10/core/SymIntArrayRef.h>
#include <ATen/core/UndefinedTensorImpl.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/ExclusivelyOwned.h>
#include <c10/util/ExclusivelyOwnedTensorTraits.h>
#include <c10/util/intrusive_ptr.h>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
#endif
#if defined(EXPOSE_C2_OPS) || \
!defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE)
namespace at {
class Tensor;
};
#endif
namespace caffe2 {
using at::UndefinedTensorImpl;
/**
* @brief Tensor class holds a shared pointer to the implementation TensorImpl,
* redirects API calls to TensorImpl;
* Copying of Tensor results in sharing the same underlying implementation
* object
*
* NB: See TensorImpl for documentation on these methods.
*/
class TORCH_API Tensor final {
private:
enum Unsafe { IDoWantAliasing };
Tensor(const Tensor& other, Unsafe _) : impl_(other.getIntrusivePtr()) {}
protected:
using TensorImplPtr = c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>;
TensorImplPtr impl_;
void enforce_invariants();
public:
Tensor() : impl_() {}
Tensor(const Tensor& t) : impl_(t.impl_) {}
Tensor& operator=(const Tensor& t) {
impl_ = t.impl_;
return *this;
}
Tensor(Tensor&&) = default;
Tensor& operator=(Tensor&&) = default;
operator bool() const {
return impl_.defined();
}
TensorImpl* unsafeGetTensorImpl() const {
return impl_.get();
}
TensorImpl* unsafeReleaseTensorImpl() {
return impl_.release();
}
Tensor UnsafeSharedInstance() const {
return Tensor(*this, IDoWantAliasing);
}
/**
* @brief Creates a tensor of the given device type.
*
* Note that the actual data allocation is not going to be carried out until
* you resize the tensor and then call mutable_data().
*/
explicit Tensor(at::Device device)
: impl_(c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(
Storage::create_legacy(device),
c10::computeDispatchKey(c10::nullopt, at::kStrided, device),
TypeMeta())) {}
/**
* @brief Creates a tensor of the given dimension.
*
* Note that the actual data allocation is not going to be carried out until
* the first time mutable_data() is called.
*/
explicit Tensor(at::IntArrayRef dims, DeviceType type) : Tensor(type) {
// TODO: here, we create a Storage
// and immediately discard it in Resize() since
// reset_tensor will be true and FreeMemory will be called,
// we might want to avoid creating Storage twice?
Resize(dims);
}
// we want to preserve index information
explicit Tensor(at::IntArrayRef dims, at::Device device) : Tensor(device) {
Resize(dims);
}
// TODO: remove?
explicit Tensor(const vector<int>& dims, DeviceType type) : Tensor(type) {
Resize(dims);
}
/**
* @brief: Create a Tensor of at::DeviceType `type` and initialize it with
* src Tensor
*/
Tensor(const Tensor& src, DeviceType type) : Tensor(type) {
CopyFrom(src);
}
/**
* @brief Mutual conversion with at::Tensor
*
* The tensor will share the same instance (data, strides, sizes, etc) but
* a different subset of APIs would be available
*/
#if defined(EXPOSE_C2_OPS) || \
!defined(CAFFE2_IS_XPLAT_BUILD) && !defined(C10_MOBILE)
explicit Tensor(at::Tensor tensor);
explicit operator at::Tensor() const&;
explicit operator at::Tensor() &&;
#endif
bool is_same(const Tensor& other) const noexcept {
return impl_ == other.impl_;
}
Tensor Clone() const {
Tensor x(GetDevice());
x.CopyFrom(*this);
return x;
}
/**
* Clone self as a Tensor that share the same Storage,
* that is, both Tensors are views on the same Storage.
* If we change the sizes or strides of one Tensor, it
* does not affect the other Tensor that it shares Storage
* with.
* A similar yet different usage is `Tensor x = y;`, this
 * will make x and y point to the same Tensor, and resizing
* one of them will resize the other as well.
*
* TODO: Deduplicate this with THTensor_(newWithTensor)
* (exposed in ATen as at::alias but not otherwise available)
*/
Tensor Alias() const {
Tensor x(sizes(), GetDevice());
if (!dtype_initialized()) {
C10_LOG_EVERY_MS(WARNING, 1000)
<< "Cloning a tensor that don't have a data type (did you call mutable_data<T> on the tensor?)";
}
AT_ASSERTM(
storage_initialized(),
"Cloning a tensor that has no content and has size > 0");
// set_storage already sets data_type_ of TensorImpl
x.impl_->set_storage_and_dtype(storage(), impl_->dtype());
x.impl_->set_storage_offset(impl_->storage_offset());
x.impl_->set_sizes_and_strides(sizes(), strides());
return x;
}
DeviceType GetDeviceType() const {
return impl_->device_type();
}
at::Device GetDevice() const {
return impl_.get()->device();
}
/**
* @brief Copies the data from a source tensor, with a context provided to
* carry out the underlying memcpy operation. This method respects
* caffe2_keep_on_shrink.
*
* After CopyFrom, this function guarantees that the destination tensor will
* have the same initialization state and dtype as src. This function
* preserves the DeviceType of the source tensor (so, e.g., if you allocate
   * a tensor on CPU and then CopyFrom a CUDA tensor, that will do a
* CUDA-to-CPU transfer).
*
* 'async' parameter triggers async copy for CUDA tensors
*/
void CopyFrom(const Tensor& src, bool async = false);
/**
* @brief Extend the outer-most dimension of this tensor
* to dimension of `num`.
*/
void ExtendTo(int64_t num, float growthPct) const {
CAFFE_ENFORCE_GE_WITH_CALLER(impl_->dim(), 1);
CAFFE_ENFORCE_GE_WITH_CALLER(growthPct, 0);
Extend(num - impl_->size(0), growthPct);
}
void Extend(int64_t num, float growthPct) const {
impl_.get()->Extend(num, growthPct);
}
/**
* @brief Shrinks the outer-most dimension to given size, keeping the data.
*
* This method guarantees that no re-allocations are carried out, which means
* that the extra capacity after the end of the shrunk tensor is maintained.
* Notably, this function does NOT respect caffe2_keep_on_shrink.
*/
void ShrinkTo(int64_t outer_dim) const {
CAFFE_ENFORCE_WITH_CALLER(
impl_->is_contiguous(),
"Right now ShrinkTo is only supported on contiguous Tensor.");
CAFFE_ENFORCE_WITH_CALLER(impl_->dim() >= 1, "Tensor must be at least 1D");
CAFFE_ENFORCE_WITH_CALLER(
outer_dim <= impl_->size(0),
"New outer dimension must be smaller than current.");
CAFFE_ENFORCE(
impl_->storage().unique(),
"Can't call ShrinkTo on shared storage, please call Resize instead.");
impl_.get()->set_size(0, outer_dim);
}
template <class T>
void ReserveSpace(const T& outer_dim) const {
impl_.get()->ReserveSpace(outer_dim);
}
template <typename... Ts>
void Resize(Ts... dim_source) const {
impl_.get()->Resize(dim_source...);
}
template <typename T>
void Resize(const std::vector<T>& dim_source) const {
impl_.get()->Resize(ArrayRef<T>(dim_source));
}
/**
* Resize the tensor like the source tensor. Note that this is just a
* sugar wrapper that essentially calls Resize(src_tensor.dims()).
* This method respects caffe2_keep_on_shrink.
*/
inline void ResizeLike(const Tensor& src_tensor) const {
CAFFE_ENFORCE_WITH_CALLER(
src_tensor.is_contiguous(),
"Right now ResizeLike is only supported for contiguous Tensor.");
if (impl_ != src_tensor.impl_) {
impl_.get()->Resize(src_tensor.sizes());
}
}
inline void Reshape(const vector<int64_t>& dims) const {
impl_.get()->Reshape(dims);
}
inline void Reshape(const vector<int>& dims) const {
impl_.get()->Reshape(ToVectorint64_t(dims));
}
inline void FreeMemory() const {
impl_.get()->FreeMemory();
}
/**
* A utility function to print the debug string for the tensor. Note that this
   * is very slow since it involves quite a few string operations, so do not use
* it in your performance-critical code.
*/
string DebugString() const {
std::stringstream ss;
ss << "A Tensor of item size " << impl_->dtype().itemsize() << " and type "
<< impl_->dtype().name() << " and dimension (";
for (int d : impl_->sizes()) {
ss << d << ",";
}
ss << ").";
return ss.str();
}
// To be deprecated
void ShareData(const Tensor& src) const {
impl_.get()->ShareData(*src.impl_.get());
}
/**
* @brief Shares the data with an externally managed pointer.
*
* This is similar to ShareData() but the source is a pointer with an advanced
* deleter option. In default, no deletion takes place, and one needs to make
* sure that the external memory is deallocated only after the tensor finishes
* using it. If a Deleter object is passed in, when this tensor is reallocated
* or freed, the deleter function is going to be called.
*/
template <typename T>
void ShareExternalPointer(
T* src,
size_t nbytes = 0,
MemoryDeleter d = nullptr) const {
ShareExternalPointer((void*)src, caffe2::TypeMeta::Make<T>(), nbytes, d);
}
template <typename T>
void ShareExternalPointer(at::DataPtr&& data_ptr, size_t nbytes = 0) const {
ShareExternalPointer(
std::move(data_ptr), caffe2::TypeMeta::Make<T>(), nbytes);
}
void ShareExternalPointer(
void* src,
const TypeMeta data_type,
size_t nbytes = 0,
MemoryDeleter d = nullptr) const {
CAFFE_ENFORCE_WITH_CALLER(
impl_->is_contiguous(),
"Right now ShareExternalPointer is only supported for contiguous Tensor.");
CAFFE_ENFORCE_WITH_CALLER(
data_type != ScalarType::Undefined,
"To share with a raw external pointer you need to pass in an "
"initialized data_type(TypeMeta).");
impl_.get()->ShareExternalPointer(
at::DataPtr(src, src, d, impl_->device_type()), data_type, nbytes);
}
void ShareExternalPointer(
at::DataPtr&& data_ptr,
const TypeMeta data_type,
size_t nbytes) {
impl_.get()->ShareExternalPointer(std::move(data_ptr), data_type, nbytes);
}
const c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>& getIntrusivePtr()
const {
return impl_;
}
bool defined() const {
return impl_;
}
/**
* Returns a raw void* pointer of the underlying storage. mutable_data()
* or raw_mutable_data() must have been called prior to this function call.
*/
inline void* raw_data() const {
return impl_->mutable_data();
}
template <typename T>
inline T* data() const {
return impl_.get()->mutable_data_dtype_initialized<T>();
}
inline void* raw_mutable_data(const TypeMeta meta) const {
return impl_.get()->raw_mutable_data(meta);
}
/**
* Returns a mutable raw pointer of the underlying storage. This can only be
* used when you know for sure that the underlying storage of the tensor is
* already created via an earlier raw_mutable_data(meta) call or a
* mutable_data<T>() call.
*
* If the existing data does not match the desired type, it will be deleted
* and a new storage will be created.
*/
inline void* raw_mutable_data() const {
const auto& data_type = impl_->dtype();
CAFFE_ENFORCE_WITH_CALLER(
data_type != ScalarType::Undefined,
"Calling raw_mutable_data() without meta, but the current meta is "
"of unknown type.");
return raw_mutable_data(data_type);
}
template <typename T>
inline T* mutable_data() const {
return impl_.get()->mutable_data<T>();
}
/**
* Returns the number of dimensions of the data.
*/
inline int dim() const {
return impl_->dim();
}
/**
* (To be deprecated) Returns the number of dimensions of the data.
*/
inline int ndim() const {
return impl_->dim();
}
/**
* (To be deprecated) Returns the size (i.e. the number of items) of the
* tensor.
*/
inline int64_t size() const {
return impl_->numel();
}
/**
* Returns the number of items of the tensor.
*/
inline int64_t numel() const {
return impl_->numel();
}
/**
* Return the number of bytes each item takes in the tensor.
*/
inline size_t itemsize() const {
return impl_->dtype().itemsize();
}
/**
* Returns the total number of bytes of the storage.
*
* This is equivalent to calling size() * itemsize().
*/
inline size_t nbytes() const {
return impl_->numel() * itemsize();
}
inline at::IntArrayRef sizes() const {
return impl_.get()->sizes();
}
inline c10::SymIntArrayRef sym_sizes() const {
return impl_->sym_sizes();
}
inline c10::SymInt sym_numel() const {
return impl_->sym_numel();
}
inline c10::SymIntArrayRef sym_strides() const {
return impl_->sym_strides();
}
inline int64_t size_from_dim(int k) const {
return size_from_dim_(k, impl_->sizes());
}
inline int64_t size_to_dim(int k) const {
return size_to_dim_(k, impl_->sizes());
}
inline int64_t size_between_dim(int k, int l) const {
return size_between_dim_(k, l, impl_->sizes());
}
/**
* Returns the 'canonical' version of a (usually) user-specified axis,
* allowing for negative indexing (e.g., -1 for the last axis).
*
* @param axis_index the axis index.
* If 0 <= index < dim(), return index.
* If -ndim <= index <= -1, return (dim() - (-index)),
* e.g., the last axis index (dim() - 1) if index == -1,
* the second to last if index == -2, etc.
* Dies on out of range index.
*/
inline int canonical_axis_index(int axis_index) const {
return canonical_axis_index_(axis_index, impl_->dim());
}
inline int64_t stride(int64_t dim) const {
return impl_.get()->stride(dim);
}
inline at::IntArrayRef strides() const {
return impl_.get()->strides();
}
inline bool is_contiguous(
at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const {
return impl_.get()->is_contiguous(memory_format);
}
/**
* Checks if the tensor content is of the given data type.
*/
template <typename T>
inline bool IsType() const {
return impl_->dtype().Match<T>();
}
/**
* Returns the TypeMeta object associated with the current data type.
*/
inline const TypeMeta dtype() const {
return impl_->dtype();
}
/**
* (To be deprecated) Returns the TypeMeta object associated with the current
* data type.
*/
inline const TypeMeta meta() const {
return impl_->dtype();
}
/**
* Returns the i-th dimension of the tensor in int.
*
* This function returns an int value instead of int64_t, which depending on
* the typedef could be int64. If you want int64 dim values, make sure you
* call dim() instead.
*/
inline int dim32(const int i) const {
#ifndef NDEBUG
CAFFE_ENFORCE_LT_WITH_CALLER(
i, static_cast<int>(impl_->dim()), "Exceeding ndim limit");
CAFFE_ENFORCE_GE_WITH_CALLER(i, 0, "Cannot have negative dimension index");
#endif
// Avoid TensorImpl::size() because it is a virtual call that
// supports out-of-range indexing like Python.
auto s = impl_->sizes()[i];
CAFFE_ENFORCE_LT_WITH_CALLER(s, std::numeric_limits<int>::max());
return static_cast<int>(s);
}
inline int64_t size(const int i) const {
return impl_->size(i);
}
// To be deprecated
inline int64_t dim(const int i) const {
return impl_->size(i);
}
const Storage& storage() {
return impl_->storage();
}
const Storage& storage() const {
return impl_->storage();
}
bool storage_initialized() const {
return impl_->storage_initialized();
}
bool dtype_initialized() const {
return impl_->dtype_initialized();
}
};
/**
 * Reinitialize a Tensor to the given dims and options if necessary. Note that
 * this is a no-op if the Tensor already has the correct size and data type.
*/
TORCH_API void
ReinitializeTensor(Tensor* t, at::IntArrayRef dims, at::TensorOptions options);
TORCH_API void ReinitializeAndCopyFrom(
Tensor* t,
at::TensorOptions options,
const Tensor& src,
bool async = false);
using TensorCPU = Tensor;
constexpr int k_limit_default_ = 1000;
// TODO: the following logic can be merged into regular Tensor class methods
// after MKLMemory starts to implement Tensor interface
// Type call registry
typedef TypeMeta (*TypeCall)(const void*);
TypeCall GetTypeCallFunction(TypeIdentifier id);
void RegisterTypeCallFunction(TypeIdentifier id, TypeCall c);
// Shape call registry
typedef vector<int64_t> (
*TensorInfoCall)(const void*, size_t* capacity, DeviceOption* device);
TensorInfoCall GetTensorInfoFunction(TypeIdentifier id);
void RegisterTensorInfoFunction(TypeIdentifier id, TensorInfoCall c);
// resize helper function
void TensorVectorResize(
std::vector<Tensor>& tensors,
int size,
DeviceType type);
// Tensor factory function
TORCH_API Tensor empty(at::IntArrayRef dims, at::TensorOptions options);
/**
* @brief Creates a CPU tensor, and fills its contents with the given values.
* Values are copied in
*/
// TODO: can be unified with at::from_blob when Tensor is merged and string
// types are supported
template <typename T>
Tensor TensorCPUFromValues(at::IntArrayRef dims, at::ArrayRef<T> values) {
Tensor r = empty(dims, at::device(CPU).dtype<T>());
CAFFE_ENFORCE_EQ(values.size(), r.numel());
CPUContext context;
context.CopyItemsFromCPU(
r.dtype(), values.size(), values.data(), r.mutable_data<T>());
return r;
}
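// Usage sketch (illustrative):
//   Tensor t = TensorCPUFromValues<float>({2, 2}, {1.f, 2.f, 3.f, 4.f});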
vector<int64_t>
GetTensorInfo(const void* c, size_t* capacity, DeviceOption* device);
class TORCH_API TensorPrinter {
public:
explicit TensorPrinter(
const std::string& tensor_name = "",
const std::string& file_name = "",
int limit = k_limit_default_);
~TensorPrinter();
template <class T>
void Print(const Tensor& tensor);
void PrintMeta(const Tensor& tensor);
string MetaStr(const Tensor& tensor);
private:
bool to_file_;
int limit_;
std::unique_ptr<std::ofstream> log_file_;
std::string tensor_name_;
};
template <class T>
void TensorPrinter::Print(const Tensor& tensor) {
std::stringstream values_stream;
// One most likely doesn't want to print int64-number of items for visual
// inspection, so we cast down to int here.
int total_count = static_cast<int>(std::min(tensor.numel(), int64_t(limit_)));
const T* tensor_data = tensor.template data<T>();
for (int i = 0; i < total_count - 1; ++i) {
values_stream << tensor_data[i] << ",";
}
if (total_count) {
// We do not add a comma after the last item.
values_stream << tensor_data[total_count - 1];
}
if (to_file_) {
(*log_file_) << MetaStr(tensor) << values_stream.str() << std::endl;
} else {
// Log to console.
LOG(INFO) << MetaStr(tensor) << values_stream.str();
}
}
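// Usage sketch (illustrative names):
//   TensorPrinter printer("my_tensor");
//   printer.Print<float>(my_cpu_tensor);  // prints up to `limit` values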
CAFFE_DECLARE_KNOWN_TYPE(Tensor, Caffe2Tensor)
} // namespace caffe2
C10_CLANG_DIAGNOSTIC_POP()
namespace c10 {
template <>
struct ExclusivelyOwnedTraits<caffe2::Tensor> : public c10::ExclusivelyOwnedTensorTraits<caffe2::Tensor> {};
} // namespace c10
#endif // CAFFE2_CORE_TENSOR_H_
| 19,521
| 27.921481
| 108
|
h
|
null |
pytorch-main/caffe2/core/test_utils.h
|
#ifndef CAFFE2_UTILS_TEST_UTILS_H_
#define CAFFE2_UTILS_TEST_UTILS_H_
#include "caffe2/core/tensor.h"
#include "caffe2/core/workspace.h"
#include "caffe2/utils/proto_utils.h"
#include <c10/macros/Macros.h>
#include <c10/util/irange.h>
#include <cmath>
#include <string>
#include <vector>
// Utilities that make it easier to write caffe2 C++ unit tests.
// These utils are designed to be concise and easy to use. They may sacrifice
// performance and should only be used in tests/non production code.
namespace caffe2 {
namespace testing {
// Asserts that the values of two tensors are the same.
TORCH_API void assertTensorEquals(
const TensorCPU& tensor1,
const TensorCPU& tensor2,
float eps = 1e-6);
// Asserts that two float values are close within epsilon.
TORCH_API void assertNear(float value1, float value2, float epsilon);
// Asserts that the numeric values of a tensor is equal to a data vector.
template <typename T>
void assertTensorEquals(
const TensorCPU& tensor,
const std::vector<T>& data,
float epsilon = 0.1f) {
CAFFE_ENFORCE(tensor.IsType<T>());
CAFFE_ENFORCE_EQ(tensor.numel(), data.size());
for (const auto idx : c10::irange(tensor.numel())) {
if (tensor.IsType<float>()) {
assertNear(tensor.data<T>()[idx], data[idx], epsilon);
} else {
CAFFE_ENFORCE_EQ(tensor.data<T>()[idx], data[idx]);
}
}
}
// Assertion for tensor sizes and values.
template <typename T>
void assertTensor(
const TensorCPU& tensor,
const std::vector<int64_t>& sizes,
const std::vector<T>& data,
float epsilon = 0.1f) {
CAFFE_ENFORCE_EQ(tensor.sizes(), sizes);
assertTensorEquals(tensor, data, epsilon);
}
// Asserts a list of tensors presented in two workspaces are equal.
TORCH_API void assertTensorListEquals(
const std::vector<std::string>& tensorNames,
const Workspace& workspace1,
const Workspace& workspace2);
// Read a tensor from the workspace.
TORCH_API const caffe2::Tensor& getTensor(
const caffe2::Workspace& workspace,
const std::string& name);
// Create a new tensor in the workspace.
TORCH_API caffe2::Tensor* createTensor(
const std::string& name,
caffe2::Workspace* workspace);
// Create a new operator in the net.
TORCH_API caffe2::OperatorDef* createOperator(
const std::string& type,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs,
caffe2::NetDef* net);
// Fill a buffer with randomly generated numbers given range [min, max)
// T can only be float, double or long double
template <typename RealType = float>
void randomFill(
RealType* data,
size_t size,
const double min = 0.0,
const double max = 1.0) {
std::mt19937 gen(42);
std::uniform_real_distribution<RealType> dis(
static_cast<RealType>(min), static_cast<RealType>(max));
for (const auto i : c10::irange(size)) {
data[i] = dis(gen);
}
}
// Fill data from a vector to a tensor.
template <typename T>
void fillTensor(
const std::vector<int64_t>& shape,
const std::vector<T>& data,
TensorCPU* tensor) {
tensor->Resize(shape);
CAFFE_ENFORCE_EQ(data.size(), tensor->numel());
auto ptr = tensor->mutable_data<T>();
for (int i = 0; i < tensor->numel(); ++i) {
ptr[i] = data[i];
}
}
// Create a tensor and fill data.
template <typename T>
caffe2::Tensor* createTensorAndFill(
const std::string& name,
const std::vector<int64_t>& shape,
const std::vector<T>& data,
Workspace* workspace) {
auto* tensor = createTensor(name, workspace);
fillTensor<T>(shape, data, tensor);
return tensor;
}
template <typename T>
caffe2::Tensor createTensorAndFill(
const std::vector<int64_t>& shape,
const std::vector<T>& data) {
Tensor tensor(caffe2::CPU);
fillTensor<T>(shape, data, &tensor);
return tensor;
}
// Fill a constant to a tensor.
template <typename T>
void constantFillTensor(
const vector<int64_t>& shape,
const T& data,
TensorCPU* tensor) {
tensor->Resize(shape);
auto ptr = tensor->mutable_data<T>();
for (int i = 0; i < tensor->numel(); ++i) {
ptr[i] = data;
}
}
// Create a tensor and fill a constant.
template <typename T>
caffe2::Tensor* createTensorAndConstantFill(
const std::string& name,
const std::vector<int64_t>& shape,
const T& data,
Workspace* workspace) {
auto* tensor = createTensor(name, workspace);
constantFillTensor<T>(shape, data, tensor);
return tensor;
}
// Concise util class to mutate a net in a chaining fashion.
class TORCH_API NetMutator {
public:
// NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.UninitializedObject)
explicit NetMutator(caffe2::NetDef* net) : net_(net) {}
NetMutator& newOp(
const std::string& type,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs);
NetMutator& externalInputs(const std::vector<std::string>& externalInputs);
NetMutator& externalOutputs(const std::vector<std::string>& externalOutputs);
// Add argument to the last created op.
template <typename T>
NetMutator& addArgument(const std::string& name, const T& value) {
CAFFE_ENFORCE(lastCreatedOp_ != nullptr);
AddArgument(name, value, lastCreatedOp_);
return *this;
}
// Set device name for the last created op.
NetMutator& setDeviceOptionName(const std::string& name);
private:
caffe2::NetDef* net_;
caffe2::OperatorDef* lastCreatedOp_;
};
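// Usage sketch (illustrative op and blob names):
//   caffe2::NetDef net;
//   caffe2::testing::NetMutator(&net)
//       .newOp("Relu", {"X"}, {"Y"})
//       .newOp("Sum", {"Y", "Z"}, {"W"})
//       .externalInputs({"X", "Z"})
//       .externalOutputs({"W"});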
// Concise util class to mutate a workspace in a chaining fashion.
class TORCH_API WorkspaceMutator {
public:
explicit WorkspaceMutator(caffe2::Workspace* workspace)
: workspace_(workspace) {}
// New tensor filled by a data vector.
template <typename T>
WorkspaceMutator& newTensor(
const std::string& name,
const std::vector<int64_t>& shape,
const std::vector<T>& data) {
createTensorAndFill<T>(name, shape, data, workspace_);
return *this;
}
// New tensor filled by a constant.
template <typename T>
WorkspaceMutator& newTensorConst(
const std::string& name,
const std::vector<int64_t>& shape,
const T& data) {
createTensorAndConstantFill<T>(name, shape, data, workspace_);
return *this;
}
private:
caffe2::Workspace* workspace_;
};
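// Usage sketch (illustrative blob names):
//   caffe2::Workspace ws;
//   caffe2::testing::WorkspaceMutator(&ws)
//       .newTensor<float>("X", {2}, {1.f, 2.f})
//       .newTensorConst<float>("bias", {2}, 0.f);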
} // namespace testing
} // namespace caffe2
#endif // CAFFE2_UTILS_TEST_UTILS_H_
| 6,322
| 27.481982
| 79
|
h
|
null |
pytorch-main/caffe2/core/timer.h
|
#ifndef CAFFE2_CORE_TIMER_H_
#define CAFFE2_CORE_TIMER_H_
#include <chrono>
#include "caffe2/core/common.h"
namespace caffe2 {
/**
* @brief A simple timer object for measuring time.
*
* This is a minimal class around a std::chrono::high_resolution_clock that
* serves as a utility class for testing code.
*/
class Timer {
public:
typedef std::chrono::high_resolution_clock clock;
typedef std::chrono::nanoseconds ns;
Timer() { Start(); }
/**
* @brief Starts a timer.
*/
inline void Start() { start_time_ = clock::now(); }
inline float NanoSeconds() {
return static_cast<float>(
std::chrono::duration_cast<ns>(clock::now() - start_time_).count());
}
/**
* @brief Returns the elapsed time in milliseconds.
*/
inline float MilliSeconds() { return NanoSeconds() / 1000000.f; }
/**
* @brief Returns the elapsed time in microseconds.
*/
inline float MicroSeconds() { return NanoSeconds() / 1000.f; }
/**
* @brief Returns the elapsed time in seconds.
*/
inline float Seconds() { return NanoSeconds() / 1000000000.f; }
protected:
std::chrono::time_point<clock> start_time_;
C10_DISABLE_COPY_AND_ASSIGN(Timer);
};
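// Usage sketch (illustrative):
//   caffe2::Timer timer;
//   DoWork();  // hypothetical workload
//   LOG(INFO) << "Elapsed: " << timer.MilliSeconds() << " ms";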
}
#endif // CAFFE2_CORE_TIMER_H_
| 1,218
| 23.877551
| 76
|
h
|
null |
pytorch-main/caffe2/core/transform.h
|
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/core/graph.h"
#include "caffe2/core/workspace.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"
namespace caffe2 {
/**
* The Transform Base Object
*
* A Transform is an operation which manipulates a Caffe2 NetDef.
* You can consider it as a function: Transform.ApplyTo(NetDef) -> NetDef
*
* A Transform Operation does 4 things:
* 1) Creates a Graph object from a NetDef, which stores connections.
* 2) Pattern Matches on the Graph, to find subgraphs it wants to change.
* 3) Replaces the subgraphs that it's matched with new operators.
* 4) Creates a NetDef from the changed Graph, and returns it.
*
* The effect of a Transform is defined by its 3 protected virtual functions.
* 1) PatternRule determines for an ordered subgraph and a node, whether to
* consider adding the node to the subgraph.
* 2) ValidatorRule determines, for an ordered subgraph, whether it is a
* match.
* 3) ReplaceRule mutates the graph, based on a matched subgraph.
*
 * This is the base class that all transforms derive from. To create your
* own transform, write your implementations for PatternRule, ValidatorRule, and
* ReplaceRule.
*/
class TORCH_API Transform {
public:
Transform() {}
/**
* Apply a Transform onto a NetDef.
* Returns the transformed NetDef.
*/
NetDef ApplyTo(const NetDef& orig_net_def);
virtual ~Transform() {}
/**
* Determines the type of subgraphs that PatternMatch will find.
*
* CONNECTED_SUBGRAPH will only match subgraphs that are connected.
* These subgraphs satisfy that every node of the match is connected to the
* subgraph of the nodes that come before it.
* For example, in the graph (1) --> (2) --> (3) --> (4),
* This is capable of matching the subgraph [2, 3] and [4, 3]
* This is not capable of matching the subgraph [2, 4].
*
*
* SORTED_WRT_EXECUTION_ORDER will match subgraphs that guarantee
* sorted execution order.
* The nodes don't have to be connected. It is faster than General.
* For example, in the graph (1) --> (2) --> (3) --> (4),
* This is capable of matching the subgraph [2, 4], [3, 4].
* This is not capable of matching the subgraph [3, 1], [4, 3].
*
*
* GENERAL can match any subgraph.
* For example, in the graph (1) --> (2) --> (3) --> (4),
* This is capable of matching subgraphs [2, 4], [3, 4], [4, 2, 1].
* There is no ordered subgraph of G that cannot be matched by this.
*/
enum PatternMatchType {
CONNECTED_SUBGRAPH,
SORTED_WRT_EXECUTION_ORDER,
GENERAL
};
/**
* Generates all matches (stored as ordered subgraphs) and returns them.
*
* A match is stored as vector<int>, which is a mapping to OperatorDefs
* in Graph. The order matters.
*/
std::vector<std::vector<int>> PatternMatch(const transform::Graph& graph);
/**
* Applies the replace rule onto each of the matches found.
*/
void ReplacePattern(
const std::vector<std::vector<int>>& matches,
transform::Graph* graph);
protected:
/**
* The PatternRule essentially answers:
* Given the current subgraph (ordered), should we append the new node at idx?
*/
virtual bool PatternRule(
const transform::Graph& g,
const std::vector<int>& subgraph,
int /*idx*/) {
CAFFE_NOT_IMPLEMENTED;
}
/**
* The ValidatorRule essentially answers:
* Given a subgraph, can we accept it?
*/
virtual bool ValidatorRule(
const transform::Graph& g,
const std::vector<int>& subgraph) {
CAFFE_NOT_IMPLEMENTED;
}
/**
* The ReplaceRule actually mutates the graph, and applies the transformation
* upon the subgraph.
*/
virtual bool ReplaceRule(
const std::vector<int>& subgraph,
transform::Graph* g_ptr) {
CAFFE_NOT_IMPLEMENTED;
}
void SetPatternMatchType(PatternMatchType type) {
pattern_match_type_ = type;
}
private:
/**
* A helper function for PatternMatch, which keeps track of the best subgraph
* so far.
*/
void PatternMatchHelper(
const transform::Graph& graph,
const std::vector<bool>& matched,
std::vector<int>* subgraph_ptr,
std::vector<int>* best_subgraph_ptr);
/**
* Attempts to append each neighbor to the end of the subgraph.
*/
void TryNeighbors(
const transform::Graph& graph,
const std::map<int, std::vector<string>>& neighbors,
const std::vector<bool>& matched,
std::vector<int>* subgraph_ptr,
std::vector<int>* best_subgraph_ptr);
PatternMatchType pattern_match_type_ = CONNECTED_SUBGRAPH;
};
// Creates a Transform based on a key, which should be defined in registry.
TORCH_API unique_ptr<Transform> CreateTransform(string key);
C10_DECLARE_REGISTRY(TransformRegistry, Transform);
#define REGISTER_TRANSFORM(name, ...) \
C10_REGISTER_CLASS(TransformRegistry, name, __VA_ARGS__)
// Create a Transform object from registry,
// and immediately apply it to a Netdef.
TORCH_API NetDef ApplyTransform(const string& key, const NetDef& netdef);
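// Usage sketch (assumes a transform registered under the hypothetical key
// "MyTransform"):
//   caffe2::NetDef transformed =
//       caffe2::ApplyTransform("MyTransform", original_net_def);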
// Create a Transform object from registry, apply it to a NetDef.
// Will only return the transformed net if it is faster than the old net.
// This will run the init net first, will run the two nets warmup_runs times.
// Then, we will take the average time of main_runs runs, and only keep the
// transformed net if it is faster by a factor of improvement_threshold.
TORCH_API NetDef ApplyTransformIfFaster(
const string& key,
const NetDef& netdef,
const NetDef& init_netdef,
const int warmup_runs,
const int main_runs,
const double improvement_threshold);
} // namespace caffe2
| 5,741
| 31.811429
| 80
|
h
|
null |
pytorch-main/caffe2/core/types.h
|
#ifndef CAFFE2_CORE_TYPES_H_
#define CAFFE2_CORE_TYPES_H_
#include <cstdint>
#include <string>
#include <type_traits>
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
#include <c10/util/typeid.h>
#include "caffe2/proto/caffe2_pb.h"
#include <c10/util/Half.h>
namespace caffe2 {
// Storage orders that are often used in the image applications.
enum StorageOrder {
UNKNOWN = 0,
NHWC = 1,
NCHW = 2,
};
inline StorageOrder StringToStorageOrder(const string& str) {
if (str == "NHWC" || str == "nhwc") {
return StorageOrder::NHWC;
} else if (str == "NCHW" || str == "nchw") {
return StorageOrder::NCHW;
} else {
LOG(ERROR) << "Unknown storage order string: " << str;
return StorageOrder::UNKNOWN;
}
}
inline int32_t GetDimFromOrderString(const std::string& str) {
auto order = StringToStorageOrder(str);
switch (order) {
case StorageOrder::NHWC:
return 3;
case StorageOrder::NCHW:
return 1;
default:
CAFFE_THROW("Unsupported storage order: ", str);
return -1;
}
}
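// Usage sketch:
//   StorageOrder order = StringToStorageOrder("NCHW"); // StorageOrder::NCHW
//   int channel_dim = GetDimFromOrderString("NHWC");   // 3 (channels are last)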
inline constexpr char NameScopeSeparator() { return '/'; }
// From TypeMeta to caffe2::DataType protobuffer enum.
TORCH_API TensorProto::DataType TypeMetaToDataType(const TypeMeta& meta);
// From caffe2::DataType protobuffer enum to TypeMeta
TORCH_API const TypeMeta DataTypeToTypeMeta(const TensorProto::DataType& dt);
} // namespace caffe2
///////////////////////////////////////////////////////////////////////////////
// at::Half is defined in c10/util/Half.h. Currently half float operators are
// mainly on CUDA gpus.
// The reason we do not directly use the cuda __half data type is because that
// requires compilation with nvcc. The float16 data type should be compatible
// with the cuda __half data type, but will allow us to refer to the data type
// without the need of cuda.
static_assert(sizeof(unsigned short) == 2,
"Short on this platform is not 16 bit.");
namespace caffe2 {
// Helpers to avoid using typeinfo with -rtti
template <typename T>
inline bool fp16_type();
template <>
inline bool fp16_type<at::Half>() {
return true;
}
template <typename T>
inline bool fp16_type() {
return false;
}
} // namespace caffe2
#endif // CAFFE2_CORE_TYPES_H_
| 2,249
| 25.785714
| 79
|
h
|
null |
pytorch-main/caffe2/core/workspace.h
|
#ifndef CAFFE2_CORE_WORKSPACE_H_
#define CAFFE2_CORE_WORKSPACE_H_
#include "caffe2/core/common.h"
#include "caffe2/core/observer.h"
#include <climits>
#include <cstddef>
#include <mutex>
#include <typeinfo>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "c10/util/Registry.h"
#include "caffe2/core/blob.h"
#include "caffe2/core/net.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/signal_handler.h"
#include "caffe2/utils/threadpool/ThreadPool.h"
C10_DECLARE_bool(caffe2_print_blob_sizes_at_exit);
namespace caffe2 {
class NetBase;
struct TORCH_API StopOnSignal {
StopOnSignal()
: handler_(std::make_shared<SignalHandler>(
SignalHandler::Action::STOP,
SignalHandler::Action::STOP)) {}
StopOnSignal(const StopOnSignal& other) : handler_(other.handler_) {}
bool operator()(int /*iter*/) {
return handler_->CheckForSignals() != SignalHandler::Action::STOP;
}
std::shared_ptr<SignalHandler> handler_;
};
/**
* Workspace is a class that holds all the related objects created during
* runtime: (1) all blobs, and (2) all instantiated networks. It is the owner of
* all these objects and deals with the scaffolding logistics.
*/
class TORCH_API Workspace {
public:
typedef std::function<bool(int)> ShouldContinue;
/**
* Initializes an empty workspace.
*/
Workspace() : Workspace(".", nullptr) {}
/**
* Initializes an empty workspace with the given root folder.
*
   * Any operators that interface with the file system, such as load
   * operators, will write under the root folder given by the workspace.
*/
explicit Workspace(const string& root_folder)
: Workspace(root_folder, nullptr) {}
/**
* Initializes a workspace with a shared workspace.
*
* When we access a Blob, we will first try to access the blob that exists
* in the local workspace, and if not, access the blob that exists in the
* shared workspace. The caller keeps the ownership of the shared workspace
* and is responsible for making sure that its lifetime is longer than the
* created workspace.
*/
explicit Workspace(const Workspace* shared) : Workspace(".", shared) {}
/**
   * Initializes a workspace with a parent workspace and a blob name remapping
   * (new name -> parent blob name); no other blobs are inherited from the
   * parent workspace.
*/
Workspace(
const Workspace* shared,
const std::unordered_map<string, string>& forwarded_blobs)
: Workspace(".", nullptr) {
CAFFE_ENFORCE(shared, "Parent workspace must be specified");
for (const auto& forwarded : forwarded_blobs) {
CAFFE_ENFORCE(
shared->HasBlob(forwarded.second),
"Invalid parent workspace blob: ",
forwarded.second);
forwarded_blobs_[forwarded.first] =
std::make_pair(shared, forwarded.second);
}
}
/**
* Initializes a workspace with a root folder and a shared workspace.
*/
Workspace(const string& root_folder, const Workspace* shared)
: root_folder_(root_folder), shared_(shared), bookkeeper_(bookkeeper()) {
std::lock_guard<std::mutex> guard(bookkeeper_->wsmutex);
bookkeeper_->workspaces.insert(this);
}
~Workspace() {
if (FLAGS_caffe2_print_blob_sizes_at_exit) {
PrintBlobSizes();
}
// This is why we have a bookkeeper_ shared_ptr instead of a naked static! A
// naked static makes us vulnerable to out-of-order static destructor bugs.
std::lock_guard<std::mutex> guard(bookkeeper_->wsmutex);
bookkeeper_->workspaces.erase(this);
}
/**
* Adds blob mappings from workspace to the blobs from parent workspace.
* Creates blobs under possibly new names that redirect read/write operations
* to the blobs in the parent workspace.
* Arguments:
* parent - pointer to parent workspace
   *  forwarded_blobs - map from new blob name to blob name in the parent's
   *    workspace
   *  skip_defined_blobs - if set, skips blobs whose names already exist in
   *    the workspace; otherwise throws an exception
*/
void AddBlobMapping(
const Workspace* parent,
const std::unordered_map<string, string>& forwarded_blobs,
bool skip_defined_blobs = false);
/**
* Converts previously mapped tensor blobs to local blobs, copies values from
* parent workspace blobs into new local blobs. Ignores undefined blobs.
*/
template <class Context>
void CopyForwardedTensors(const std::unordered_set<std::string>& blobs) {
for (const auto& blob : blobs) {
auto it = forwarded_blobs_.find(blob);
if (it == forwarded_blobs_.end()) {
continue;
}
const auto& ws_blob = it->second;
const auto* parent_ws = ws_blob.first;
auto* from_blob = parent_ws->GetBlob(ws_blob.second);
CAFFE_ENFORCE(from_blob);
CAFFE_ENFORCE(
from_blob->template IsType<Tensor>(),
"Expected blob with tensor value",
ws_blob.second);
forwarded_blobs_.erase(blob);
auto* to_blob = CreateBlob(blob);
CAFFE_ENFORCE(to_blob);
const auto& from_tensor = from_blob->template Get<Tensor>();
auto* to_tensor = BlobGetMutableTensor(to_blob, Context::GetDeviceType());
to_tensor->CopyFrom(from_tensor);
}
}
/**
* Return list of blobs owned by this Workspace, not including blobs
* shared from parent workspace.
*/
vector<string> LocalBlobs() const;
/**
* Return a list of blob names. This may be a bit slow since it will involve
* creation of multiple temp variables. For best performance, simply use
* HasBlob() and GetBlob().
*/
vector<string> Blobs() const;
/**
* Return the root folder of the workspace.
*/
const string& RootFolder() { return root_folder_; }
/**
* Checks if a blob with the given name is present in the current workspace.
*/
inline bool HasBlob(const string& name) const {
    // First check the local workspace, then the forwarding map, then the
    // parent workspace.
if (blob_map_.count(name)) {
return true;
}
auto it = forwarded_blobs_.find(name);
if (it != forwarded_blobs_.end()) {
const auto parent_ws = it->second.first;
const auto& parent_name = it->second.second;
return parent_ws->HasBlob(parent_name);
}
if (shared_) {
return shared_->HasBlob(name);
}
return false;
}
void PrintBlobSizes();
/**
* Creates a blob of the given name. The pointer to the blob is returned, but
* the workspace keeps ownership of the pointer. If a blob of the given name
* already exists, the creation is skipped and the existing blob is returned.
*/
Blob* CreateBlob(const string& name);
/**
* Similar to CreateBlob(), but it creates a blob in the local workspace even
* if another blob with the same name already exists in the parent workspace
* -- in such case the new blob hides the blob in parent workspace. If a blob
* of the given name already exists in the local workspace, the creation is
* skipped and the existing blob is returned.
*/
Blob* CreateLocalBlob(const string& name);
/**
   * Removes the blob of the given name. Returns true if it was removed and
   * false if it does not exist.
* Will NOT remove from the shared workspace.
*/
bool RemoveBlob(const string& name);
/**
* Gets the blob with the given name as a const pointer. If the blob does not
* exist, a nullptr is returned.
*/
const Blob* GetBlob(const string& name) const;
/**
* Gets the blob with the given name as a mutable pointer. If the blob does
* not exist, a nullptr is returned.
*/
Blob* GetBlob(const string& name);
/**
   * Renames a local workspace blob. If the blob is not found in the local blob
   * list, or if the target name is already present in the local or any parent
   * blob list, the function will throw.
*/
Blob* RenameBlob(const string& old_name, const string& new_name);
/**
* Creates a network with the given NetDef, and returns the pointer to the
* network. If there is anything wrong during the creation of the network, a
* nullptr is returned. The Workspace keeps ownership of the pointer.
*
* If there is already a net created in the workspace with the given name,
* CreateNet will overwrite it if overwrite=true is specified. Otherwise, an
* exception is thrown.
*/
NetBase* CreateNet(const NetDef& net_def, bool overwrite = false);
NetBase* CreateNet(
const std::shared_ptr<const NetDef>& net_def,
bool overwrite = false);
/**
* Gets the pointer to a created net. The workspace keeps ownership of the
* network.
*/
NetBase* GetNet(const string& net_name);
/**
* Deletes the instantiated network with the given name.
*/
void DeleteNet(const string& net_name);
/**
* Finds and runs the instantiated network with the given name. If the network
* does not exist or there are errors running the network, the function
* returns false.
*/
bool RunNet(const string& net_name);
/**
* Returns a list of names of the currently instantiated networks.
*/
vector<string> Nets() const {
vector<string> names;
for (auto& entry : net_map_) {
names.push_back(entry.first);
}
return names;
}
/**
* Runs a plan that has multiple nets and execution steps.
*/
bool RunPlan(const PlanDef& plan_def,
ShouldContinue should_continue = StopOnSignal{});
  /**
* Returns a CPU threadpool instance for parallel execution of
* work. The threadpool is created lazily; if no operators use it,
* then no threadpool will be created.
*/
ThreadPool* GetThreadPool();
  // RunOperatorOnce and RunNetOnce run an operator or net once. The difference
// between RunNet and RunNetOnce lies in the fact that RunNet allows you to
// have a persistent net object, while RunNetOnce creates a net and discards
// it on the fly - this may make things like database read and random number
// generators repeat the same thing over multiple calls.
bool RunOperatorOnce(const OperatorDef& op_def);
bool RunNetOnce(const NetDef& net_def);
/**
* Applies a function f on each workspace that currently exists.
*
* This function is thread safe and there is no race condition between
* workspaces being passed to f in this thread and destroyed in another.
*/
template <typename F>
static void ForEach(F f) {
auto bk = bookkeeper();
std::lock_guard<std::mutex> guard(bk->wsmutex);
for (Workspace* ws : bk->workspaces) {
f(ws);
}
}
public:
std::atomic<int> last_failed_op_net_position{};
private:
struct Bookkeeper {
std::mutex wsmutex;
std::unordered_set<Workspace*> workspaces;
};
static std::shared_ptr<Bookkeeper> bookkeeper();
std::unordered_map<string, unique_ptr<Blob>> blob_map_;
const string root_folder_;
const Workspace* shared_;
std::unordered_map<string, std::pair<const Workspace*, string>>
forwarded_blobs_;
std::unique_ptr<ThreadPool> thread_pool_;
std::mutex thread_pool_creation_mutex_;
std::shared_ptr<Bookkeeper> bookkeeper_;
std::unordered_map<string, unique_ptr<NetBase>> net_map_;
C10_DISABLE_COPY_AND_ASSIGN(Workspace);
};
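// Example (illustrative sketch, not part of the original header): a typical
// round trip through a Workspace. The blob and net names are hypothetical and
// net_def is assumed to describe a valid, runnable net.
//
//   caffe2::Workspace ws;
//   ws.CreateBlob("X");                    // blob is owned by the workspace
//   CAFFE_ENFORCE(ws.HasBlob("X"));
//   caffe2::NetDef net_def;                // assume this is populated
//   net_def.set_name("my_net");
//   if (ws.CreateNet(net_def) != nullptr) {
//     ws.RunNet("my_net");                 // reuses the persistent net
//   }
//   ws.RunNetOnce(net_def);                // builds and discards a net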
} // namespace caffe2
#endif // CAFFE2_CORE_WORKSPACE_H_
| 11,305
| 31.962099
| 80
|
h
|
null |
pytorch-main/caffe2/core/hip/common_miopen.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_CORE_COMMON_MIOPEN_H_
#define CAFFE2_CORE_COMMON_MIOPEN_H_
#include <array>
#include <mutex>
#include "miopen/miopen.h"
#include "caffe2/core/common.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/types.h"
#include "caffe2/proto/caffe2_pb.h"
#define MIOPEN_VERSION 1399
namespace caffe2 {
namespace internal {
/**
* A helper function to obtain miopen error strings.
*/
inline const char* miopenGetErrorString(miopenStatus_t status)
{
switch(status)
{
case miopenStatusSuccess: return "MIOPEN_STATUS_SUCCESS";
case miopenStatusNotInitialized: return "MIOPEN_STATUS_NOT_INITIALIZED";
case miopenStatusAllocFailed: return "MIOPEN_STATUS_ALLOC_FAILED";
case miopenStatusBadParm: return "MIOPEN_STATUS_BAD_PARAM";
case miopenStatusInternalError: return "MIOPEN_STATUS_INTERNAL_ERROR";
case miopenStatusInvalidValue: return "MIOPEN_STATUS_INVALID_VALUE";
case miopenStatusNotImplemented: return "MIOPEN_STATUS_NOT_SUPPORTED";
case miopenStatusUnknownError: return "MIOPEN_STATUS_UNKNOWN_ERROR";
default: return "MIOPEN_STATUS_UNKNOWN_ERROR";
}
}
} // namespace internal
// A macro that wraps around a miopen statement so we can check if the miopen
// execution finishes or not.
#define MIOPEN_ENFORCE(condition) \
do \
{ \
miopenStatus_t status = condition; \
CAFFE_ENFORCE_EQ(status, \
miopenStatusSuccess, \
", Error at: ", \
__FILE__, \
":", \
__LINE__, \
": ", \
::caffe2::internal::miopenGetErrorString(status)); \
} while(0)
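// Example (illustrative): wrapping a miopen call with the enforce macro, e.g.
// when creating a tensor descriptor as done further below in this header.
//
//   miopenTensorDescriptor_t desc;
//   MIOPEN_ENFORCE(miopenCreateTensorDescriptor(&desc));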
#define MIOPEN_CHECK(condition) \
do \
{ \
miopenStatus_t status = condition; \
CHECK(status == miopenStatusSuccess) << ::caffe2::internal::miopenGetErrorString(status); \
} while(0)
// report the version of miopen Caffe2 was compiled with
inline size_t miopenCompiledVersion() { return MIOPEN_VERSION; }
// report the runtime version of miopen
inline size_t miopenRuntimeVersion() { return MIOPEN_VERSION; }
// Check compatibility of compiled and runtime miopen versions
inline void CheckMIOPENVersions() {}
/**
* miopenTypeWrapper is a wrapper class that allows us to refer to the miopen type
* in a template function. The class is specialized explicitly for different
* data types below.
*/
template <typename T>
class miopenTypeWrapper;
template <>
class miopenTypeWrapper<float>
{
public:
static const miopenDataType_t type = miopenFloat;
typedef const float ScalingParamType;
typedef float BNParamType;
static ScalingParamType* kOne()
{
static ScalingParamType v = 1.0;
return &v;
}
static const ScalingParamType* kZero()
{
static ScalingParamType v = 0.0;
return &v;
}
};
template <>
class miopenTypeWrapper<at::Half>
{
public:
static const miopenDataType_t type = miopenHalf;
typedef const float ScalingParamType;
typedef float BNParamType;
static ScalingParamType* kOne()
{
static ScalingParamType v = 1.0;
return &v;
}
static ScalingParamType* kZero()
{
static ScalingParamType v = 0.0;
return &v;
}
};
/**
 * miopenTensorDescWrapper is a placeholder that wraps around a
 * miopenTensorDescriptor_t, allowing us to change the descriptor as needed
 * during runtime.
*/
class miopenTensorDescWrapper
{
public:
miopenTensorDescWrapper() { MIOPEN_ENFORCE(miopenCreateTensorDescriptor(&desc_)); }
~miopenTensorDescWrapper() noexcept { MIOPEN_CHECK(miopenDestroyTensorDescriptor(desc_)); }
inline miopenTensorDescriptor_t
Descriptor(const miopenDataType_t type, const vector<int>& dims, bool* changed)
{
if(type_ == type && dims_ == dims)
{
// if not changed, simply return the current descriptor.
if(changed)
*changed = false;
return desc_;
}
CAFFE_ENFORCE_EQ(
            dims.size(), 4, "MIOPEN currently only supports 4-dimensional tensor descriptors");
type_ = type;
dims_ = dims;
MIOPEN_ENFORCE(
miopenSet4dTensorDescriptor(desc_, type, dims_[0], dims_[1], dims_[2], dims_[3]));
if(changed)
*changed = true;
return desc_;
}
template <typename T>
inline miopenTensorDescriptor_t Descriptor(const StorageOrder& order, const vector<int>& dims)
{
return Descriptor(miopenTypeWrapper<T>::type, dims, nullptr);
}
private:
miopenTensorDescriptor_t desc_;
miopenDataType_t type_;
vector<int> dims_;
C10_DISABLE_COPY_AND_ASSIGN(miopenTensorDescWrapper);
};
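// Example (illustrative sketch): reusing a cached 4-d descriptor; the NCHW
// dimensions below are hypothetical.
//
//   caffe2::miopenTensorDescWrapper desc_wrapper;
//   bool changed = false;
//   miopenTensorDescriptor_t desc = desc_wrapper.Descriptor(
//       caffe2::miopenTypeWrapper<float>::type, {8, 3, 224, 224}, &changed);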
} // namespace caffe2
#endif // CAFFE2_CORE_COMMON_MIOPEN_H_
| 6,281
| 34.094972
| 99
|
h
|
null |
pytorch-main/caffe2/core/hip/miopen_wrapper.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef CAFFE2_CORE_MIOPEN_WRAPPERS_H_
#define CAFFE2_CORE_MIOPEN_WRAPPERS_H_
#include "caffe2/core/hip/common_miopen.h"
#include "caffe2/core/hip/context_gpu.h"
#include <c10/hip/HIPGuard.h>
namespace caffe2 {
class MIOPENWrapper;
/**
 * MIOPENWorkspace is a wrapper around a raw HIP pointer that holds the miopen
* scratch space. This struct is meant to be only used in MIOPENWrapper to
* provide a program-wide scratch space for MIOPEN. The reason behind it is that
* miopen function calls are usually very efficient, hence one probably does not
* want to run multiple miopen calls at the same time. As a result, one should
* not need more than one miopen workspace per device.
*/
struct MIOPENWorkspace
{
~MIOPENWorkspace() noexcept {}
void* get(size_t nbytes)
{
if(nbytes_ < nbytes)
{
reset();
data_ = HIPContext::New(nbytes);
nbytes_ = nbytes;
}
CAFFE_ENFORCE_GE(nbytes_, nbytes);
return data_.get();
}
void reset()
{
data_.clear();
nbytes_ = 0;
}
private:
at::DataPtr data_;
size_t nbytes_{0};
};
// MIOPENState is the owner of the MIOPENWorkspace, and serializes all
// executions of operations that use the state onto its own stream
// (so multiple Net workers can reuse the same workspace from
// different threads and HIP streams).
class MIOPENState
{
public:
explicit MIOPENState(size_t gpu_id) : gpu_id_(gpu_id)
{
HIPGuard g(gpu_id_);
MIOPEN_ENFORCE(miopenCreate(&miopen_handle_));
HIP_ENFORCE(hipEventCreate(&before_));
HIP_ENFORCE(hipEventCreate(&after_));
HIP_ENFORCE(hipStreamCreate(&stream_));
MIOPEN_ENFORCE(miopenSetStream(miopen_handle_, stream_));
}
~MIOPENState() noexcept
{
HIPGuard g(gpu_id_);
MIOPEN_CHECK(miopenDestroy(miopen_handle_));
HIP_CHECK(hipStreamDestroy(stream_));
HIP_CHECK(hipEventDestroy(after_));
HIP_CHECK(hipEventDestroy(before_));
}
miopenHandle_t& miopen_handle() { return miopen_handle_; }
MIOPENWorkspace& workspace() { return workspace_; }
template <typename F>
void execute(hipStream_t stream, F&& f)
{
HIP_ENFORCE(hipEventRecord(before_, stream));
HIP_ENFORCE(hipStreamWaitEvent(stream_, before_, 0));
f(this);
HIP_ENFORCE(hipEventRecord(after_, stream_));
HIP_ENFORCE(hipStreamWaitEvent(stream, after_, 0));
}
private:
miopenHandle_t miopen_handle_{nullptr};
hipEvent_t before_{nullptr};
hipEvent_t after_{nullptr};
hipStream_t stream_{nullptr};
MIOPENWorkspace workspace_;
size_t gpu_id_{0};
C10_DISABLE_COPY_AND_ASSIGN(MIOPENState);
};
/**
* MIOPENWrapper is a class that wraps the miopen handles and miopen workspaces.
*
* The wrapper ensures that for each thread and each gpu, there is one
* identical miopen handle, which is also associated with the thread-local
* per-device hip stream. The wrapper also hosts the device-specific miopen
* workspace (scratch space for some miopen functions).
*
*/
class MIOPENWrapper
{
public:
/**
* Creates a miopen wrapper associated with a HIPContext object. Note that
* the HIPContext object should outlive the MIOPENWrapper.
*/
explicit MIOPENWrapper(HIPContext* context) : context_(context) {}
/**
* Returns the inline miopen handle that executes on the current
* thread's hip_stream.
*/
miopenHandle_t inline_miopen_handle() { return context_->miopen_handle(); }
// Executes the closure F on the MIOPENState associated with state_idx
template <typename F>
void with_miopen_state(size_t state_idx, F&& f)
{
CAFFE_ENFORCE(state_idx < CAFFE2_COMPILE_TIME_MAX_MIOPEN_STATES, "Invalid state_idx");
auto& sync_state = miopen_states()[context_->device_id()][state_idx];
HIPGuard dg(context_->device_id());
// We need to serialize execution on the MIOPENState as we can't
        // allow multiple threads to race through the hipEventRecord
// calls (so a worker thread might wait on another worker thread's
// execution)
std::lock_guard<std::mutex> g(sync_state.mutex);
if(!sync_state.state.get())
{
sync_state.state.reset(new MIOPENState(context_->device_id()));
}
TORCH_CHECK_NOTNULL(sync_state.state.get())->execute(context_->hip_stream(), f);
}
protected:
    // Pointer to an external HIP context that the miopen wrapper will use.
HIPContext* context_;
static constexpr size_t CAFFE2_COMPILE_TIME_MAX_MIOPEN_STATES = 4;
struct SyncedMIOPENState
{
std::mutex mutex;
std::unique_ptr<MIOPENState> state;
};
using PerGPUMIOPENStates = std::array<
std::array<SyncedMIOPENState, CAFFE2_COMPILE_TIME_MAX_MIOPEN_STATES>,
C10_COMPILE_TIME_MAX_GPUS>;
static PerGPUMIOPENStates& miopen_states();
C10_DISABLE_COPY_AND_ASSIGN(MIOPENWrapper);
};
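// Example (illustrative sketch): serializing work onto one of the per-device
// MIOPEN states from an operator; `context` is assumed to be a valid
// HIPContext* and the scratch size is hypothetical.
//
//   caffe2::MIOPENWrapper wrapper(context);
//   wrapper.with_miopen_state(0, [&](caffe2::MIOPENState* state) {
//     void* scratch = state->workspace().get(4096);
//     // ... issue miopen calls on state->miopen_handle() using scratch ...
//   });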
}; // namespace caffe2
#endif
| 5,150
| 29.844311
| 94
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Converters/Dot.h
|
#ifndef NOM_CONVERTERS_DOT_H
#define NOM_CONVERTERS_DOT_H
#include "c10/util/irange.h"
#include "nomnigraph/Graph/Algorithms.h"
#include "nomnigraph/Graph/Graph.h"
#include "nomnigraph/Support/Casting.h"
#include <functional>
#include <iostream>
#include <map>
#include <queue>
#include <sstream>
#include <unordered_map>
#include <vector>
namespace nom {
namespace converters {
template <typename GraphT>
class DotGenerator {
public:
using NodePrinter = std::function<std::map<std::string, std::string>(
typename GraphT::NodeRef)>;
using EdgePrinter = std::function<std::map<std::string, std::string>(
typename GraphT::EdgeRef)>;
static std::map<std::string, std::string> defaultEdgePrinter(
typename GraphT::EdgeRef) {
std::map<std::string, std::string> labelMap;
return labelMap;
}
DotGenerator(NodePrinter nodePrinter, EdgePrinter edgePrinter)
: nodePrinter_(nodePrinter), edgePrinter_(edgePrinter) {}
// Convert a graph (with optional subgraphs cluster) to dot.
std::string convert(
const typename GraphT::SubgraphType& sg,
const std::vector<typename GraphT::SubgraphType*>& subgraphs) const {
std::ostringstream output;
output << "digraph G {\nrankdir=LR\n";
for (const auto& node : sg.getNodes()) {
generateNode(node, sg, output);
}
for (const auto i : c10::irange(subgraphs.size())) {
const auto& subgraph = subgraphs[i];
output << "subgraph cluster" << i << " {\n";
output << "style=dotted;\n";
for (const auto& node : subgraph->getNodes()) {
output << node;
output << ";\n";
}
output << "}\n";
}
output << "}";
return output.str();
}
// Convert a subgraph to dot.
std::string convert(const typename GraphT::SubgraphType& sg) const {
std::ostringstream output;
output << "digraph G {\nrankdir=LR\n";
for (const auto& node : sg.getNodes()) {
generateNode(node, sg, output);
}
output << "}";
return output.str();
}
/**
* NOTE No subgraph support
* Converts given graph into DOT string w/operator input-order preserved
* Assumes graph is acyclic, nodes are unique_ptr
* (1) Get & print input nodes (nodes w/o parents)
* - Node: <p0>[shape=record, label="{{Data In}|{<p0>*}}"]
* (2) Find operators w/BFS from input nodes
* (3) Print operator records & incoming edges
* - Node: op_ptr[shape=record, label="{{<i0>*|<i1>*|...}|{op}|{<o0>*}"]
* - Edge: <parent_node_ptr>:<ref>:s -> <this_node_ptr>:<ref>:n
*/
std::string convertStruct(const typename GraphT::SubgraphType& sg) const {
std::ostringstream output;
output << "digraph G {\nrankdir=LR\n";
// Get input nodes (nodes w/o parents)
std::unordered_map<typename GraphT::NodeRef, int>
nodeDepthMap; // Touched nodes for BFS
std::queue<typename GraphT::NodeRef> workList; // Init w/parentless nodes
for (const auto& node : sg.getNodes()) {
if (node->getInEdges().size() == 0 && node->getOutEdges().size() > 0) {
// Add input node to dot string
output << (uint64_t)node << "[shape=record, label=\"{{Data In}|{<"
<< (uint64_t)node << ">";
for (const auto& attr : nodePrinter_(node)) {
output << attr.second;
}
output << "}}\"]\n";
// Track input node
nodeDepthMap[node] = 0;
workList.push(node);
}
}
// BFS to get operator nodes
std::vector<typename GraphT::NodeRef> ops;
while (workList.size() > 0) {
const auto& node = workList.front();
for (const auto& edge : node->getOutEdges()) {
// Enqueue child iff not touched yet
const auto& child = edge->head();
if (!nodeDepthMap.count(child)) {
nodeDepthMap[child] = nodeDepthMap[node] + 1;
workList.push(child);
if (nodeDepthMap[child] % 2 == 1) { // "odd" ==> operator
ops.emplace_back(child);
}
        }
}
workList.pop();
}
// Finalize output
output << getOperatorSubtreeDotString(ops) << "}\n";
return output.str();
}
private:
NodePrinter nodePrinter_;
EdgePrinter edgePrinter_;
/**
* Get DOT string record of given operator and DOT string of its input edges
* @param op operator to parse
* @param nodePrinter node attribute extractor
* @return '\n' sep string of operator & input edges
*/
std::string getOperatorDotString(typename GraphT::NodeRef op) const {
std::ostringstream output;
std::ostringstream record; // Operator node record
record << (uint64_t)op << "[shape=record, label=\"{{";
// Input refs
std::string sep = "";
for (const auto& opInEdge : op->getInEdges()) {
// Draw edge between prev. op output to cur. op input
const auto& input = opInEdge->tail();
int inputInEdgeCt = input->getInEdges().size();
if (inputInEdgeCt == 0) { // Node @ top of subgraph
output << (uint64_t)input;
} else { // Node between operators
assert(inputInEdgeCt == 1);
output << (uint64_t)input->getInEdges().at(0)->tail();
}
output << ":" << (uint64_t)input << ":s -> " << (uint64_t)op << ":"
<< (uint64_t)input << ":n\n";
// Add input to operator record
record << sep << "<" << (uint64_t)input << ">";
for (const auto& attr : nodePrinter_(input)) {
record << attr.second;
}
sep = "|";
}
// Extract operator name
record << "}|{";
for (const auto& attr : nodePrinter_(op)) {
record << attr.second;
}
record << "}|{";
// Output refs
sep = "";
for (const auto& edge : op->getOutEdges()) {
const auto& child = edge->head();
record << sep << "<" << (uint64_t)child << ">";
for (const auto& attr : nodePrinter_(child)) {
record << attr.second;
}
sep = "|";
}
// Append record to output string
output << record.str() << "}}\"]\n";
return output.str();
}
/**
* Prints DOT string of given operator subgraph
* @param ops operators in a given subgraph
* @param nodePrinter node attribute extractor
* @return DOT string that renders operators subgraph
*/
std::string getOperatorSubtreeDotString(
std::vector<typename GraphT::NodeRef> ops) const {
std::ostringstream output;
for (const auto& op : ops) {
output << getOperatorDotString(op);
}
return output.str();
}
// Generate dot string for a node.
void generateNode(
typename GraphT::NodeRef node,
const typename GraphT::SubgraphType& sg,
std::ostringstream& output) const {
output << (uint64_t)node; // dot doesn't like hex
output << "[";
for (const auto& attrib : nodePrinter_(node)) {
output << attrib.first << "=\"" << attrib.second << "\",";
}
output << "];\n";
for (const auto& edge : node->getOutEdges()) {
if (!sg.hasEdge(edge)) {
continue;
}
output << (uint64_t)edge->tail() << " -> " << (uint64_t)edge->head();
output << "[";
for (const auto& attrib : edgePrinter_(edge)) {
output << attrib.first << "=\"" << attrib.second << "\",";
}
output << "];\n";
}
}
};
// Convert a graph to dot string.
template <typename GraphT>
std::string convertToDotString(
GraphT* g,
typename DotGenerator<GraphT>::NodePrinter nodePrinter,
typename DotGenerator<GraphT>::EdgePrinter edgePrinter =
DotGenerator<GraphT>::defaultEdgePrinter) {
auto d = DotGenerator<GraphT>(nodePrinter, edgePrinter);
return d.convert(algorithm::createSubgraph(g), {});
}
// Convert a graph to dot string and annotate subgraph clusters.
template <typename GraphT>
std::string convertToDotString(
GraphT* g,
const std::vector<typename GraphT::SubgraphType*>& subgraphs,
typename DotGenerator<GraphT>::NodePrinter nodePrinter,
typename DotGenerator<GraphT>::EdgePrinter edgePrinter =
DotGenerator<GraphT>::defaultEdgePrinter) {
auto d = DotGenerator<GraphT>(nodePrinter, edgePrinter);
return d.convert(algorithm::createSubgraph(g), subgraphs);
}
// Convert a subgraph to dot string.
template <typename GraphT>
std::string convertToDotString(
const typename GraphT::SubgraphType& sg,
typename DotGenerator<GraphT>::NodePrinter nodePrinter,
typename DotGenerator<GraphT>::EdgePrinter edgePrinter =
DotGenerator<GraphT>::defaultEdgePrinter) {
auto d = DotGenerator<GraphT>(nodePrinter, edgePrinter);
return d.convert(sg);
}
template <typename GraphT>
std::string convertToDotRecordString(
GraphT* g,
typename DotGenerator<GraphT>::NodePrinter nodePrinter,
typename DotGenerator<GraphT>::EdgePrinter edgePrinter =
DotGenerator<GraphT>::defaultEdgePrinter) {
auto d = DotGenerator<GraphT>(nodePrinter, edgePrinter);
return d.convertStruct(algorithm::createSubgraph(g));
}
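// Example (illustrative sketch): rendering a two-node graph of std::string
// labels; the node printer fills the "label" attribute consumed above.
//
//   nom::Graph<std::string> g;
//   auto a = g.createNode(std::string("A"));
//   auto b = g.createNode(std::string("B"));
//   g.createEdge(a, b);
//   std::string dot = nom::converters::convertToDotString(
//       &g, [](nom::Graph<std::string>::NodeRef n) {
//         std::map<std::string, std::string> attrs;
//         attrs["label"] = n->data();
//         return attrs;
//       });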
} // namespace converters
} // namespace nom
#endif // NOM_CONVERTERS_DOT_H
| 9,062
| 31.483871
| 78
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Graph/Algorithms.h
|
//===- nomnigraph/Graph/Algorithms.h - Graph algorithms ---------*- C++ -*-===//
//
// TODO Licensing.
//
//===----------------------------------------------------------------------===//
//
// This file defines algorithms that only require Graph level annotations.
// Tarjans is defined.
//
//===----------------------------------------------------------------------===//
#ifndef NOM_GRAPH_ALGORITHMS_H
#define NOM_GRAPH_ALGORITHMS_H
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "nomnigraph/Graph/BinaryMatchImpl.h"
#include "nomnigraph/Graph/Graph.h"
#include "nomnigraph/Graph/TarjansImpl.h"
#include "nomnigraph/Graph/TopoSort.h"
namespace nom {
namespace algorithm {
/// \brief Helper for dominator tree finding.
template <typename G>
void reachable(
typename G::NodeRef root,
typename G::NodeRef ignored,
std::unordered_set<typename G::NodeRef>* seen) {
seen->insert(root);
for (const auto& outEdge : root->getOutEdges()) {
auto& newNode = outEdge->head();
if (newNode != ignored && (seen->find(newNode) == seen->end())) {
reachable<G>(newNode, ignored, seen);
}
}
}
/// \brief A dominator tree finder. Runs in O(M*N), there exist
/// more efficient implementations.
///
/// High level description of the algorithm:
///
/// 1) Find a map of {node}->{dominator set}
/// --
/// allNodes = reachable(root)
/// for n in nodes:
/// temporarily delete n from the graph
/// dom[n] = allNodes - reachable(root)
/// restore n to the graph
///
/// 2) Construct tree from that map
/// --
/// starting at root, BFS in dominatorMap:
/// if newnode has inedge, delete it
/// draw edge from parent to child
template <typename G>
Graph<typename G::NodeRef> dominatorTree(
G* g,
typename G::NodeRef source = nullptr) {
assert(
g->getMutableNodes().size() > 0 &&
"Cannot find dominator tree of empty graph.");
if (!source) {
auto rootSCC = tarjans(g).back();
assert(
rootSCC.getNodes().size() == 1 &&
"Cannot determine source node topologically, please specify one.");
for (auto& node : rootSCC.getNodes()) {
source = node;
break;
}
}
Graph<typename G::NodeRef> tree;
std::unordered_map<
typename G::NodeRef,
typename Graph<typename G::NodeRef>::NodeRef>
mapToTreeNode;
std::unordered_map<
typename G::NodeRef,
std::unordered_set<typename G::NodeRef>>
dominatorMap;
for (auto node : g->getMutableNodes()) {
mapToTreeNode[node] = tree.createNode(std::move(node));
if (node == source) {
continue;
}
dominatorMap[source].insert(node);
}
for (const auto& node : g->getMutableNodes()) {
if (node == source) {
continue;
}
std::unordered_set<typename G::NodeRef> seen;
std::unordered_set<typename G::NodeRef> dominated;
reachable<G>(source, node, &seen);
for (auto testNode : dominatorMap[source]) {
if (seen.find(testNode) == seen.end() && testNode != node) {
dominated.insert(testNode);
}
}
dominatorMap[node] = dominated;
}
std::unordered_set<typename G::NodeRef> nextPass;
nextPass.insert(source);
while (nextPass.size()) {
for (auto parent_iter = nextPass.begin(); parent_iter != nextPass.end();) {
auto parent = *parent_iter;
for (auto child : dominatorMap[parent]) {
while (mapToTreeNode[child]->getInEdges().size()) {
tree.deleteEdge(mapToTreeNode[child]->getInEdges().front());
}
tree.createEdge(mapToTreeNode[parent], mapToTreeNode[child]);
if (dominatorMap.find(child) != dominatorMap.end()) {
nextPass.insert(child);
}
}
nextPass.erase(parent_iter++);
}
}
return tree;
}
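// Example (illustrative sketch): dominator tree of a small diamond-shaped
// graph rooted at `entry`; each of left, right and exit is immediately
// dominated by entry.
//
//   nom::Graph<std::string> g;
//   auto entry = g.createNode(std::string("entry"));
//   auto left = g.createNode(std::string("left"));
//   auto right = g.createNode(std::string("right"));
//   auto exit = g.createNode(std::string("exit"));
//   g.createEdge(entry, left);
//   g.createEdge(entry, right);
//   g.createEdge(left, exit);
//   g.createEdge(right, exit);
//   auto tree = nom::algorithm::dominatorTree(&g, entry);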
/// \brief Map all nodes in the graph to their immediate dominators.
template <typename G>
std::unordered_map<typename G::NodeRef, typename G::NodeRef>
immediateDominatorMap(G* g, typename G::NodeRef source = nullptr) {
std::unordered_map<typename G::NodeRef, typename G::NodeRef> idomMap;
auto idomTree = dominatorTree(g, source);
for (auto node : idomTree.getMutableNodes()) {
// Sanity check, really should never happen.
assert(
        node->getInEdges().size() <= 1 &&
        "Invalid dominator tree generated from graph, cannot determine idom map.");
// In degenerate cases, or for the root node, we self dominate.
if (node->getInEdges().size() == 0) {
idomMap[node->data()] = node->data();
} else {
auto idom = node->getInEdges()[0]->tail();
idomMap[node->data()] = idom->data();
}
}
return idomMap;
}
/// \brief Map all nodes to their dominance frontiers: the set of nodes that
/// the given node does not strictly dominate but for which it dominates at
/// least one immediate predecessor. This is useful as it is the exact
/// location for the insertion of phi nodes in SSA representation.
template <typename G>
std::unordered_map<typename G::NodeRef, std::unordered_set<typename G::NodeRef>>
dominanceFrontierMap(G* g, typename G::NodeRef source = nullptr) {
auto idomMap = immediateDominatorMap(g, source);
std::unordered_map<
typename G::NodeRef,
std::unordered_set<typename G::NodeRef>>
domFrontierMap;
for (const auto node : g->getMutableNodes()) {
if (node->getInEdges().size() < 2) {
continue;
}
for (auto inEdge : node->getInEdges()) {
auto predecessor = inEdge->tail();
// This variable will track all the way up the dominator tree.
auto runner = predecessor;
while (runner != idomMap[node]) {
domFrontierMap[runner].insert(node);
runner = idomMap[runner];
}
}
}
return domFrontierMap;
}
/// \brief Induces edges on a subgraph by connecting all nodes
/// that are connected in the original graph.
template <typename SubgraphType>
void induceEdges(SubgraphType* sg) {
for (auto& node : sg->getNodes()) {
// We can scan only the inEdges
for (auto& inEdge : node->getInEdges()) {
if (sg->hasNode(inEdge->tail())) {
sg->addEdge(inEdge);
}
}
}
}
/// \brief Create subgraph object from graph.
template <typename GraphType>
typename GraphType::SubgraphType createSubgraph(GraphType* g) {
typename GraphType::SubgraphType subgraph;
for (auto& node : g->getMutableNodes()) {
subgraph.addNode(node);
}
induceEdges(&subgraph);
return subgraph;
}
} // namespace algorithm
} // namespace nom
#endif // NOM_GRAPH_ALGORITHMS_H
| 6,493
| 29.345794
| 83
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Graph/BinaryMatchImpl.h
|
#ifndef NOM_GRAPH_BINARYMATCHIMPL_H
#define NOM_GRAPH_BINARYMATCHIMPL_H
#include "nomnigraph/Graph/Graph.h"
namespace nom {
namespace algorithm {
/// \brief A binary graph matching algorithm based on Kahn's algorithm.
template <typename F, typename T, typename... U>
std::vector<Subgraph<T, U...>> binaryMatch(Graph<T, U...>* g, F condition) {
using G = Graph<T, U...>;
auto swappableCondition = [&](typename G::NodeRef m, bool match) {
return match ? condition(m) : !condition(m);
};
auto edges = g->getMutableEdges();
std::unordered_set<typename G::EdgeRef> edgeSet(edges.begin(), edges.end());
// Topologically sorted matching subgraphs.
std::vector<Subgraph<T, U...>> sortedNodes;
// Find the initial frontier.
std::vector<typename G::NodeRef> frontier;
std::vector<typename G::NodeRef> nextFrontier;
for (auto n : g->getMutableNodes()) {
if (n->getInEdges().size() == 0) {
if (condition(n)) {
frontier.emplace_back(n);
} else {
nextFrontier.emplace_back(n);
}
}
}
auto stillHasInEdge = [&](typename G::NodeRef m) {
for (auto inEdge : m->getInEdges()) {
if (edgeSet.count(inEdge)) {
return true;
}
}
return false;
};
// This boolean will store which type of match we are looking for.
// If true we are looking for the condition to return true,
// if false we are looking for the condition to return false
bool match = true;
// Only if we currently have a frontier should we add a subgraph to the
// vector of matches.
if (frontier.size()) {
sortedNodes.emplace_back();
}
// As long as there is a frontier we continue the algorithm.
while (frontier.size() || nextFrontier.size()) {
// Swap everything if we exhausted the current frontier.
if (!frontier.size() && nextFrontier.size()) {
frontier = nextFrontier;
nextFrontier.clear();
match = !match;
if (match) {
sortedNodes.emplace_back();
}
}
// The main algorithm is inspired by
// https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
// originally written by @yinghai
auto n = frontier.back();
if (match) {
sortedNodes.back().addNode(n);
}
frontier.pop_back();
for (auto outEdge : n->getOutEdges()) {
auto m = outEdge->head();
if (!edgeSet.count(outEdge)) {
continue;
}
edgeSet.erase(outEdge);
if (!stillHasInEdge(m)) {
if (swappableCondition(m, match)) {
frontier.emplace_back(m);
} else {
nextFrontier.emplace_back(m);
}
}
}
}
if (edgeSet.size()) {
assert(
0 &&
"Invalid graph for Kahn's algorithm, cycle detected. Please use Tarjans.");
}
return sortedNodes;
}
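// Example (illustrative sketch): splitting an acyclic int graph into
// topologically ordered subgraphs whose nodes satisfy a predicate (here:
// even values).
//
//   nom::Graph<int> g;
//   auto a = g.createNode(1);
//   auto b = g.createNode(2);
//   g.createEdge(a, b);
//   auto matches = nom::algorithm::binaryMatch(
//       &g, [](nom::Graph<int>::NodeRef n) { return n->data() % 2 == 0; });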
} // namespace algorithm
} // namespace nom
#endif // NOM_GRAPH_BINARYMATCHIMPL_H
| 2,874
| 25.376147
| 84
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Graph/Graph.h
|
//===- nomnigraph/Graph/Graph.h - Basic graph implementation ----*- C++ -*-===//
//
// This file defines a basic graph API for generic and flexible use with
// graph algorithms.
//
//===----------------------------------------------------------------------===//
#ifndef NOM_GRAPH_GRAPH_H
#define NOM_GRAPH_GRAPH_H
#include "caffe2/core/common.h"
#include "nomnigraph/Support/Common.h"
#include <algorithm>
#include <iterator>
#include <list>
#include <unordered_set>
#include <utility>
#include <vector>
#include <assert.h>
#include <stdio.h>
#define DEBUG_PRINT(...)
namespace nom {
template <typename T, typename... U>
class Graph;
template <typename T, typename... U>
class Node;
// Template types:
// T : Data stored within a node.
// U...: Data stored within an edge. When this type is not
// specified, an empty StorageType is used. If it is
// specified, only a single type should be given (as supported
// by the underlying StorageType class).
// \brief Edge within a Graph.
template <typename T, typename... U>
class Edge : public ::StorageType<U...> {
public:
using NodeRef = typename Graph<T, U...>::NodeRef;
Edge(NodeRef tail, NodeRef head, U... args)
: ::StorageType<U...>(std::forward<U...>(args)...),
tail_(tail),
head_(head) {
DEBUG_PRINT("Creating instance of Edge: %p\n", this);
}
const NodeRef& tail() const {
return tail_;
}
const NodeRef& head() const {
return head_;
}
void setTail(NodeRef n) {
tail_ = n;
}
void setHead(NodeRef n) {
head_ = n;
}
private:
NodeRef tail_;
NodeRef head_;
friend class Graph<T, U...>;
};
// \brief Node within a Graph.
template <typename T, typename... U>
class Node : public ::StorageType<T>, public Notifier<Node<T, U...>> {
public:
using NodeRef = typename Graph<T, U...>::NodeRef;
using EdgeRef = typename Graph<T, U...>::EdgeRef;
/// \brief Create a node with data.
explicit Node(T&& data) : ::StorageType<T>(std::move(data)) {
DEBUG_PRINT("Creating instance of Node: %p\n", this);
}
/// \brief Create an empty node.
explicit Node() : ::StorageType<T>() {}
Node(Node&&) = default;
Node(const Node&) = delete;
Node& operator=(const Node&) = delete;
/// \brief Adds an edge by reference to known in-edges.
/// \p e A reference to an edge that will be added as an in-edge.
void addInEdge(EdgeRef e) {
inEdges_.emplace_back(e);
}
/// \brief Adds an edge by reference to known out-edges.
/// \p e A reference to an edge that will be added as an out-edge.
void addOutEdge(EdgeRef e) {
outEdges_.emplace_back(e);
}
/// \brief Removes an edge by reference to known in-edges.
/// \p e A reference to an edge that will be removed from in-edges.
void removeInEdge(EdgeRef e) {
removeEdgeInternal(inEdges_, e);
}
/// \brief Removes an edge by reference to known out-edges.
/// \p e A reference to an edge that will be removed from out-edges.
void removeOutEdge(EdgeRef e) {
removeEdgeInternal(outEdges_, e);
}
const std::vector<EdgeRef>& getOutEdges() const {
return outEdges_;
}
const std::vector<EdgeRef>& getInEdges() const {
return inEdges_;
}
void setInEdges(std::vector<EdgeRef> edges) {
inEdges_ = std::move(edges);
}
void setOutEdges(std::vector<EdgeRef> edges) {
outEdges_ = std::move(edges);
}
private:
std::vector<EdgeRef> inEdges_;
std::vector<EdgeRef> outEdges_;
friend class Graph<T, U...>;
void removeEdgeInternal(std::vector<EdgeRef>& edges, EdgeRef e) {
auto iter = std::find(edges.begin(), edges.end(), e);
assert(
iter != edges.end() &&
"Attempted to remove edge that isn't connected to this node");
edges.erase(iter);
}
};
/// \brief Effectively a constant reference to a graph.
///
/// \note A Subgraph could actually point to an entire Graph.
///
/// Subgraphs can only contain references to nodes/edges in a Graph.
/// They are technically mutable, but this should be viewed as a construction
/// helper rather than a fact to be exploited. There are no deleters,
/// for example.
///
template <typename T, typename... U>
class Subgraph {
public:
Subgraph() {
DEBUG_PRINT("Creating instance of Subgraph: %p\n", this);
}
using NodeRef = typename Graph<T, U...>::NodeRef;
using EdgeRef = typename Graph<T, U...>::EdgeRef;
void addNode(NodeRef n) {
nodes_.insert(n);
}
bool hasNode(NodeRef n) const {
return nodes_.count(n) != 0;
}
void removeNode(NodeRef n) {
nodes_.erase(n);
}
void addEdge(EdgeRef e) {
edges_.insert(e);
}
bool hasEdge(EdgeRef e) const {
return edges_.count(e) != 0;
}
void removeEdge(EdgeRef e) {
edges_.erase(e);
}
const std::unordered_set<NodeRef>& getNodes() const {
return nodes_;
}
size_t getNodesCount() const {
return (size_t)nodes_.size();
}
const std::unordered_set<EdgeRef>& getEdges() const {
return edges_;
}
private:
std::unordered_set<NodeRef> nodes_;
std::unordered_set<EdgeRef> edges_;
void printEdges() {
for (const auto& edge : edges_) {
printf("Edge: %p (%p -> %p)\n", &edge, edge->tail(), edge->head());
}
}
void printNodes() const {
for (const auto& node : nodes_) {
printf("Node: %p\n", node);
}
}
};
/// \brief A simple graph implementation
///
/// Everything is owned by the graph to simplify storage concerns.
///
template <typename T, typename... U>
class Graph {
public:
using SubgraphType = Subgraph<T, U...>;
using NodeRef = Node<T, U...>*;
using EdgeRef = Edge<T, U...>*;
Graph() {
DEBUG_PRINT("Creating instance of Graph: %p\n", this);
}
Graph(const Graph&) = delete;
Graph(Graph&&) = default;
Graph& operator=(Graph&&) = default;
~Graph() {}
/// \brief Creates a node and retains ownership of it.
/// \p data An rvalue of the data being held in the node.
/// \return A reference to the node created.
NodeRef createNode(T&& data) {
return createNodeInternal(Node<T, U...>(std::move(data)));
}
template <class Arg>
NodeRef createNode(Arg&& arg) {
return createNode(T(std::forward<Arg>(arg)));
}
NodeRef createNode() {
return createNodeInternal(Node<T, U...>());
}
// Note:
// The move functions below are unsafe. Use them with caution
// and be sure to call isValid() after each use.
// Move a node from this graph to the destGraph
void moveNode(NodeRef node, Graph<T, U...>* destGraph) {
assert(hasNode(node));
for (auto it = nodes_.begin(); it != nodes_.end(); ++it) {
if (&(*it) == node) {
std::list<Node<T, U...>>& destNodes = destGraph->nodes_;
destNodes.splice(destNodes.end(), nodes_, it);
nodeRefs_.erase(node);
destGraph->nodeRefs_.insert(node);
break;
}
}
}
// Move an edge from this graph to the destGraph
void moveEdge(EdgeRef edge, Graph<T, U...>* destGraph) {
assert(hasEdge(edge));
assert(destGraph->hasNode(edge->tail()));
assert(destGraph->hasNode(edge->head()));
std::list<Edge<T, U...>>& destEdges = destGraph->edges_;
for (auto it = edges_.begin(); it != edges_.end(); ++it) {
if (&(*it) == edge) {
destEdges.splice(destEdges.end(), edges_, it);
break;
}
}
}
// Move entire subgraph to destGraph.
// Be sure to delete in/out edges from this graph first.
void moveSubgraph(
const Subgraph<T, U...>& subgraph,
Graph<T, U...>* destGraph) {
auto sg = subgraph; // Copy to check that all nodes and edges are matched
std::list<Edge<T, U...>>& destEdges = destGraph->edges_;
for (auto it = nodes_.begin(); it != nodes_.end(); ++it) {
auto node = &(*it);
if (sg.hasNode(node)) {
std::list<Node<T, U...>>& destNodes = destGraph->nodes_;
destNodes.splice(destNodes.end(), nodes_, it--);
nodeRefs_.erase(node);
destGraph->nodeRefs_.insert(node);
sg.removeNode(node);
}
}
for (auto it = edges_.begin(); it != edges_.end(); ++it) {
auto edge = &(*it);
if (sg.hasEdge(edge)) {
assert(destGraph->hasNode(edge->tail()));
assert(destGraph->hasNode(edge->head()));
destEdges.splice(destEdges.end(), edges_, it--);
sg.removeEdge(edge);
}
}
assert(sg.getNodes().size() == 0);
assert(sg.getEdges().size() == 0);
}
// Validates the graph. Returns true if the graph is valid
// and false if any node or edge referenced in the graph
// is not actually present in the graph.
bool isValid() {
for (auto& node : getMutableNodes()) {
for (auto& inEdge : node->getInEdges()) {
if (!hasEdge(inEdge)) {
DEBUG_PRINT("Invalid inEdge %p on node %p\n", inEdge, node);
return false;
}
}
for (auto& outEdge : node->getOutEdges()) {
if (!hasEdge(outEdge)) {
DEBUG_PRINT("invalid outEdge %p on node %p\n", outEdge, node);
return false;
}
}
// Check validity of nodeRefs_
if (!hasNode(node)) {
DEBUG_PRINT("Invalid node %p\n", node);
return false;
}
}
for (auto& edge : getMutableEdges()) {
if (!hasNode(edge->tail())) {
DEBUG_PRINT("Invalid tail on edge %p\n", edge);
return false;
}
if (!hasNode(edge->head())) {
DEBUG_PRINT("Invalid head on edge %p\n", edge);
return false;
}
}
return true;
}
// Swap two nodes.
// Any edge V -> N1 becomes V -> N2, and N1 -> V becomes N2 -> V.
void swapNodes(NodeRef n1, NodeRef n2) {
// First rectify the edges
for (auto& inEdge : n1->getInEdges()) {
inEdge->setHead(n2);
}
for (auto& outEdge : n1->getOutEdges()) {
outEdge->setTail(n2);
}
for (auto& inEdge : n2->getInEdges()) {
inEdge->setHead(n1);
}
for (auto& outEdge : n2->getOutEdges()) {
outEdge->setTail(n1);
}
// Then simply copy the edge vectors around
auto n1InEdges = n1->getInEdges();
auto n1OutEdges = n1->getOutEdges();
auto n2InEdges = n2->getInEdges();
auto n2OutEdges = n2->getOutEdges();
n1->setOutEdges(n2OutEdges);
n1->setInEdges(n2InEdges);
n2->setOutEdges(n1OutEdges);
n2->setInEdges(n1InEdges);
}
/// \brief Replace a node in the graph with another node.
  /// \note The node replaced simply has its edges cut, but is not
/// deleted from the graph. Call Graph::deleteNode to delete it.
/// \p oldNode A node to be replaced in the graph.
/// \p newNode The node that inherit the old node's in-edges and out-edges.
void replaceNode(const NodeRef& oldNode, const NodeRef& newNode) {
replaceInEdges(oldNode, newNode);
replaceOutEdges(oldNode, newNode);
}
// All out-edges oldNode -> V will be replaced with newNode -> V
void replaceOutEdges(const NodeRef& oldNode, const NodeRef& newNode) {
const auto edges = oldNode->getOutEdges();
for (const auto& edge : edges) {
edge->setTail(newNode);
oldNode->removeOutEdge(edge);
newNode->addOutEdge(edge);
}
}
// All in-edges V -> oldNode will be replaced with V -> newNode
void replaceInEdges(const NodeRef& oldNode, const NodeRef& newNode) {
const auto edges = oldNode->getInEdges();
for (const auto& edge : edges) {
edge->setHead(newNode);
oldNode->removeInEdge(edge);
newNode->addInEdge(edge);
}
}
/// \brief Creates a directed edge and retains ownership of it.
/// \p tail The node that will have this edge as an out-edge.
/// \p head The node that will have this edge as an in-edge.
/// \return A reference to the edge created.
EdgeRef createEdge(NodeRef tail, NodeRef head, U... data) {
DEBUG_PRINT("Creating edge (%p -> %p)\n", tail, head);
this->edges_.emplace_back(
Edge<T, U...>(tail, head, std::forward<U...>(data)...));
EdgeRef e = &this->edges_.back();
head->addInEdge(e);
tail->addOutEdge(e);
return e;
}
/// \brief Get a reference to the edge between two nodes if it exists. Returns
/// nullptr if the edge does not exist.
EdgeRef getEdgeIfExists(NodeRef tail, NodeRef head) const {
for (auto& inEdge : head->getInEdges()) {
if (inEdge->tail() == tail) {
return inEdge;
}
}
return nullptr;
}
/// \brief Returns true if there is an edge between the given two nodes.
bool hasEdge(NodeRef tail, NodeRef head) const {
return getEdgeIfExists(tail, head);
}
bool hasEdge(EdgeRef e) const {
for (auto& edge : edges_) {
if (e == &edge) {
return true;
}
}
return false;
}
/// \brief Get a reference to the edge between two nodes if it exists.
  /// \note Will fail an assertion if the edge does not exist.
EdgeRef getEdge(NodeRef tail, NodeRef head) const {
auto result = getEdgeIfExists(tail, head);
assert(result && "Edge doesn't exist.");
return result;
}
/// \brief Deletes a node from the graph.
/// \param n A reference to the node.
void deleteNode(NodeRef n) {
if (!hasNode(n)) {
return;
}
auto inEdges = n->inEdges_;
for (auto& edge : inEdges) {
deleteEdge(edge);
}
auto outEdges = n->outEdges_;
for (auto& edge : outEdges) {
deleteEdge(edge);
}
for (auto i = nodes_.begin(); i != nodes_.end(); ++i) {
if (&*i == n) {
nodeRefs_.erase(n);
nodes_.erase(i);
break;
}
}
}
// Delete all nodes in the set.
void deleteNodes(const std::unordered_set<NodeRef>& nodes) {
for (auto node : nodes) {
deleteNode(node);
}
}
bool hasNode(NodeRef node) const {
return nodeRefs_.find(node) != nodeRefs_.end();
}
/// \brief Deletes a edge from the graph.
/// \p e A reference to the edge.
void deleteEdge(EdgeRef e) {
e->tail_->removeOutEdge(e);
e->head_->removeInEdge(e);
for (auto i = edges_.begin(); i != edges_.end(); ++i) {
if (&*i == e) {
edges_.erase(i);
break;
}
}
}
const std::vector<NodeRef> getMutableNodes() {
std::vector<NodeRef> result;
for (auto& n : nodes_) {
DEBUG_PRINT("Adding node to mutable output (%p)\n", &n);
result.emplace_back(&n);
}
return result;
}
size_t getNodesCount() const {
return (size_t)nodes_.size();
}
const std::vector<EdgeRef> getMutableEdges() {
std::vector<EdgeRef> result;
for (auto& e : edges_) {
DEBUG_PRINT("Adding edge to mutable output (%p)\n", &e);
result.emplace_back(&e);
}
return result;
}
size_t getEdgesCount() const {
return (size_t)edges_.size();
}
private:
std::list<Node<T, U...>> nodes_;
std::list<Edge<T, U...>> edges_;
std::unordered_set<NodeRef> nodeRefs_;
NodeRef createNodeInternal(Node<T, U...>&& node) {
nodes_.emplace_back(std::move(node));
NodeRef nodeRef = &nodes_.back();
DEBUG_PRINT("Creating node (%p)\n", nodeRef);
nodeRefs_.insert(nodeRef);
return nodeRef;
}
void printEdges() {
for (const auto& edge : edges_) {
printf("Edge: %p (%p -> %p)\n", &edge, edge.tail(), edge.head());
}
}
void printNodes() const {
for (const auto& node : nodes_) {
printf("Node: %p\n", &node);
}
}
};
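// Example (illustrative sketch): basic ownership and mutation with Graph.
//
//   nom::Graph<std::string> g;
//   auto a = g.createNode(std::string("A"));
//   auto b = g.createNode(std::string("B"));
//   g.createEdge(a, b);
//   assert(g.hasEdge(a, b));
//   auto c = g.createNode(std::string("C"));
//   g.replaceNode(b, c);  // the A -> B edge now reads A -> C
//   g.deleteNode(b);      // B had its edges cut above, so remove it explicitly
//   assert(g.isValid());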
} // namespace nom
#endif // NOM_GRAPH_GRAPH_H
| 15,333
| 26.284698
| 80
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Graph/TarjansImpl.h
|
#ifndef NOM_GRAPH_TARJANSIMPL_H
#define NOM_GRAPH_TARJANSIMPL_H
#include <unordered_map>
#include "nomnigraph/Graph/Graph.h"
namespace nom {
namespace algorithm {
template <typename T, typename... U>
struct GraphWrapper {
struct NodeWrapper {
using NodeRef = typename Graph<T, U...>::NodeRef;
NodeWrapper(NodeRef n) : node(n) {}
NodeWrapper() = default;
NodeRef node;
int Index = -1;
int LowLink = -1;
bool OnStack = false;
};
struct EdgeWrapper {
typename Graph<T, U...>::EdgeRef edge;
};
};
/// \brief Tarjans algorithm implementation.
///
/// See details on how the algorithm works here:
/// https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
///
/// The algorithm works by annotating nodes, but we want to be able to
/// handle generic graphs. Thus, we wrap the input graph with nodes that
/// contain data composed of references to the original graph (for later
/// recovery) and the data required for the algorithm (see NodeWrapper).
///
/// We then run the algorithm and return a reverse-topologically sorted
/// vector of strongly connected components in the form of Subgraphs on the Graph.
///
/// \note Head/Tail is used in reverse in Tarjan's early papers.
/// \bug Edges not included in returned subgraphs.
///
template <typename T, typename... U>
class Tarjans {
using NodeWrapper = typename GraphWrapper<T, U...>::NodeWrapper;
using EdgeWrapper = typename GraphWrapper<T, U...>::EdgeWrapper;
using WrappedGraph = Graph<NodeWrapper, EdgeWrapper>;
using WrappedSubgraph = Subgraph<NodeWrapper, EdgeWrapper>;
private:
int Index = 0;
std::vector<typename WrappedGraph::NodeRef> Stack;
Graph<T, U...>* InputGraph;
WrappedGraph WrappedInputGraph;
std::vector<WrappedSubgraph> WrappedSCCs;
public:
/// \brief Constructor wraps the input graph with an annotated graph
/// set up with the datastructures needed for the algorithm.
/// \p g The graph Tarjan's will be run on.
Tarjans(Graph<T, U...>* g) : InputGraph(g) {
// Wrap Graph with node labels
std::unordered_map<
typename Graph<T, U...>::NodeRef,
typename WrappedGraph::NodeRef>
n_to_wrappedNode;
for (const auto& n : InputGraph->getMutableNodes()) {
NodeWrapper wrappedNode(n);
n_to_wrappedNode[n] =
WrappedInputGraph.createNode(std::move(wrappedNode));
}
for (const auto& e : InputGraph->getMutableEdges()) {
EdgeWrapper wrappedEdge = {e};
WrappedInputGraph.createEdge(
n_to_wrappedNode[e->tail()],
n_to_wrappedNode[e->head()],
std::move(wrappedEdge));
}
}
/// \brief Helper function for finding strongly connected components.
/// \p n A reference to a node within the wrapped graph.
void connect(typename WrappedGraph::NodeRef n) {
n->mutableData()->Index = Index;
n->mutableData()->LowLink = Index;
Index++;
Stack.emplace_back(n);
n->mutableData()->OnStack = true;
for (const auto& outEdge : n->getOutEdges()) {
typename WrappedGraph::NodeRef newNode = outEdge->head();
// Check if we've considered this node before.
if (newNode->data().Index == -1) {
connect(newNode);
n->mutableData()->LowLink =
std::min(n->data().LowLink, newNode->data().LowLink);
// Check if newNode is in the SCC.
} else if (newNode->data().OnStack) {
n->mutableData()->LowLink =
std::min(n->data().LowLink, newNode->data().Index);
}
}
// If our node is a root node, pop it from the stack (we've found an SCC)
if (n->data().LowLink == n->data().Index) {
WrappedSubgraph wrappedSCC;
typename WrappedGraph::NodeRef w;
do {
w = Stack.back();
w->mutableData()->OnStack = false;
Stack.pop_back();
wrappedSCC.addNode(w);
} while (w != n);
// Add all the edges into the SCC.
// TODO include edges in the SCC in a smarter way.
const auto& sccNodes = wrappedSCC.getNodes();
for (const auto& sccNode : sccNodes) {
for (const auto& outEdge : sccNode->getOutEdges()) {
if (std::find(sccNodes.begin(), sccNodes.end(), outEdge->head()) !=
sccNodes.end()) {
wrappedSCC.addEdge(outEdge);
}
}
}
WrappedSCCs.emplace_back(wrappedSCC);
}
}
/// \brief Helper function for recovering a valid subgraph output.
  /// \p wrappedSubgraph A wrapped subgraph.
/// \return A subgraph of the original input graph.
///
inline Subgraph<T, U...> unwrapSubgraph(
const WrappedSubgraph& wrappedSubgraph) {
Subgraph<T, U...> s;
for (auto wrappedNode : wrappedSubgraph.getNodes()) {
s.addNode(wrappedNode->data().node);
}
for (auto wrappedEdge : wrappedSubgraph.getEdges()) {
s.addEdge(wrappedEdge->data().edge);
}
return s;
}
std::vector<Subgraph<T, U...>> run() {
for (auto n : WrappedInputGraph.getMutableNodes()) {
if (n->data().Index == -1) {
connect(n);
}
}
std::vector<Subgraph<T, U...>> sccs;
for (auto wrappedSCC : WrappedSCCs) {
sccs.emplace_back(unwrapSubgraph(wrappedSCC));
}
return sccs;
}
};
/// \brief A function wrapper to infer the graph template parameters.
template <typename T, typename... U>
std::vector<Subgraph<T, U...>> tarjans(Graph<T, U...>* g) {
Tarjans<T, U...> t(g);
return t.run();
}
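// Example (illustrative sketch): a two-node cycle collapses into a single
// strongly connected component; the result is reverse-topologically sorted.
//
//   nom::Graph<int> g;
//   auto a = g.createNode(1);
//   auto b = g.createNode(2);
//   g.createEdge(a, b);
//   g.createEdge(b, a);
//   auto sccs = nom::algorithm::tarjans(&g);  // one SCC holding both nodes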
} // namespace algorithm
} // namespace nom
#endif // NOM_GRAPH_TARJANSIMPL_H
| 5,526
| 30.050562
| 84
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Graph/TopoSort.h
|
#ifndef NOM_GRAPH_TOPO_SORT_H
#define NOM_GRAPH_TOPO_SORT_H
#include <unordered_map>
#include "nomnigraph/Graph/Graph.h"
namespace nom {
namespace algorithm {
/// \brief Topological sort using DFS.
///
/// This algorithm takes a Graph object and returns node references in
/// topological order.
template <typename GraphT>
class TopoSort {
private:
using NodeRefT = typename GraphT::NodeRef;
GraphT* graph;
/// \brief performs DFS from given node.
// Each node and edge is visited no more than once.
  // Visited nodes are pushed into the result vector after all children have
  // been processed. Returns true if a cycle is detected, otherwise false.
bool dfs(
NodeRefT node,
std::unordered_map<NodeRefT, int>& status,
std::vector<NodeRefT>& nodes) {
// mark as visiting
status[node] = 1;
for (const auto& outEdge : node->getOutEdges()) {
auto& newNode = outEdge->head();
int newStatus = status[newNode];
if (newStatus == 0) {
if (dfs(newNode, status, nodes)) {
return true;
}
} else if (newStatus == 1) {
// find a node being visited, cycle detected
return true;
}
// ignore visited node
}
nodes.push_back(node);
// mark as visited
status[node] = 2;
return false;
}
public:
TopoSort(GraphT* graph) : graph(graph) {}
struct Result {
enum { OK, CYCLE } status;
std::vector<NodeRefT> nodes;
};
Result run() {
std::vector<NodeRefT> nodes;
std::unordered_map<NodeRefT, int> status;
for (auto& node : graph->getMutableNodes()) {
if (!status[node]) {
if (dfs(node, status, nodes)) {
return {Result::CYCLE, {}};
}
}
}
std::reverse(nodes.begin(), nodes.end());
return {Result::OK, nodes};
}
};
/// \brief A function wrapper to infer the graph template parameters.
/// TODO change this to const GraphT& g
template <typename GraphT>
typename TopoSort<GraphT>::Result topoSort(GraphT* g) {
TopoSort<GraphT> t(g);
return t.run();
}
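// Example (illustrative sketch): sorting a two-node DAG and checking the
// cycle flag before using the result.
//
//   nom::Graph<int> g;
//   auto a = g.createNode(1);
//   auto b = g.createNode(2);
//   g.createEdge(a, b);
//   auto result = nom::algorithm::topoSort(&g);
//   if (result.status == decltype(result)::OK) {
//     // result.nodes now holds {a, b} in topological order
//   }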
} // namespace algorithm
} // namespace nom
#endif // NOM_GRAPH_TOPO_SORT_H
| 2,130
| 24.070588
| 77
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Representations/Compiler.h
|
#ifndef NOM_REPRESENTATIONS_COMPILER_H
#define NOM_REPRESENTATIONS_COMPILER_H
#include "caffe2/core/common.h"
#include "nomnigraph/Graph/Graph.h"
#include "nomnigraph/Support/Casting.h"
namespace nom {
namespace repr {
class TORCH_API Value {
public:
enum class ValueKind { Value, Instruction, Data };
Value(ValueKind K) : kind_(K) {}
Value() : kind_(ValueKind::Value) {}
ValueKind getKind() const {
return kind_;
}
virtual ~Value() = default;
private:
const ValueKind kind_;
};
class TORCH_API Data : public Value {
public:
Data() : Value(ValueKind::Data) {}
static bool classof(const Value* V) {
return V->getKind() == ValueKind::Data;
}
virtual ~Data() = default;
size_t getVersion() const {
return version_;
}
void setVersion(size_t version) {
version_ = version;
}
private:
size_t version_ = 0;
};
class TORCH_API Instruction : public Value {
public:
/// \brief All the different types of execution.
enum class Opcode {
Generic, // Handles basic instructions.
TerminatorStart, // LLVM style range of operations.
Branch,
Return,
TerminatorEnd,
Phi
};
Instruction() : Value(ValueKind::Instruction), op_(Opcode::Generic) {}
Instruction(Opcode op) : Value(ValueKind::Instruction), op_(op) {}
static bool classof(const Value* V) {
return V->getKind() == ValueKind::Instruction;
}
virtual ~Instruction() = default;
Opcode getOpcode() const {
return op_;
}
private:
Opcode op_;
};
class TORCH_API Terminator : public Instruction {
public:
Terminator(Instruction::Opcode op) : Instruction(op) {}
private:
static bool classof(const Value* V) {
return isa<Instruction>(V) &&
isTerminator(cast<Instruction>(V)->getOpcode());
}
static bool isTerminator(const Opcode& op) {
return op >= Opcode::TerminatorStart && op <= Opcode::TerminatorEnd;
}
};
class TORCH_API Branch : public Terminator {
public:
Branch() : Terminator(Instruction::Opcode::Branch) {}
};
class TORCH_API Return : public Terminator {
public:
Return() : Terminator(Instruction::Opcode::Return) {}
};
class TORCH_API Phi : public Instruction {
public:
Phi() : Instruction(Instruction::Opcode::Phi) {}
};
} // namespace repr
} // namespace nom
#endif // NOM_REPRESENTATIONS_COMPILER_H
| 2,304
| 21.598039
| 72
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Representations/ControlFlow.h
|
#ifndef NOM_REPRESENTATIONS_CONTROLFLOW_H
#define NOM_REPRESENTATIONS_CONTROLFLOW_H
#include "caffe2/core/common.h"
#include "nomnigraph/Graph/Graph.h"
#include "nomnigraph/Representations/Compiler.h"
#include <unordered_map>
namespace nom {
namespace repr {
/// \brief A basic block holds a reference to a subgraph
/// of the data flow graph as well as an ordering on instruction
/// execution. Basic blocks are used for control flow analysis.
template <typename T, typename... U>
class BasicBlock {
public:
using NodeRef = typename Subgraph<T, U...>::NodeRef;
BasicBlock() {}
BasicBlock(const BasicBlock&) = delete;
BasicBlock(BasicBlock&&) = default;
BasicBlock& operator=(const BasicBlock&) = delete;
~BasicBlock() {
for (auto pair : callbacks_) {
pair.first->deleteDestructorCallback(pair.second);
}
}
void trackNode(NodeRef node) {
callbacks_[node] = node->registerDestructorCallback([&](NodeRef n) {
assert(
hasInstruction(n) &&
"Destructor callback invoked on untracked node in BasicBlock.");
deleteInstruction(n);
});
nodes_.addNode(node);
}
void untrackNode(NodeRef node) {
callbacks_.erase(node);
nodes_.removeNode(node);
}
void pushInstructionNode(NodeRef node) {
assert(
isa<Instruction>(node->data()) &&
"Cannot push non-instruction node to basic block.");
instructions_.emplace_back(node);
trackNode(node);
}
const std::vector<NodeRef>& getInstructions() const {
return instructions_;
}
std::vector<NodeRef>* getMutableInstructions() {
return &instructions_;
}
bool hasInstruction(NodeRef instr) const {
return nodes_.hasNode(instr);
}
void insertInstructionBefore(NodeRef newInstr, NodeRef instr) {
auto it =
std::find(std::begin(instructions_), std::end(instructions_), instr);
instructions_.insert(it, newInstr);
trackNode(newInstr);
}
void moveInstructionBefore(NodeRef instr1, NodeRef instr2) {
assert(hasInstruction(instr1) && "Instruction not in basic block.");
assert(hasInstruction(instr2) && "Instruction not in basic block.");
auto it1 =
std::find(std::begin(instructions_), std::end(instructions_), instr1);
auto it2 =
std::find(std::begin(instructions_), std::end(instructions_), instr2);
auto pos1b = std::distance(instructions_.begin(), it1);
auto pos2b = std::distance(instructions_.begin(), it2);
if (pos1b <= pos2b) {
return;
}
instructions_.erase(it1);
instructions_.insert(it2, instr1);
}
void deleteInstruction(NodeRef instr) {
assert(hasInstruction(instr) && "Instruction not in basic block.");
instructions_.erase(
std::remove(instructions_.begin(), instructions_.end(), instr),
instructions_.end());
untrackNode(instr);
}
private:
Subgraph<T, U...> nodes_;
std::vector<NodeRef> instructions_;
// Because we reference a dataflow graph, we need to register callbacks
// for when the dataflow graph is modified.
std::unordered_map<NodeRef, typename Notifier<Node<T, U...>>::Callback*>
callbacks_;
};
using Program = Graph<Value>;
template <typename G>
struct ControlFlowGraphImpl {
// Hack to help debugging in case this class is misused.
static_assert(
sizeof(ControlFlowGraphImpl),
"Template parameter G in "
"ControlFlowGraph<G> must be of "
"type Graph<T, U...>.");
};
template <typename T, typename... U>
struct ControlFlowGraphImpl<Graph<T, U...>> {
using type = Graph<BasicBlock<T, U...>, int>;
using bbType = BasicBlock<T, U...>;
};
/// \brief Helper for extracting the type of BasicBlocks given
/// a graph (probably a dataflow graph). TODO: refactor this
/// to come from something like Graph::NodeDataType
template <typename G>
using BasicBlockType = typename ControlFlowGraphImpl<G>::bbType;
/// \brief Control flow graph is a graph of basic blocks that
/// can be used as an analysis tool.
///
/// \note G Must be of type Graph<T, U...>.
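///
/// Example (illustrative sketch; `Program` is the Graph<Value> alias defined
/// above, and instruction ordering inside each block would be recorded with
/// BasicBlock::pushInstructionNode on nodes from that data-flow graph):
///
///   ControlFlowGraph<Program> cfg;
///   auto entryBlock = cfg.createNamedFunction("main");
///   auto helperBlock = cfg.createAnonymousFunction();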
template <typename G>
class ControlFlowGraph : public ControlFlowGraphImpl<G>::type {
public:
// This is for C++11 compatibility, otherwise we could use "using"
ControlFlowGraph() {}
ControlFlowGraph(const ControlFlowGraph&) = delete;
ControlFlowGraph(ControlFlowGraph&&) = default;
ControlFlowGraph& operator=(ControlFlowGraph&&) = default;
~ControlFlowGraph() {}
std::unordered_map<
std::string,
typename ControlFlowGraphImpl<G>::type::SubgraphType>
functions;
using BasicBlockRef = typename ControlFlowGraphImpl<G>::type::NodeRef;
// Named functions are simply basic blocks stored in labeled Subgraphs
BasicBlockRef createNamedFunction(std::string name) {
assert(name != "anonymous" && "Reserved token anonymous cannot be used");
auto bb = this->createNode(BasicBlockType<G>());
assert(functions.count(name) == 0 && "Name already in use.");
typename ControlFlowGraphImpl<G>::type::SubgraphType sg;
sg.addNode(bb);
functions[name] = sg;
return bb;
}
// Anonymous functions are aggregated into a single Subgraph
BasicBlockRef createAnonymousFunction() {
if (!functions.count("anonymous")) {
functions["anonymous"] =
typename ControlFlowGraphImpl<G>::type::SubgraphType();
}
auto bb = this->createNode(BasicBlockType<G>());
functions["anonymous"].addNode(bb);
return bb;
}
};
/// \brief Deletes a referenced node from the control flow graph.
template <typename G>
void deleteNode(ControlFlowGraph<G>* cfg, typename G::NodeRef node) {
for (auto bbNode : cfg->getMutableNodes()) {
auto bb = bbNode->data().get();
if (bb->hasInstruction(node)) {
bb->deleteInstruction(node);
}
}
}
} // namespace repr
} // namespace nom
#endif // NOM_REPRESENTATIONS_CONTROLFLOW_H
| 5,822
| 30.475676
| 78
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Representations/NeuralNet.h
|
//=== nomnigraph/Representations/NeuralNet.h - NN interface -----*- C++ -*-===//
//
// TODO Licensing.
//
//===----------------------------------------------------------------------===//
//
// This file defines classes that can be used in a
// nom::Graph<nom::repr::NeuralNetOperator, nom::repr::NeuralNetData> graph.
//
//===----------------------------------------------------------------------===//
#ifndef NOM_REPRESENTATIONS_NEURALNET_H
#define NOM_REPRESENTATIONS_NEURALNET_H
#include "caffe2/core/common.h"
#include "nomnigraph/Graph/Graph.h"
#include "nomnigraph/Representations/Compiler.h"
#include "nomnigraph/Representations/ControlFlow.h"
#include "nomnigraph/Support/Casting.h"
#include "nomnigraph/Transformations/SubgraphMatcher.h"
#include <memory>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
#include <assert.h>
namespace nom {
namespace repr {
// Expose supported attribute types to this namespace.
using std::string;
using std::vector;
class NeuralNetData;
/// \brief Annotations allow for generic manipulation of
/// neural network operations. The base class contains
/// a saved void* pointer for external use. Derived classes
/// add richer semantics to the annotation and it is encouraged
/// to use them.
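///
/// Example (illustrative sketch; DeviceAnnotation is a hypothetical subclass
/// and `op` is a pointer to a NeuralNetOperator defined elsewhere):
///
///   class DeviceAnnotation : public Annotation { /* device placement info */ };
///   op->setAnnotation(std::make_unique<DeviceAnnotation>());
///   const Annotation* annot = op->getAnnotation();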
class TORCH_API Annotation {
public:
enum class AnnotationKind { Generic, Caffe2 };
Annotation(AnnotationKind kind) : kind_(kind) {}
Annotation() : kind_(AnnotationKind::Generic) {}
virtual ~Annotation() {}
AnnotationKind getKind() const {
return kind_;
}
private:
const AnnotationKind kind_;
};
class TORCH_API NeuralNetOperator : public Instruction {
public:
/// Discriminator for LLVM-style RTTI (isa<>)
enum class NNKind {
Undefined,
GenericOperator,
NNPhi,
While,
#include "nomnigraph/Generated/OpEnum.h"
};
/// An optional tensor-type specifier.
enum class NNLayout { Undefined, NCHW, NHWC };
NeuralNetOperator(NNKind K, Opcode I, NNLayout L)
: Instruction(I), kind_(K), layout_(L) {}
NeuralNetOperator(NNKind K, Opcode I)
: Instruction(I), kind_(K), layout_(NNLayout::Undefined) {}
NeuralNetOperator(NNKind K, NNLayout L)
: Instruction(), kind_(K), layout_(L) {}
NeuralNetOperator(NNKind K)
: Instruction(), kind_(K), layout_(NNLayout::Undefined) {}
NeuralNetOperator()
: Instruction(), kind_(NNKind::Undefined), layout_(NNLayout::Undefined) {}
NNKind getKind() const {
return kind_;
}
void setLayout(NNLayout L) {
layout_ = L;
}
NNLayout getLayout() const {
return layout_;
}
void setAnnotation(std::unique_ptr<Annotation> extraAnnotation) {
extraAnnotation_ = std::move(extraAnnotation);
}
const Annotation* getAnnotation() const {
return extraAnnotation_.get();
}
Annotation* getMutableAnnotation() {
return extraAnnotation_.get();
}
const std::string getName() const;
/// \brief Validate the inputs and outputs to this operator.
///
/// \p inputs A vector of references to NeuralNetData types that
/// represent the data being fed into the operator.
/// \p outputs A vector of references to NeuralNetData types that
/// represent the data being outputted by the operator.
/// \return true if the inputs and outputs are compatible with the operator.
bool checkInputsAndOutputs(
std::vector<const NeuralNetData*> inputs,
std::vector<const NeuralNetData*> outputs) {
return true;
}
virtual ~NeuralNetOperator() = 0;
NeuralNetOperator(const NeuralNetOperator&) = delete;
NeuralNetOperator& operator=(NeuralNetOperator&) = delete;
private:
const NNKind kind_;
NNLayout layout_; // Mutable attribute, much like a type cast
std::unique_ptr<Annotation> extraAnnotation_;
};
class TORCH_API NeuralNetData : public Data {
public:
/// Discriminator for LLVM-style RTTI (isa<>)
enum class NNDataKind { Generic, Tensor };
NeuralNetData(NNDataKind kind) : kind_(kind) {}
NeuralNetData() : kind_(NNDataKind::Generic) {}
NNDataKind getKind() const {
return kind_;
}
virtual NeuralNetData* clone() = 0;
const std::string getName() const;
virtual ~NeuralNetData() = 0;
private:
NNDataKind kind_;
};
class TORCH_API Tensor : public NeuralNetData {
public:
enum class DataType { Generic, Float, Half, Int8 };
enum class Layout { Generic, NCHW, NHWC };
Tensor(std::string name)
: NeuralNetData(NNDataKind::Tensor),
name_(name),
type_(DataType::Generic) {}
static bool classof(const NeuralNetData* D) {
return D->getKind() == NNDataKind::Tensor;
}
NeuralNetData* clone() override {
return new Tensor(name_);
}
void setType(DataType type) {
type_ = type;
}
DataType getType() const {
return type_;
}
const std::string getName() const {
return name_;
}
void setName(const std::string& name) {
name_ = name;
}
~Tensor() override {}
private:
std::string name_;
DataType type_;
};
#define NOMNIGRAPH_DEFINE_NN_RTTI(op) \
static bool classof(const NeuralNetOperator* N) { \
return N->getKind() == NNKind::op; \
} \
static bool classof(const Value* N) { \
if (isa<NeuralNetOperator>(N)) { \
return dyn_cast<NeuralNetOperator>(N)->getKind() == NNKind::op; \
} \
return false; \
}
#include "nomnigraph/Generated/OpClasses.h"
class TORCH_API While : public NeuralNetOperator {
public:
While() : NeuralNetOperator(NNKind::While, Opcode::Branch) {}
NOMNIGRAPH_DEFINE_NN_RTTI(While);
~While() {}
};
class TORCH_API NNPhi : public NeuralNetOperator {
public:
NNPhi() : NeuralNetOperator(NNKind::NNPhi, Opcode::Phi) {}
NOMNIGRAPH_DEFINE_NN_RTTI(NNPhi);
~NNPhi() {}
};
class TORCH_API GenericOperator : public NeuralNetOperator {
public:
GenericOperator() : NeuralNetOperator(NNKind::GenericOperator) {}
GenericOperator(std::string name)
: NeuralNetOperator(NNKind::GenericOperator), name_(name) {}
NOMNIGRAPH_DEFINE_NN_RTTI(GenericOperator);
std::string getName() const {
return name_;
}
void setName(std::string name) {
name_ = name;
}
~GenericOperator() {}
private:
std::string name_;
};
using NNGraph = nom::Graph<std::unique_ptr<nom::repr::Value>>;
using NNSubgraph = nom::Subgraph<std::unique_ptr<nom::repr::Value>>;
using NNCFGraph = nom::repr::ControlFlowGraph<NNGraph>;
struct TORCH_API NNModule {
NNGraph dataFlow;
NNCFGraph controlFlow;
std::unordered_set<NNGraph::NodeRef> inputs;
std::unordered_set<NNGraph::NodeRef> outputs;
NNModule(const NNModule&) = delete;
NNModule(NNModule&&) = default;
NNModule() {}
/* Replace subgraph sg by node, using the order of
* node_inputs and node_outputs to determine how to link
* them to the node. node_inputs *must* enumerate all the
* inputs to the subgraph (NeuralNetData that do not
* have producers inside the subgraph). Same for node_outputs
*
   * New output names may be created in the case that an input
   * and an output have the same name (to avoid in-place ops).
* This may cause issues with external_output -- be sure to check
* after running this function (and perhaps inserting a copy/alias op).
**/
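  /* Example (illustrative sketch; the matched subgraph, the fused operator
   * node and the input/output data nodes are all hypothetical and would be
   * built elsewhere from this module's dataFlow graph):
   *
   *   NNGraph::SubgraphType toFuse;  // populated with the matched nodes
   *   auto fused = dataFlow.createNode(
   *       std::make_unique<GenericOperator>("FusedConvRelu"));
   *   replaceSubgraph(toFuse, fused, {convInputTensor}, {reluOutputTensor});
   */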
void replaceSubgraph(
const NNGraph::SubgraphType& subgraph,
const NNGraph::NodeRef& node,
const std::vector<NNGraph::NodeRef>& node_inputs,
const std::vector<NNGraph::NodeRef>& node_outputs);
void deleteSubgraph(const NNGraph::SubgraphType& subgraph);
NNGraph::NodeRef createUniqueDataNode(const std::string& s = "_unique");
// Simple wrapper of replaceSubgraph where the node is created for you.
// Returns a NodeRef to the node containing the operator that was created
template <typename T, typename... Args>
NNGraph::NodeRef replaceSubgraphWithOperator(
const NNGraph::SubgraphType&,
const std::vector<NNGraph::NodeRef>&,
const std::vector<NNGraph::NodeRef>&,
Args...);
};
template <typename T, typename... Args>
NNGraph::NodeRef NNModule::replaceSubgraphWithOperator(
const NNGraph::SubgraphType& sg,
const std::vector<NNGraph::NodeRef>& subgraph_inputs,
const std::vector<NNGraph::NodeRef>& subgraph_outputs,
Args... args) {
auto node = dataFlow.createNode(std::make_unique<T>(args...));
replaceSubgraph(sg, node, subgraph_inputs, subgraph_outputs);
return node;
}
// Although these seem generic, they make subtle assumptions
// about the structure of the graph that are 100% valid for NNModule graphs
// but not any graph (such as data being a unique_ptr).
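//
// Example (illustrative sketch; it assumes Relu is among the generated
// operator classes and `module` is an NNModule built elsewhere):
//
//   for (auto nodeRef : nn::filter<Relu>(module)) {
//     auto relu = nn::get<Relu>(nodeRef);
//     auto inputs = nn::getInputs(nodeRef);
//   }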
namespace nn {
template <bool B, class T = void>
using enable_if_t = typename std::enable_if<B, T>::type;
template <typename T, typename U>
struct C10_EXPORT inheritedFrom {
static constexpr bool value =
std::is_base_of<U, T>::value && !std::is_same<U, T>::value;
};
// This is just a way to fix issues when the isa<> implementation
// can't automatically downcast.
template <typename T, typename N, typename = void>
struct C10_EXPORT is_impl {
inline static bool impl(N n) {
return isa<T>(n->data());
}
};
template <typename T, typename N>
struct C10_EXPORT
is_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetOperator>::value>> {
inline static bool impl(N n) {
if (!isa<NeuralNetOperator>(n->data().get())) {
return false;
}
auto nno = dyn_cast<NeuralNetOperator>(n->data().get());
return isa<T>(nno);
}
};
template <typename T, typename N>
struct C10_EXPORT
is_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetData>::value>> {
inline static bool impl(N n) {
if (!isa<NeuralNetData>(n->data().get())) {
return false;
}
auto nno = dyn_cast<NeuralNetData>(n->data().get());
return isa<T>(nno);
}
};
template <typename T>
inline bool is(NNGraph::NodeRef n) {
return is_impl<T, NNGraph::NodeRef>::impl(n);
}
// This is just a way to fix issues when the dyn_cast<> implementation
// can't automatically downcast.
template <typename T, typename N, typename = void>
struct C10_EXPORT get_impl {
inline static T* impl(N n) {
return dyn_cast<T>(n->data().get());
}
};
template <typename T, typename N>
struct C10_EXPORT
get_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetOperator>::value>> {
inline static T* impl(N n) {
if (!is<T>(n)) {
assert(0 && "Cannot get type from node");
return nullptr;
}
auto nno = dyn_cast<NeuralNetOperator>(n->data().get());
return dyn_cast<T>(nno);
}
};
template <typename T, typename N>
struct C10_EXPORT
get_impl<T, N, enable_if_t<inheritedFrom<T, NeuralNetData>::value>> {
inline static T* impl(N n) {
if (!is<T>(n)) {
assert(0 && "Cannot get type from node");
return nullptr;
}
auto nno = dyn_cast<NeuralNetData>(n->data().get());
return dyn_cast<T>(nno);
}
};
template <typename T, typename N>
inline T* get(N n) {
return get_impl<T, N>::impl(n);
}
template <typename T, typename G>
std::vector<typename G::NodeRef> nodeIterator(G& g) {
std::vector<typename G::NodeRef> out;
for (auto node : g.getMutableNodes()) {
if (!is<T>(node)) {
continue;
}
out.emplace_back(node);
}
return out;
}
template <typename T>
inline std::vector<NNGraph::NodeRef> filter(NNModule& nn) {
return nodeIterator<T>(nn.dataFlow);
}
template <typename T, typename G>
std::vector<std::pair<T*, typename G::NodeRef>> dataIterator(G& g) {
std::vector<std::pair<T*, typename G::NodeRef>> out;
for (auto node : g.getMutableNodes()) {
if (!is<T>(node)) {
continue;
}
auto d = get<T>(node);
out.emplace_back(std::make_pair(d, node));
}
return out;
}
template <typename T, typename... Args>
void insertOp(
NNGraph& g,
NNGraph::NodeRef a,
NNGraph::NodeRef b,
Args... args) {
if (is<NeuralNetData>(a) && is<NeuralNetOperator>(b)) {
auto newNode = g.createNode(std::make_unique<T>(args...));
auto data = get<NeuralNetData>(a);
auto newData =
g.createNode(std::make_unique<Tensor>(data->getName() + "_"));
g.createEdge(a, newNode);
g.createEdge(newNode, newData);
g.createEdge(newData, b);
return;
}
if (is<NeuralNetOperator>(a) && is<NeuralNetData>(b)) {
auto newNode = g.createNode(std::make_unique<T>(args...));
auto data = get<NeuralNetData>(b);
auto newData =
g.createNode(std::make_unique<Tensor>(data->getName() + "_"));
g.createEdge(a, newData);
g.createEdge(newData, newNode);
g.createEdge(newNode, b);
return;
}
assert(0 && "insertOp takes (DFG, Tensor, Op) or (DFG, Op, Tensor)");
}
template <typename NewT, typename OldT>
NNGraph::NodeRef convertNode(NNGraph& g, NNGraph::NodeRef node) {
assert(is<OldT>(node) && "Cannot get type from node.");
NeuralNetOperator* nnOpPtr =
dyn_cast<NeuralNetOperator>(node->mutableData()->release());
auto newNode =
g.createNode(std::make_unique<NewT>(*dyn_cast<OldT>(nnOpPtr)));
g.replaceNode(node, newNode);
g.deleteNode(node);
return newNode;
}
/// NeuralNetData specific helpers.
TORCH_API bool hasProducer(NNGraph::NodeRef n);
TORCH_API NNGraph::NodeRef getProducer(NNGraph::NodeRef n);
TORCH_API bool hasConsumer(NNGraph::NodeRef n);
TORCH_API std::vector<NNGraph::NodeRef> getConsumers(NNGraph::NodeRef n);
TORCH_API bool hasInputs(NNGraph::NodeRef n);
TORCH_API std::vector<NNGraph::NodeRef> getInputs(NNGraph::NodeRef n);
TORCH_API std::vector<NNGraph::NodeRef> getOutputs(NNGraph::NodeRef n);
TORCH_API std::set<NNGraph::NodeRef> getInputs(const NNSubgraph& sg);
TORCH_API std::set<NNGraph::NodeRef> getOutputs(const NNSubgraph& sg);
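//
// Example (illustrative sketch; `tensorNode` is a hypothetical Tensor node
// living in an NNGraph):
//
//   if (hasProducer(tensorNode)) {
//     NNGraph::NodeRef producerOp = getProducer(tensorNode);
//   }
//   for (auto consumerOp : getConsumers(tensorNode)) {
//     // every consumerOp is an operator node that reads this tensor
//   }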
// Get the name of the node regardless of underlying type.
TORCH_API std::string getName(NNGraph::NodeRef n);
// Replace the producer of the first argument with the second argument
TORCH_API void replaceProducer(
NNGraph::NodeRef tensorNode,
NNGraph::NodeRef newProducer);
// Set all consumers of first argument to consume the second argument
TORCH_API void replaceAllUsesWith(
NNGraph::NodeRef oldTensorNode,
NNGraph::NodeRef newTensorNode);
// Set the second argument to consume the inputs of the first argument
TORCH_API void replaceAsConsumer(
NNGraph::NodeRef oldConsumer,
NNGraph::NodeRef newConsumer);
// Create an output tensor node
TORCH_API NNGraph::NodeRef
createOutput(NNModule* nn, NNGraph::NodeRef producer, std::string name);
// Hack for windows compiler.
template <typename T, typename... Args>
TORCH_API NNGraph::NodeRef createOperator(NNModule* nn, Args... args);
// Create an operator
template <typename T, typename... Args>
NNGraph::NodeRef createOperator(NNModule* nn, Args... args) {
return nn->dataFlow.createNode(std::make_unique<T>(args...));
}
TORCH_API void coalesceInsertedDataDependencies(repr::NNModule* m);
template <NNGraph* G>
struct C10_EXPORT NodeHelper {};
using NNMatchGraph = nom::matcher::MatchGraph<NNGraph>;
using NNMatchPredicate = nom::matcher::MatchPredicate<NNGraph>;
// Commonly used node predicate.
// The node has a single output and the output has a single consumer.
TORCH_API bool hasSingleOutputAndConsumer(NNGraph::NodeRef nodeRef);
// The node has a unique consumer (there may be multiple edges from output
// to the single consumer).
TORCH_API bool hasUniqueConsumer(NNGraph::NodeRef nodeRef);
TORCH_API NNMatchPredicate matchExternalTensorNode();
} // namespace nn
} // namespace repr
} // namespace nom
#endif // NOM_REPRESENTATIONS_NEURALNET_H
| 15,713
| 28.482176
| 80
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Support/Casting.h
|
//===- nomnigraph/Support/Casting.h - Allow casting checks ------*- C++ -*-===//
//
// This is taken directly from LLVM's source code.
//
// The original file is distributed under the University of Illinois Open Source
// License.
//
//===----------------------------------------------------------------------===//
//
// This file defines the isa<X>() function for checking downcastibility.
//
//===----------------------------------------------------------------------===//
#ifndef NOM_SUPPORT_CASTING_H
#define NOM_SUPPORT_CASTING_H
#include <assert.h>
#include <memory>
//===----------------------------------------------------------------------===//
// isa<x> Support Templates
//===----------------------------------------------------------------------===//
/// NOMNIGRAPH_NODISCARD - Warn if a type or return value is discarded.
#define NOMNIGRAPH_NODISCARD
#if __cplusplus > 201402L && defined(__has_cpp_attribute)
#if __has_cpp_attribute(nodiscard)
#undef NOMNIGRAPH_NODISCARD
#define NOMNIGRAPH_NODISCARD [[nodiscard]]
#endif
// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
// error when __has_cpp_attribute is given a scoped attribute in C mode.
#elif __cplusplus && defined(__has_cpp_attribute)
#if __has_cpp_attribute(clang::warn_unused_result)
#undef NOMNIGRAPH_NODISCARD
#define NOMNIGRAPH_NODISCARD [[clang::warn_unused_result]]
#endif
#endif
/// \brief If T is a pointer, just return it. If it is not, return T&.
template <typename T, typename Enable = void>
struct add_lvalue_reference_if_not_pointer {
using type = T&;
};
/// \brief If T is a pointer to X, return a pointer to const X. If it is not,
/// return const T.
template <typename T, typename Enable = void>
struct add_const_past_pointer {
using type = const T;
};
// Define a template that can be specialized by smart pointers to reflect the
// fact that they are automatically dereferenced, and are not involved with the
// template selection process... the default implementation is a noop.
//
template <typename From>
struct simplify_type {
using SimpleType = From; // The real type this represents...
// An accessor to get the real value...
static SimpleType& getSimplifiedValue(From& Val) {
return Val;
}
};
template <typename From>
struct simplify_type<const From> {
using NonConstSimpleType = typename simplify_type<From>::SimpleType;
using SimpleType = typename add_const_past_pointer<NonConstSimpleType>::type;
using RetType =
typename add_lvalue_reference_if_not_pointer<SimpleType>::type;
static RetType getSimplifiedValue(const From& Val) {
return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
}
};
// The core of the implementation of isa<X> is here; To and From should be
// the names of classes. This template can be specialized to customize the
// implementation of isa<> without rewriting it from scratch.
template <typename To, typename From, typename Enabler = void>
struct isa_impl {
static inline bool doit(const From& Val) {
return To::classof(&Val);
}
};
/// \brief Always allow upcasts, and perform no dynamic check for them.
template <typename To, typename From>
struct isa_impl<
To,
From,
typename std::enable_if<std::is_base_of<To, From>::value>::type> {
static inline bool doit(const From&) {
return true;
}
};
template <typename To, typename From>
struct isa_impl_cl {
static inline bool doit(const From& Val) {
return isa_impl<To, From>::doit(Val);
}
};
template <typename To, typename From>
struct isa_impl_cl<To, const From> {
static inline bool doit(const From& Val) {
return isa_impl<To, From>::doit(Val);
}
};
template <typename To, typename From>
struct isa_impl_cl<To, const std::unique_ptr<From>> {
static inline bool doit(const std::unique_ptr<From>& Val) {
assert(Val && "isa<> used on a null pointer");
return isa_impl_cl<To, From>::doit(*Val);
}
};
template <typename To, typename From>
struct isa_impl_cl<To, From*> {
static inline bool doit(const From* Val) {
assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From>
struct isa_impl_cl<To, From* const> {
static inline bool doit(const From* Val) {
assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From>
struct isa_impl_cl<To, const From*> {
static inline bool doit(const From* Val) {
assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From>
struct isa_impl_cl<To, const From* const> {
static inline bool doit(const From* Val) {
assert(Val && "isa<> used on a null pointer");
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From, typename SimpleFrom>
struct isa_impl_wrap {
// When From != SimplifiedType, we can simplify the type some more by using
// the simplify_type template.
static bool doit(const From& Val) {
return isa_impl_wrap<
To,
SimpleFrom,
typename simplify_type<SimpleFrom>::SimpleType>::
doit(simplify_type<const From>::getSimplifiedValue(Val));
}
};
template <typename To, typename FromTy>
struct isa_impl_wrap<To, FromTy, FromTy> {
// When From == SimpleType, we are as simple as we are going to get.
static bool doit(const FromTy& Val) {
return isa_impl_cl<To, FromTy>::doit(Val);
}
};
// isa<X> - Return true if the parameter to the template is an instance of the
// template type argument. Used like this:
//
// if (isa<Type>(myVal)) { ... }
//
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline bool isa(const Y& Val) {
return isa_impl_wrap<
X,
const Y,
typename simplify_type<const Y>::SimpleType>::doit(Val);
}
//===----------------------------------------------------------------------===//
// cast<x> Support Templates
//===----------------------------------------------------------------------===//
template <class To, class From>
struct cast_retty;
// Calculate what type the 'cast' function should return, based on a requested
// type of To and a source type of From.
template <class To, class From>
struct cast_retty_impl {
using ret_type = To&; // Normal case, return Ty&
};
template <class To, class From>
struct cast_retty_impl<To, const From> {
using ret_type = const To&; // Normal case, return Ty&
};
template <class To, class From>
struct cast_retty_impl<To, From*> {
using ret_type = To*; // Pointer arg case, return Ty*
};
template <class To, class From>
struct cast_retty_impl<To, const From*> {
using ret_type = const To*; // Constant pointer arg case, return const Ty*
};
template <class To, class From>
struct cast_retty_impl<To, const From* const> {
using ret_type = const To*; // Constant pointer arg case, return const Ty*
};
template <class To, class From>
struct cast_retty_impl<To, std::unique_ptr<From>> {
private:
using PointerType = typename cast_retty_impl<To, From*>::ret_type;
using ResultType = typename std::remove_pointer<PointerType>::type;
public:
using ret_type = std::unique_ptr<ResultType>;
};
template <class To, class From, class SimpleFrom>
struct cast_retty_wrap {
// When the simplified type and the from type are not the same, use the type
// simplifier to reduce the type, then reuse cast_retty_impl to get the
// resultant type.
using ret_type = typename cast_retty<To, SimpleFrom>::ret_type;
};
template <class To, class FromTy>
struct cast_retty_wrap<To, FromTy, FromTy> {
// When the simplified type is equal to the from type, use it directly.
using ret_type = typename cast_retty_impl<To, FromTy>::ret_type;
};
template <class To, class From>
struct cast_retty {
using ret_type = typename cast_retty_wrap<
To,
From,
typename simplify_type<From>::SimpleType>::ret_type;
};
// Ensure the non-simple values are converted using the simplify_type template
// that may be specialized by smart pointers...
//
template <class To, class From, class SimpleFrom>
struct cast_convert_val {
// This is not a simple type, use the template to simplify it...
static typename cast_retty<To, From>::ret_type doit(From& Val) {
return cast_convert_val<
To,
SimpleFrom,
typename simplify_type<SimpleFrom>::SimpleType>::
doit(simplify_type<From>::getSimplifiedValue(Val));
}
};
template <class To, class FromTy>
struct cast_convert_val<To, FromTy, FromTy> {
// This _is_ a simple type, just cast it.
static typename cast_retty<To, FromTy>::ret_type doit(const FromTy& Val) {
typename cast_retty<To, FromTy>::ret_type Res2 =
(typename cast_retty<To, FromTy>::ret_type) const_cast<FromTy&>(Val);
return Res2;
}
};
template <class X>
struct is_simple_type {
static const bool value =
std::is_same<X, typename simplify_type<X>::SimpleType>::value;
};
// cast<X> - Return the argument parameter cast to the specified type. This
// casting operator asserts that the type is correct, so it does not return null
// on failure. It does not allow a null argument (use cast_or_null for that).
// It is typically used like this:
//
// cast<Instruction>(myVal)->getParent()
//
template <class X, class Y>
inline typename std::enable_if<
!is_simple_type<Y>::value,
typename cast_retty<X, const Y>::ret_type>::type
cast(const Y& Val) {
assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
return cast_convert_val<
X,
const Y,
typename simplify_type<const Y>::SimpleType>::doit(Val);
}
template <class X, class Y>
inline typename cast_retty<X, Y>::ret_type cast(Y& Val) {
assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
return cast_convert_val<X, Y, typename simplify_type<Y>::SimpleType>::doit(
Val);
}
template <class X, class Y>
inline typename cast_retty<X, Y*>::ret_type cast(Y* Val) {
assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
return cast_convert_val<X, Y*, typename simplify_type<Y*>::SimpleType>::doit(
Val);
}
template <class X, class Y>
inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type cast(
std::unique_ptr<Y>&& Val) {
assert(isa<X>(Val.get()) && "cast<Ty>() argument of incompatible type!");
using ret_type = typename cast_retty<X, std::unique_ptr<Y>>::ret_type;
return ret_type(
cast_convert_val<X, Y*, typename simplify_type<Y*>::SimpleType>::doit(
Val.release()));
}
// cast_or_null<X> - Functionally identical to cast, except that a null value is
// accepted.
//
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename std::enable_if<
!is_simple_type<Y>::value,
typename cast_retty<X, const Y>::ret_type>::type
cast_or_null(const Y& Val) {
if (!Val)
return nullptr;
assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
return cast<X>(Val);
}
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename std::enable_if<
!is_simple_type<Y>::value,
typename cast_retty<X, Y>::ret_type>::type
cast_or_null(Y& Val) {
if (!Val)
return nullptr;
assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
return cast<X>(Val);
}
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename cast_retty<X, Y*>::ret_type cast_or_null(
Y* Val) {
if (!Val)
return nullptr;
assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
return cast<X>(Val);
}
template <class X, class Y>
inline typename cast_retty<X, std::unique_ptr<Y>>::ret_type cast_or_null(
std::unique_ptr<Y>&& Val) {
if (!Val)
return nullptr;
return cast<X>(std::move(Val));
}
// dyn_cast<X> - Return the argument parameter cast to the specified type. This
// casting operator returns null if the argument is of the wrong type, so it can
// be used to test for a type as well as cast if successful. This should be
// used in the context of an if statement like this:
//
// if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
//
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename std::enable_if<
!is_simple_type<Y>::value,
typename cast_retty<X, const Y>::ret_type>::type
dyn_cast(const Y& Val) {
return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename cast_retty<X, Y>::ret_type dyn_cast(
Y& Val) {
return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename cast_retty<X, Y*>::ret_type dyn_cast(
Y* Val) {
return isa<X>(Val) ? cast<X>(Val) : nullptr;
}
// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
// value is accepted.
//
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename std::enable_if<
!is_simple_type<Y>::value,
typename cast_retty<X, const Y>::ret_type>::type
dyn_cast_or_null(const Y& Val) {
return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename std::enable_if<
!is_simple_type<Y>::value,
typename cast_retty<X, Y>::ret_type>::type
dyn_cast_or_null(Y& Val) {
return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline typename cast_retty<X, Y*>::ret_type
dyn_cast_or_null(Y* Val) {
return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
}
// unique_dyn_cast<X> - Given a unique_ptr<Y>, try to return a unique_ptr<X>,
// taking ownership of the input pointer iff isa<X>(Val) is true. If the
// cast is successful, From refers to nullptr on exit and the casted value
// is returned. If the cast is unsuccessful, the function returns nullptr
// and From is unchanged.
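//
// Example (illustrative sketch; Base and Derived are hypothetical types that
// follow the classof protocol used throughout this file):
//
//   std::unique_ptr<Base> b = std::make_unique<Derived>();
//   if (std::unique_ptr<Derived> d = unique_dyn_cast<Derived>(b)) {
//     // The cast succeeded: d owns the object and b is now null.
//   }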
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y>& Val)
-> decltype(cast<X>(Val)) {
if (!isa<X>(Val))
return nullptr;
return cast<X>(std::move(Val));
}
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline auto unique_dyn_cast(std::unique_ptr<Y>&& Val)
-> decltype(cast<X>(Val)) {
return unique_dyn_cast<X, Y>(Val);
}
// unique_dyn_cast_or_null<X> - Functionally identical to unique_dyn_cast,
// except that a null value is accepted.
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline auto unique_dyn_cast_or_null(
std::unique_ptr<Y>& Val) -> decltype(cast<X>(Val)) {
if (!Val)
return nullptr;
return unique_dyn_cast<X, Y>(Val);
}
template <class X, class Y>
NOMNIGRAPH_NODISCARD inline auto unique_dyn_cast_or_null(
std::unique_ptr<Y>&& Val) -> decltype(cast<X>(Val)) {
return unique_dyn_cast_or_null<X, Y>(Val);
}
#endif // NOM_SUPPORT_CASTING_H
| 14,766
| 31.242358
| 80
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Support/Common.h
|
//== nomnigraph/Support/Common.h - Common class implementations --*- C++ -*-==//
//
// TODO Licensing.
//
//===----------------------------------------------------------------------===//
//
// This file defines basic classes that are useful to inherit from.
//
//===----------------------------------------------------------------------===//
#ifndef NOM_SUPPORT_COMMON_H
#define NOM_SUPPORT_COMMON_H
#include <functional>
#include <list>
// These #defines are useful when writing passes, as they collapse
//
// if (!cond) {
// continue; // or break; or return;
// }
//
// into a single line without negation
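//
// For example (illustrative sketch), inside a loop over graph nodes:
//
//   NOM_REQUIRE_OR_CONT(node != nullptr);
//
// expands to the equivalent of:
//
//   if (!(node != nullptr)) {
//     continue;
//   }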
#define NOM_REQUIRE_OR_(_cond, _expr) \
if (!(_cond)) { \
_expr; \
}
#define NOM_REQUIRE_OR_CONT(_cond) NOM_REQUIRE_OR_(_cond, continue)
#define NOM_REQUIRE_OR_BREAK(_cond) NOM_REQUIRE_OR_(_cond, break)
#define NOM_REQUIRE_OR_RET_NULL(_cond) NOM_REQUIRE_OR_(_cond, return nullptr)
#define NOM_REQUIRE_OR_RET_FALSE(_cond) NOM_REQUIRE_OR_(_cond, return false)
#define NOM_REQUIRE_OR_RET_TRUE(_cond) NOM_REQUIRE_OR_(_cond, return true)
#define NOM_REQUIRE_OR_RET(_cond) NOM_REQUIRE_OR_(_cond, return )
// Implements accessors for a generic type T. If the type is not
// specified (i.e., the default void template type), the specialization below
// gives an empty type.
template <typename T = void>
class StorageType {
public:
StorageType(T&& data) : Data(std::move(data)) {}
StorageType(const T& data) = delete;
StorageType() {}
const T& data() const {
return Data;
}
T* mutableData() {
return &Data;
}
void resetData(T&& data) {
Data = std::move(data);
}
private:
T Data;
};
template <>
class StorageType<> {};
/// \brief This class enables a listener pattern.
/// It is to be used with the "curiously recurring template pattern" (CRTP),
/// i.e. Derived : public Notifier<Derived> {}
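///
/// Example (illustrative sketch):
///
///   class Widget : public Notifier<Widget> {};
///
///   Widget w;
///   auto* cb = w.registerDestructorCallback(
///       [](Widget* dying) { /* react to the destruction */ });
///   w.deleteDestructorCallback(cb);  // unregister if no longer needed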
template <typename T>
class Notifier {
public:
using Callback = std::function<void(T*)>;
Notifier() {}
Callback* registerDestructorCallback(Callback fn) {
dtorCallbacks_.emplace_back(fn);
return &dtorCallbacks_.back();
}
Callback* registerNotificationCallback(Callback fn) {
notifCallbacks_.emplace_back(fn);
    return &notifCallbacks_.back();
}
void deleteCallback(std::list<Callback>& callbackList, Callback* toDelete) {
for (auto i = callbackList.begin(); i != callbackList.end(); ++i) {
if (&*i == toDelete) {
callbackList.erase(i);
break;
}
}
}
void deleteDestructorCallback(Callback* c) {
deleteCallback(dtorCallbacks_, c);
}
void deleteNotificationCallback(Callback* c) {
deleteCallback(notifCallbacks_, c);
}
/// \brief Notifies all listeners (`registerNotificationCallback`
/// users) of an update. Assumes the information of the update
/// is encoded in the state of the derived class, thus only passing
/// a pointer of type T* to the callback.
void notify() {
for (auto callback : notifCallbacks_) {
callback(reinterpret_cast<T*>(this));
}
}
virtual ~Notifier() {
for (auto callback : dtorCallbacks_) {
callback(reinterpret_cast<T*>(this));
}
}
private:
std::list<Callback> dtorCallbacks_;
std::list<Callback> notifCallbacks_;
};
#endif /* NOM_SUPPORT_COMMON_H */
| 3,279
| 25.885246
| 80
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Transformations/Match.h
|
//=== nomnigraph/Transformations/Match.h - Graph matching utils -*- C++ -*-===//
//
// TODO Licensing.
//
//===----------------------------------------------------------------------===//
//
// This file defines utilities for matching subgraphs.
//
//===----------------------------------------------------------------------===//
#ifndef NOM_TRANFORMATIONS_MATCH_H
#define NOM_TRANFORMATIONS_MATCH_H
#include "nomnigraph/Graph/Algorithms.h"
#include <algorithm>
#include <vector>
namespace nom {
template <typename T>
struct NodeEqualityDefault {
static bool equal(const T& a, const T& b) {
return a->data() == b->data();
}
};
template <
typename G,
typename EqualityClass = NodeEqualityDefault<typename G::NodeRef>>
class Match {
public:
using SubgraphType = typename G::SubgraphType;
Match(G& g) : MatchGraph(g) {
// First we sort both the matching graph topologically.
// This could give us a useful anchor in the best case.
auto result = nom::algorithm::topoSort(&MatchGraph);
MatchNodeList = result.nodes;
}
std::vector<SubgraphType> recursiveMatch(
typename G::NodeRef candidateNode,
std::vector<typename G::NodeRef> stack,
SubgraphType currentSubgraph) {
if (EqualityClass::equal(stack.back(), candidateNode)) {
currentSubgraph.addNode(candidateNode);
// Base case
if (stack.size() == MatchNodeList.size()) {
return std::vector<SubgraphType>{currentSubgraph};
}
// Recurse and accumulate matches
stack.emplace_back(MatchNodeList.at(stack.size()));
std::vector<SubgraphType> matchingSubgraphs;
for (auto outEdge : candidateNode->getOutEdges()) {
for (auto subgraph :
recursiveMatch(outEdge->head(), stack, currentSubgraph)) {
matchingSubgraphs.emplace_back(subgraph);
}
}
return matchingSubgraphs;
}
// No match here, early bailout
return std::vector<SubgraphType>{};
}
std::vector<SubgraphType> match(G& g) {
std::vector<SubgraphType> out;
std::vector<typename G::NodeRef> stack;
stack.emplace_back(MatchNodeList.front());
// Try each node in the candidate graph as the anchor.
for (auto n : g.getMutableNodes()) {
for (auto subgraph : recursiveMatch(n, stack, SubgraphType())) {
out.emplace_back(subgraph);
}
}
return out;
}
private:
G& MatchGraph;
std::vector<typename G::NodeRef> MatchNodeList;
};
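// Example (illustrative sketch): find occurrences of a two-node pattern in a
// larger graph; the target graph and the node payloads are hypothetical.
//
//   nom::Graph<std::string> pattern;
//   auto a = pattern.createNode(std::string("A"));
//   auto b = pattern.createNode(std::string("B"));
//   pattern.createEdge(a, b);
//
//   nom::Match<nom::Graph<std::string>> matcher(pattern);
//   auto subgraphs = matcher.match(targetGraph);  // one entry per match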
} // namespace nom
#endif // NOM_TRANFORMATIONS_MATCH_H
| 2,523
| 25.568421
| 80
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/include/nomnigraph/Transformations/SubgraphMatcher.h
|
#ifndef NOM_TRANFORMATIONS_SUBGRAPH_MATCHER_H
#define NOM_TRANFORMATIONS_SUBGRAPH_MATCHER_H
#include "c10/util/irange.h"
#include "caffe2/core/common.h"
#include "nomnigraph/Graph/Graph.h"
#include <functional>
#include <memory>
#include <sstream>
#include <unordered_map>
#include <vector>
namespace nom {
namespace matcher {
/**
* MatchGraph is a graph of MatchPredicate.
*
* MatchPredicate needs a predicate for node matching and
* - includeInSubgraph: whether this node and nodes/edges reachable from it
 * should be included in the returned matched subgraph (if the pattern matches).
* This is useful in case we would like to specify a matching pattern but do not
* want part of the pattern to be included in the returned subgraph.
* - A count, which means we may want to match this node multiple times from its
* incoming edges. The count can be unlimited (think about it as a regex star).
* - If nonTerminal flag is set, it means we will not consider outgoing edges
* from the node when doing subgraph matching.
*/
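//
// Example (illustrative sketch over a plain string graph; the predicates, the
// target graph and the node being tested are hypothetical):
//
//   using StringGraph = nom::Graph<std::string>;
//   MatchGraph<StringGraph> mg;
//   auto producerCriteria = mg.createNode(MatchPredicate<StringGraph>(
//       [](StringGraph::NodeRef n) { return n->data() == "input"; }));
//   auto rootCriteria = mg.createNode(MatchPredicate<StringGraph>(
//       [](StringGraph::NodeRef n) { return n->data() == "op"; }));
//   // Match graph edges follow the data-flow direction; with the default
//   // invertGraphTraversal == true, the root's children criteria are matched
//   // against the producers (in-edges) of the candidate root node.
//   mg.createEdge(producerCriteria, rootCriteria);
//   auto result = mg.isSubgraphMatch(someOpNode, rootCriteria);
//   if (result.isMatch()) {
//     auto matchedSubgraph = result.getMatchedSubgraph();
//   }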
template <typename GraphType>
class MatchPredicate {
public:
using Predicate = std::function<bool(typename GraphType::NodeRef)>;
static const int kStarCount = -1;
MatchPredicate(const Predicate& criteria) : criteria_(criteria) {}
MatchPredicate() = default;
MatchPredicate(const MatchPredicate&) = default;
MatchPredicate& operator=(const MatchPredicate&) = default;
MatchPredicate(MatchPredicate&&) = default;
Predicate getCriteria() const {
return criteria_;
}
int getCount() const {
return count_;
}
MatchPredicate<GraphType>& count(int count) {
count_ = count;
return *this;
}
MatchPredicate<GraphType>& starCount() {
return count(kStarCount);
}
MatchPredicate<GraphType>& nonTerminal() {
nonTerminal_ = true;
return *this;
}
MatchPredicate<GraphType>& excludeFromSubgraph() {
includeInSubgraph_ = false;
return *this;
}
bool isNonTerminal() const {
return nonTerminal_;
}
bool shouldIncludeInSubgraph() const {
return includeInSubgraph_;
}
std::string getDebugString() const {
return debugString_;
}
void setDebugString(const std::string& debugString) {
debugString_ = debugString;
}
private:
Predicate criteria_;
int count_ = 1;
bool includeInSubgraph_ = true;
bool nonTerminal_ = false;
std::string debugString_;
};
template <typename GraphType>
class SubgraphMatchResult;
// MatchGraph is a graph of MatchPredicate and it contains utilities for
// subgraph matching.
// (TODO) the subgraph matching methods currently still
// require a root match node to be passed in. We should improve the matching
// algorithm to eliminate this requirement.
template <typename GraphType>
class MatchGraph : public Graph<MatchPredicate<GraphType>> {
public:
using SubgraphMatchResultType = SubgraphMatchResult<GraphType>;
using ReplaceGraphOperation = std::function<bool(
GraphType&,
typename GraphType::NodeRef,
const SubgraphMatchResultType&)>;
bool isNodeMatch(
typename GraphType::NodeRef node,
const MatchPredicate<GraphType>& matchPredicate) const {
return matchPredicate.getCriteria()(node);
}
// Check if there can be a subgraph that matches the given criteria that
// is rooted at the given rootNode.
  // The flag invertGraphTraversal specifies if we should follow out edges or
// in edges. The default is true which is useful for a functional
// interpretation of a dataflow graph.
SubgraphMatchResultType isSubgraphMatch(
typename GraphType::NodeRef root,
const typename MatchGraph::NodeRef& rootCriteriaRef,
bool invertGraphTraversal = true,
bool debug = false) const {
// Create a matched result that owns a matched subgraph object and pass
// the subgraph object around to construct it during matching.
auto matchedResult = SubgraphMatchResultType::matched(true);
auto result = isSubgraphMatchInternal(
matchedResult.getMatchNodeMap(),
matchedResult.getMatchedSubgraph(),
root,
rootCriteriaRef,
rootCriteriaRef->data().shouldIncludeInSubgraph(),
invertGraphTraversal,
debug);
return result.isMatch() ? matchedResult : result;
}
// Utility to transform a graph by looking for subgraphs that match
// a given pattern and then allow callers to mutate the graph based on
// subgraphs that are found.
// The current implementation doesn't handle any graph transformation
// itself. Callers should be responsible for all intended mutation, including
// deleting nodes in the subgraphs found by this algorithm.
// Note: if the replaceFunction lambda returns false, the entire procedure
  // is aborted. This may be useful in certain cases when we want to terminate
// the subgraph search early.
// invertGraphTraversal flag: see documentation in isSubgraphMatch
void replaceSubgraph(
GraphType& graph,
const typename MatchGraph::NodeRef& criteria,
const ReplaceGraphOperation& replaceFunction,
bool invertGraphTraversal = true) const {
for (auto nodeRef : graph.getMutableNodes()) {
// Make sure the node is still in the graph.
if (!graph.hasNode(nodeRef)) {
continue;
}
auto matchResult =
isSubgraphMatch(nodeRef, criteria, invertGraphTraversal);
if (matchResult.isMatch()) {
if (!replaceFunction(graph, nodeRef, matchResult)) {
// If replaceFunction returns false, it means that we should abort
// the entire procedure.
break;
}
}
}
}
private:
SubgraphMatchResultType isSubgraphMatchInternal(
std::shared_ptr<typename SubgraphMatchResultType::MatchNodeMap>
matchedNodes,
std::shared_ptr<typename GraphType::SubgraphType> matchedSubgraph,
typename GraphType::NodeRef root,
const typename MatchGraph::NodeRef& rootCriteriaRef,
bool includeInSubgraph,
bool invertGraphTraversal,
bool debug) const {
auto rootCriteriaNode = rootCriteriaRef->data();
if (rootCriteriaNode.getCount() == 1) {
auto matchedNodeEntry = matchedNodes->find(rootCriteriaRef);
if (matchedNodeEntry != matchedNodes->end()) {
// If rootCriteriaRef has been matched before (without multiplicity),
// we should look up the corresponding matched node in the graph
// and verify if it is the same.
auto matchedNode = matchedNodeEntry->second;
if (matchedNode == root) {
return SubgraphMatchResultType::matched();
} else if (debug) {
std::ostringstream debugMessage;
debugMessage << "Subgraph root at " << root << " is not the same as "
<< matchedNode << " which previously matched criteria "
<< debugString(rootCriteriaRef, invertGraphTraversal);
return SubgraphMatchResultType::notMatched(debugMessage.str());
} else {
return SubgraphMatchResultType::notMatched();
}
}
}
if (!isNodeMatch(root, rootCriteriaNode)) {
if (debug) {
std::ostringstream debugMessage;
debugMessage << "Subgraph root at " << root
<< " does not match criteria "
<< debugString(rootCriteriaRef, invertGraphTraversal);
return SubgraphMatchResultType::notMatched(debugMessage.str());
} else {
return SubgraphMatchResultType::notMatched();
}
}
if (rootCriteriaNode.isNonTerminal()) {
// This is sufficient to be a match if this criteria specifies a non
// terminal node.
matchedNodes->emplace(rootCriteriaRef, root);
if (includeInSubgraph) {
matchedSubgraph->addNode(root);
}
return SubgraphMatchResultType::matched();
}
auto& edges =
invertGraphTraversal ? root->getInEdges() : root->getOutEdges();
int numEdges = edges.size();
const auto criteriaEdges = invertGraphTraversal
? rootCriteriaRef->getInEdges()
: rootCriteriaRef->getOutEdges();
int numChildrenCriteria = criteriaEdges.size();
// The current algorithm implies that the ordering of the children is
// important. The children nodes will be matched with the children subgraph
// criteria in the given order.
int currentEdgeIdx = 0;
for (const auto criteriaIdx : c10::irange(numChildrenCriteria)) {
auto childrenCriteriaRef = invertGraphTraversal
? criteriaEdges[criteriaIdx]->tail()
: criteriaEdges[criteriaIdx]->head();
int expectedCount = childrenCriteriaRef->data().getCount();
bool isStarCount = expectedCount == MatchPredicate<GraphType>::kStarCount;
int countMatch = 0;
// Continue to match subsequent edges with the current children criteria.
// Note that if the child criteria is a * pattern, this greedy algorithm
// will attempt to find the longest possible sequence that matches the
// children criteria.
for (; currentEdgeIdx < numEdges &&
(isStarCount || countMatch < expectedCount);
currentEdgeIdx++) {
auto edge = edges[currentEdgeIdx];
auto child = invertGraphTraversal ? edge->tail() : edge->head();
bool shouldIncludeEdgeInSubgraph =
childrenCriteriaRef->data().shouldIncludeInSubgraph() &&
includeInSubgraph;
if (!isSubgraphMatchInternal(
matchedNodes,
matchedSubgraph,
child,
childrenCriteriaRef,
shouldIncludeEdgeInSubgraph,
invertGraphTraversal,
debug)
.isMatch()) {
if (!isStarCount) {
// If the current criteria isn't a * pattern, this indicates a
// failure.
if (debug) {
std::ostringstream debugMessage;
debugMessage << "Child node at " << child
<< " does not match child criteria "
<< debugString(
childrenCriteriaRef, invertGraphTraversal)
<< ". We expected " << expectedCount
<< " matches but only found " << countMatch << ".";
return SubgraphMatchResultType::notMatched(debugMessage.str());
} else {
return SubgraphMatchResultType::notMatched();
}
} else {
// Otherwise, we should move on to the next children criteria.
break;
}
} else if (shouldIncludeEdgeInSubgraph) {
matchedSubgraph->addEdge(edge);
}
countMatch++;
}
if (countMatch < expectedCount) {
// Fails because there are not enough matches as specified by the
// criteria.
if (debug) {
std::ostringstream debugMessage;
debugMessage << "Expected " << expectedCount
<< " matches for child criteria "
<< debugString(childrenCriteriaRef, invertGraphTraversal)
<< " but only found " << countMatch;
return SubgraphMatchResultType::notMatched(debugMessage.str());
} else {
return SubgraphMatchResultType::notMatched();
}
}
}
if (currentEdgeIdx < numEdges) {
// Fails because there are unmatched edges.
if (debug) {
std::ostringstream debugMessage;
debugMessage << "Unmatched children for subgraph root at " << root
<< ". There are " << numEdges
<< " children, but only found " << currentEdgeIdx
<< " matches for the children criteria.";
return SubgraphMatchResultType::notMatched(debugMessage.str());
} else {
return SubgraphMatchResultType::notMatched();
}
}
matchedNodes->emplace(rootCriteriaRef, root);
if (includeInSubgraph) {
matchedSubgraph->addNode(root);
}
return SubgraphMatchResultType::matched();
}
// TODO: Reuse convertToDotString once convertToDotString can work
// with subgraph.
std::string debugString(
typename MatchGraph::NodeRef rootCriteriaRef,
bool invertGraphTraversal) const {
std::ostringstream out;
auto rootNode = rootCriteriaRef->data();
out << "{root = '" << rootNode.getDebugString() << "'";
if (rootNode.getCount() != 1) {
out << ", count = " << rootNode.getCount();
}
if (rootNode.isNonTerminal()) {
out << ", nonTerminal = " << rootNode.isNonTerminal();
}
auto edges = invertGraphTraversal ? rootCriteriaRef->getInEdges()
: rootCriteriaRef->getOutEdges();
if (!edges.empty()) {
out << ", childrenCriteria = [";
for (auto& child : edges) {
auto nextNode = invertGraphTraversal ? child->tail() : child->head();
out << debugString(nextNode, invertGraphTraversal) << ", ";
}
out << "]";
}
out << "}";
return out.str();
}
};
template <typename GraphType>
class SubgraphMatchResult {
public:
// Map from match node to corresponding node in the graph to be scanned.
using MatchNodeMap = std::unordered_map<
typename MatchGraph<GraphType>::NodeRef,
typename GraphType::NodeRef>;
static SubgraphMatchResult<GraphType> notMatched(
const std::string& debugMessage) {
return SubgraphMatchResult<GraphType>(false, debugMessage);
}
static SubgraphMatchResult<GraphType> notMatched() {
return SubgraphMatchResult<GraphType>(
false, "Debug message is not enabled");
}
static SubgraphMatchResult<GraphType> matched(bool ownSubgraph = false) {
return SubgraphMatchResult<GraphType>(true, "Matched", ownSubgraph);
}
bool isMatch() const {
return isMatch_;
}
std::string getDebugMessage() const {
return debugMessage_;
}
std::shared_ptr<typename GraphType::SubgraphType> getMatchedSubgraph() const {
return matchedSubgraph_;
}
std::shared_ptr<MatchNodeMap> getMatchNodeMap() const {
return matchNodeMap_;
}
private:
SubgraphMatchResult(
bool isMatch,
const std::string& debugMessage,
bool ownSubgraph = false)
: isMatch_(isMatch),
debugMessage_(debugMessage),
matchedSubgraph_(
ownSubgraph ? std::make_shared<typename GraphType::SubgraphType>()
: nullptr),
matchNodeMap_(
ownSubgraph ? std::make_shared<MatchNodeMap>()
: nullptr) {}
const bool isMatch_;
const std::string debugMessage_;
const std::shared_ptr<typename GraphType::SubgraphType> matchedSubgraph_;
const std::shared_ptr<MatchNodeMap> matchNodeMap_;
};
} // namespace matcher
} // namespace nom
#endif // NOM_TRANFORMATIONS_SUBGRAPH_MATCHER_H
| 14,898
| 33.648837
| 80
|
h
|
null |
pytorch-main/caffe2/core/nomnigraph/tests/test_util.h
|
#ifndef NOM_TESTS_TEST_UTIL_H
#define NOM_TESTS_TEST_UTIL_H
#include "caffe2/core/common.h"
#include "nomnigraph/Graph/Graph.h"
#include "nomnigraph/Graph/Algorithms.h"
#include "nomnigraph/Representations/NeuralNet.h"
#include "nomnigraph/Converters/Dot.h"
#include <map>
class TestClass {
public:
TestClass() {}
~TestClass() {}
};
struct NNEquality {
static bool equal(
const typename nom::repr::NNGraph::NodeRef& a,
const typename nom::repr::NNGraph::NodeRef& b) {
if (
!nom::repr::nn::is<nom::repr::NeuralNetOperator>(a) ||
!nom::repr::nn::is<nom::repr::NeuralNetOperator>(b)) {
return false;
}
auto a_ = nom::repr::nn::get<nom::repr::NeuralNetOperator>(a);
auto b_ = nom::repr::nn::get<nom::repr::NeuralNetOperator>(b);
bool sameKind = a_->getKind() == b_->getKind();
if (sameKind && a_->getKind() == nom::repr::NeuralNetOperator::NNKind::GenericOperator) {
return a_->getName() == b_->getName();
}
return sameKind;
}
};
// Very simple random number generator used to generate platform independent
// random test data.
class TestRandom {
public:
  TestRandom(unsigned int seed) : seed_(seed) {}
unsigned int nextInt() {
seed_ = A * seed_ + C;
return seed_;
}
private:
static const unsigned int A = 1103515245;
static const unsigned int C = 12345;
unsigned int seed_;
};
/** Our test graph looks like this:
* +-------+
* | entry |
* +-------+
* |
* |
* v
* +-------+
* | 1 |
* +-------+
* |
* |
* v
* +---+ +-------+
* | 4 | <-- | 2 |
* +---+ +-------+
* | |
* | |
* | v
* | +-------+
* | | 3 |
* | +-------+
* | |
* | |
* | v
* | +-------+
* +-----> | 6 |
* +-------+
* |
* |
* v
* +---+ +-------+
* | 5 | --> | 7 |
* +---+ +-------+
* |
* |
* v
* +-------+
* | exit |
* +-------+
*
* Here is the code used to generate the dot file for it:
*
* auto str = nom::converters::convertToDotString(&graph,
* [](nom::Graph<std::string>::NodeRef node) {
* std::map<std::string, std::string> labelMap;
* labelMap["label"] = node->data();
* return labelMap;
* });
*/
TORCH_API nom::Graph<std::string> createGraph();
TORCH_API nom::Graph<std::string> createGraphWithCycle();
std::map<std::string, std::string> BBPrinter(typename nom::repr::NNCFGraph::NodeRef node);
std::map<std::string, std::string> cfgEdgePrinter(typename nom::repr::NNCFGraph::EdgeRef edge);
std::map<std::string, std::string> NNPrinter(typename nom::repr::NNGraph::NodeRef node);
TORCH_API nom::Graph<TestClass>::NodeRef createTestNode(
nom::Graph<TestClass>& g);
TORCH_API std::map<std::string, std::string> TestNodePrinter(
nom::Graph<TestClass>::NodeRef node);
#endif // NOM_TESTS_TEST_UTIL_H
| 3,129
| 24.867769
| 95
|
h
|
null |
pytorch-main/caffe2/cuda_rtc/common_rtc.h
|
#ifndef CAFFE2_CUDA_RTC_COMMON_RTC_H_
#define CAFFE2_CUDA_RTC_COMMON_RTC_H_
#include <sstream>
#include <string>
#include <cuda.h>
#include <nvrtc.h>
#include "c10/util/irange.h"
#define NVRTC_CHECK(condition) \
do { \
nvrtcResult result = condition; \
if (result != NVRTC_SUCCESS) { \
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": " \
<< nvrtcGetErrorString(result); \
} \
} while (0)
namespace caffe2 {
template <typename Derived>
class CudaRTCFunction {
public:
CudaRTCFunction() : module_loaded_(false) {}
~CudaRTCFunction() {
if (module_loaded_) {
CUDA_DRIVERAPI_ENFORCE(cuModuleUnload(module_));
}
}
// TODO: this function is nontrivial and since CudaRTCFunction uses CRTP, it
// may potentially increase the binary size. In that case, move common parts
// into a separate function.
template <typename... Args>
void Compile(Args... args) {
string src = static_cast<Derived*>(this)->GetSource(args...);
string name = static_cast<Derived*>(this)->KernelName(args...);
VLOG(1) << "function name: " << name;
VLOG(1) << "function src:\n" << src;
// Actually do the compiling.
nvrtcProgram prog;
NVRTC_CHECK(
nvrtcCreateProgram(&prog, src.c_str(), nullptr, 0, nullptr, nullptr));
// Compile the program.
// TODO(Yangqing): how to find the current gpu architecture instead of hard
// coding it?
const char* nvrtc_opts[] = {
"--gpu-architecture=compute_35", "--use_fast_math"};
nvrtcResult compile_result = nvrtcCompileProgram(prog, 2, nvrtc_opts);
if (compile_result != NVRTC_SUCCESS) {
size_t log_size;
NVRTC_CHECK(nvrtcGetProgramLogSize(prog, &log_size));
vector<char> nvrtc_log(log_size);
NVRTC_CHECK(nvrtcGetProgramLog(prog, nvrtc_log.data()));
LOG(FATAL) << "Compilation failure for nvrtc("
<< nvrtcGetErrorString(compile_result) << "): \n"
<< nvrtc_log.data();
}
size_t ptx_size;
NVRTC_CHECK(nvrtcGetPTXSize(prog, &ptx_size));
vector<char> nvrtc_ptx(ptx_size);
NVRTC_CHECK(nvrtcGetPTX(prog, nvrtc_ptx.data()));
NVRTC_CHECK(nvrtcDestroyProgram(&prog));
// After compilation, load the module.
if (module_loaded_) {
CUDA_DRIVERAPI_ENFORCE(cuModuleUnload(module_));
}
CUDA_DRIVERAPI_ENFORCE(
cuModuleLoadDataEx(&module_, nvrtc_ptx.data(), 0, 0, 0));
module_loaded_ = true;
CUDA_DRIVERAPI_ENFORCE(
cuModuleGetFunction(&kernel_, module_, name.c_str()));
}
template <typename... Args>
void Launch(
unsigned int gx,
unsigned int gy,
unsigned int gz,
unsigned int bx,
unsigned int by,
unsigned int bz,
unsigned int shared_mem,
cudaStream_t stream,
Args... args) {
CAFFE_ENFORCE(
module_loaded_, "Cannot call Launch before a module is loaded.");
void* args_voidp[] = {&args...};
CUDA_DRIVERAPI_ENFORCE(cuLaunchKernel(
kernel_, gx, gy, gz, bx, by, bz, shared_mem, stream, args_voidp, 0));
}
void LaunchEx(
unsigned int gx,
unsigned int gy,
unsigned int gz,
unsigned int bx,
unsigned int by,
unsigned int bz,
unsigned int shared_mem,
cudaStream_t stream,
void** extra) {
CAFFE_ENFORCE(
module_loaded_, "Cannot call Launch before a module is loaded.");
CUDA_DRIVERAPI_ENFORCE(cuLaunchKernel(
kernel_, gx, gy, gz, bx, by, bz, shared_mem, stream, nullptr, extra));
}
private:
bool module_loaded_;
CUmodule module_;
CUfunction kernel_;
};
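// Usage sketch (illustrative only; `SaxpyRTC` and its kernel are hypothetical,
// not part of this header): a Derived class supplies GetSource(...) returning
// the CUDA source and KernelName(...) returning the kernel name, e.g.
//
//   class SaxpyRTC : public CudaRTCFunction<SaxpyRTC> {
//    public:
//     string GetSource(float a) { /* return kernel source */ }
//     string KernelName(float a) { return "saxpy"; }
//   };
//
//   SaxpyRTC func;
//   func.Compile(2.0f);
//   func.Launch(grid_x, 1, 1, block_x, 1, 1, 0, stream, a, x_ptr, y_ptr, n);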
// TODO: this is in no way unique and is just a hack right now.
inline std::string GetUniqueName() {
static constexpr int len = 20;
static const char alpha[] =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
std::stringstream ss;
ss << "_cuda_kernel_";
for (const auto i : c10::irange(len)) {
ss << alpha[rand() % (sizeof(alpha) - 1)];
}
return ss.str();
}
} // namespace caffe2
#endif // CAFFE2_CUDA_RTC_COMMON_RTC_H_
| 4,300
| 31.583333
| 79
|
h
|
null |
pytorch-main/caffe2/db/create_db_op.h
|
#ifndef CAFFE2_DB_CREATE_DB_OP_H_
#define CAFFE2_DB_CREATE_DB_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/db.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <class Context>
class CreateDBOp final : public Operator<Context> {
public:
CreateDBOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
db_type_(OperatorBase::template GetSingleArgument<string>(
"db_type",
"leveldb")),
db_name_(OperatorBase::template GetSingleArgument<string>("db", "")),
num_shards_(
OperatorBase::template GetSingleArgument<int>("num_shards", 1)),
shard_id_(
OperatorBase::template GetSingleArgument<int>("shard_id", 0)) {
CAFFE_ENFORCE_GT(db_name_.size(), 0, "Must specify a db name.");
}
bool RunOnDevice() final {
OperatorBase::Output<db::DBReader>(0)->Open(
db_type_, db_name_, num_shards_, shard_id_);
return true;
}
private:
string db_type_;
string db_name_;
uint32_t num_shards_;
uint32_t shard_id_;
C10_DISABLE_COPY_AND_ASSIGN(CreateDBOp);
};
} // namespace caffe2
#endif // CAFFE2_DB_CREATE_DB_OP_H_
| 1,190
| 26.697674
| 77
|
h
|
null |
pytorch-main/caffe2/distributed/file_store_handler.h
|
#pragma once
#include <caffe2/distributed/store_handler.h>
namespace caffe2 {
class TORCH_API FileStoreHandler : public StoreHandler {
public:
explicit FileStoreHandler(const std::string& path, const std::string& prefix);
~FileStoreHandler() override;
void set(const std::string& name, const std::string& data) override;
virtual std::string get(
const std::string& name,
const std::chrono::milliseconds& timeout = kDefaultTimeout) override;
int64_t add(const std::string& name, int64_t value) override;
bool deleteKey(const std::string& key) override;
int64_t getNumKeys() override;
bool check(const std::vector<std::string>& names) override;
virtual void wait(
const std::vector<std::string>& names,
const std::chrono::milliseconds& timeout = kDefaultTimeout) override;
protected:
std::string basePath_;
std::string realPath(const std::string& path);
std::string tmpPath(const std::string& name);
std::string objectPath(const std::string& name);
};
} // namespace caffe2
| 1,039
| 24.365854
| 80
|
h
|
null |
pytorch-main/caffe2/distributed/file_store_handler_op.h
|
#pragma once
#include "file_store_handler.h"
#include <caffe2/core/operator.h>
namespace caffe2 {
template <class Context>
class FileStoreHandlerCreateOp final : public Operator<Context> {
public:
explicit FileStoreHandlerCreateOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
basePath_(
OperatorBase::template GetSingleArgument<std::string>("path", "")),
prefix_(OperatorBase::template GetSingleArgument<std::string>(
"prefix",
"")) {
CAFFE_ENFORCE_NE(basePath_, "", "path is a required argument");
}
bool RunOnDevice() override {
auto ptr =
std::unique_ptr<StoreHandler>(new FileStoreHandler(basePath_, prefix_));
*OperatorBase::Output<std::unique_ptr<StoreHandler>>(HANDLER) =
std::move(ptr);
return true;
}
private:
std::string basePath_;
std::string prefix_;
OUTPUT_TAGS(HANDLER);
};
} // namespace caffe2
| 979
| 23.5
| 80
|
h
|
null |
pytorch-main/caffe2/distributed/redis_store_handler.h
|
#pragma once
#include <caffe2/distributed/store_handler.h>
extern "C" {
#include <hiredis/hiredis.h>
}
#include <string>
namespace caffe2 {
class TORCH_API RedisStoreHandler : public StoreHandler {
public:
explicit RedisStoreHandler(std::string& host, int port, std::string& prefix);
virtual ~RedisStoreHandler();
void set(const std::string& name, const std::string& data) override;
virtual std::string get(
const std::string& name,
const std::chrono::milliseconds& timeout = kDefaultTimeout) override;
int64_t add(const std::string& name, int64_t value) override;
int64_t getNumKeys() override;
bool deleteKey(const std::string& key) override;
bool check(const std::vector<std::string>& names) override;
virtual void wait(
const std::vector<std::string>& names,
const std::chrono::milliseconds& timeout = kDefaultTimeout) override;
private:
std::string host_;
int port_;
std::string prefix_;
redisContext* redis_;
std::string compoundKey(const std::string& name);
};
} // namespace caffe2
| 1,060
| 21.574468
| 79
|
h
|
null |
pytorch-main/caffe2/distributed/redis_store_handler_op.h
|
#pragma once
#include "redis_store_handler.h"
#include <caffe2/core/operator.h>
#include <string>
namespace caffe2 {
template <class Context>
class RedisStoreHandlerCreateOp final : public Operator<Context> {
public:
explicit RedisStoreHandlerCreateOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
host_(
OperatorBase::template GetSingleArgument<std::string>("host", "")),
port_(OperatorBase::template GetSingleArgument<int>("port", 0)),
prefix_(OperatorBase::template GetSingleArgument<std::string>(
"prefix",
"")) {
CAFFE_ENFORCE_NE(host_, "", "host is a required argument");
CAFFE_ENFORCE_NE(port_, 0, "port is a required argument");
}
bool RunOnDevice() override {
auto ptr = std::unique_ptr<StoreHandler>(
new RedisStoreHandler(host_, port_, prefix_));
*OperatorBase::Output<std::unique_ptr<StoreHandler>>(HANDLER) =
std::move(ptr);
return true;
}
private:
std::string host_;
int port_;
std::string prefix_;
OUTPUT_TAGS(HANDLER);
};
} // namespace caffe2
| 1,143
| 24.422222
| 79
|
h
|
null |
pytorch-main/caffe2/distributed/store_handler.h
|
#pragma once
#include <chrono>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>
#include "caffe2/core/common.h"
namespace caffe2 {
class TORCH_API StoreHandler {
public:
static constexpr std::chrono::milliseconds kDefaultTimeout =
std::chrono::seconds(30);
static constexpr std::chrono::milliseconds kNoTimeout =
std::chrono::milliseconds::zero();
virtual ~StoreHandler();
/*
* Set data for the key if it doesn't exist.
 * If the key already exists, the data must match what is already stored
 * under that key.
*/
virtual void set(const std::string& name, const std::string& data) = 0;
/*
* Get the data for the key.
* The call should wait until the key is stored with specified timeout
* and return data if set else fail.
*/
virtual std::string get(
const std::string& name,
const std::chrono::milliseconds& timeout = kDefaultTimeout) = 0;
/*
* Does an atomic add operation on the key and returns the latest updated
* value.
* Note: To access the current value for this counter call with value = 0
*/
virtual int64_t add(const std::string& name, int64_t value) = 0;
/*
* Returns the number of keys in this store.
*/
virtual int64_t getNumKeys() = 0;
/*
* Removes the specified key from the store.
*/
virtual bool deleteKey(const std::string& key) = 0;
/*
 * Check if the given keys exist in the store.
*/
virtual bool check(const std::vector<std::string>& names) = 0;
/*
* Wait for Keys to be stored.
*/
virtual void wait(
const std::vector<std::string>& names,
const std::chrono::milliseconds& timeout = kDefaultTimeout) = 0;
};
/*
* The backing store is no longer available. It may have been deleted.
*/
struct TORCH_API StoreHandlerNotAvailableException : public std::runtime_error {
explicit StoreHandlerNotAvailableException(const std::string& msg)
: std::runtime_error(msg) {}
};
#define STORE_HANDLER_NOT_AVAILABLE(...) \
throw ::caffe2::StoreHandlerNotAvailableException( \
::c10::str("[", __FILE__, ":", __LINE__, "] ", __VA_ARGS__));
/*
* Timeout accessing the store.
*/
struct TORCH_API StoreHandlerTimeoutException : public std::runtime_error {
explicit StoreHandlerTimeoutException(const std::string& msg)
: std::runtime_error(msg) {}
};
#define STORE_HANDLER_TIMEOUT(...) \
throw ::caffe2::StoreHandlerTimeoutException( \
::c10::str("[", __FILE__, ":", __LINE__, "] ", __VA_ARGS__));
} // namespace caffe2
| 2,522
| 26.725275
| 80
|
h
|
null |
pytorch-main/caffe2/experiments/operators/fully_connected_op_decomposition.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_DECOMPOSITION_H_
#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_DECOMPOSITION_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
/*
* Although a FC_decomp is just like 2 small FC,
* it is better to have it as one op for future analysis.
 * Also, splitting it into two FC ops each with a bias would be incorrect.
* TODO(wyiming): decompose the layer into 2 matrices
* W(N * K) = U(N * middle) * trans(V(K * middle))
* */
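// Shapes used below: X is M x K, V is K x middle, U is N x middle and b has N
// elements, so Y = (X * V) * U^T + b is M x N; multi_buffer_ caches the
// intermediate X * V of shape M x middle.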
// This is Caffe's InnerProductOp, with a name that fits its purpose better.
template <typename T, class Context, class Engine = DefaultEngine>
class FullyConnectedOpDecomp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FullyConnectedOpDecomp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
~FullyConnectedOpDecomp() {}
bool RunOnDevice() override {
const auto& X = Input(0);
const auto& U = Input(1);
const auto& V = Input(2);
const auto& b = Input(3);
// auto* buffer_ptr = Output(1);
// Size M * middle;
// auto& multi_buffer_ = *buffer_ptr;
CAFFE_ENFORCE_GE(X.dim(), 1);
CAFFE_ENFORCE_GE(U.dim(), 2);
CAFFE_ENFORCE_GE(V.dim(), 2);
if (X.dim() > 2 || U.dim() > 2 || V.dim() > 2) {
VLOG(1) << "Using legacy support for arbitrary input and weight "
"dimensions.";
}
CAFFE_ENFORCE_EQ(b.dim(), 1);
// batch size
int M = X.dim() > 1 ? X.dim32(0) : 1;
// Feature dimension
int K = X.numel() / M;
// number of outputs.
int N = U.dim32(0);
    int middle = U.dim32(1);
CAFFE_ENFORCE_EQ(K, V.dim32(0));
CAFFE_ENFORCE_EQ(N, b.dim32(0));
std::vector<int64_t> dims;
if (X.dim() > 1) {
dims = {M, N};
multi_buffer_.Resize(M, middle);
} else {
dims = {N};
multi_buffer_.Resize(middle);
}
auto* Y = Output(0, dims, at::dtype<T>());
// The col buffer is stored in CHW order as well - kernel_dim, and the
// height and width.
// multi_buffer_.Resize(M, middle);
T* multi_buffer_data = multi_buffer_.template mutable_data<T>();
// X * V * tans(U)
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
middle,
K,
1,
X.template data<T>(),
V.template data<T>(),
0,
multi_buffer_data,
&context_);
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasTrans,
M,
N,
middle,
1,
multi_buffer_data,
U.template data<T>(),
0,
Y->template mutable_data<T>(),
&context_);
// Add bias term
if (bias_multiplier_.numel() != M) {
// If the helper bias multiplier is not M, reshape and fill it with one.
bias_multiplier_.Resize(M);
math::Set<T, Context>(
M,
static_cast<T>(1),
bias_multiplier_.template mutable_data<T>(),
&context_);
}
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
N,
1,
1,
bias_multiplier_.template data<T>(),
b.template data<T>(),
1,
Y->template mutable_data<T>(),
&context_);
return true;
}
protected:
Tensor bias_multiplier_{Context::GetDeviceType()};
Tensor multi_buffer_{Context::GetDeviceType()};
};
template <typename T, class Context, class Engine = DefaultEngine>
class FullyConnectedDecompGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FullyConnectedDecompGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
~FullyConnectedDecompGradientOp() {}
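  // Gradients of Y = (X V) U^T + b computed below:
  //   dU = dY^T (X V),  dV = X^T (dY U),  db = dY^T 1,  dX = (dY U) V^T.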
bool RunOnDevice() override {
const auto& X = Input(0);
const auto& U = Input(1);
const auto& V = Input(2);
const auto& dY = Input(3);
TORCH_DCHECK_GE(X.dim(), 1);
TORCH_DCHECK_GE(U.dim(), 2);
TORCH_DCHECK_GE(V.dim(), 2);
TORCH_DCHECK_LE(dY.dim(), 2);
// batch size
int M = X.dim() > 1 ? X.dim32(0) : 1;
// Feature dimension
int K = X.numel() / M;
// number of outputs.
int N = U.dim32(0);
int middle = U.dim32(1);
TORCH_DCHECK_EQ(K, V.dim32(0));
if (dY.dim() > 1) {
TORCH_DCHECK_EQ(M, dY.dim32(0));
TORCH_DCHECK_EQ(N, dY.dim32(1));
} else {
TORCH_DCHECK_EQ(X.dim(), 1);
TORCH_DCHECK_EQ(N, dY.numel());
}
auto* dU = Output(0, U.sizes(), at::dtype<T>());
auto* dV = Output(1, V.sizes(), at::dtype<T>());
auto* db = Output(2, {N}, at::dtype<T>());
// Compute dU
// first compute X * V
    du_buffer_.Resize(M, middle);
T* du_buffer_data = du_buffer_.template mutable_data<T>();
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
middle,
K,
1,
X.template data<T>(),
V.template data<T>(),
0,
du_buffer_data,
&context_);
math::Gemm<T, Context, Engine>(
CblasTrans,
CblasNoTrans,
N,
middle,
M,
1,
dY.template data<T>(),
du_buffer_data,
0,
dU->template mutable_data<T>(),
&context_);
// Compute dV
// first compute dY * U
dv_buffer_.Resize(M, middle);
T* dv_buffer_data = dv_buffer_.template mutable_data<T>();
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
middle,
N,
1,
dY.template data<T>(),
U.template data<T>(),
0,
dv_buffer_data,
&context_);
math::Gemm<T, Context, Engine>(
CblasTrans,
CblasNoTrans,
K,
middle,
M,
1,
        X.template data<T>(),
        dv_buffer_data,
0,
dV->template mutable_data<T>(),
&context_);
if (bias_multiplier_.numel() != M) {
// If the helper bias multiplier is not M, reshape and fill it with one.
bias_multiplier_.Resize(M);
math::Set<T, Context>(
M,
static_cast<T>(1),
bias_multiplier_.template mutable_data<T>(),
&context_);
}
// Compute dB
math::Gemv<T, Context>(
CblasTrans,
M,
N,
1,
dY.template data<T>(),
bias_multiplier_.template data<T>(),
0,
db->template mutable_data<T>(),
&context_);
// Compute dX if necessary.
if (OutputSize() == 4) {
auto* dX = Output(3, X.sizes(), at::dtype<T>());
dx_buffer_.Resize(M, middle);
T* dx_buffer_data = dx_buffer_.template mutable_data<T>();
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
middle,
N,
1,
dY.template data<T>(),
U.template data<T>(),
0,
dx_buffer_data,
&context_);
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasTrans,
M,
K,
middle,
1,
dx_buffer_data,
V.template data<T>(),
0,
dX->template mutable_data<T>(),
&context_);
}
return true;
}
protected:
Tensor bias_multiplier_{Context::GetDeviceType()};
Tensor du_buffer_{Context::GetDeviceType()};
Tensor dv_buffer_{Context::GetDeviceType()};
Tensor dx_buffer_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_DECOMPOSITION_H_
| 8,170
| 26.982877
| 80
|
h
|
null |
pytorch-main/caffe2/experiments/operators/fully_connected_op_prune.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_PRUNE_H_
#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_PRUNE_H_
#include <c10/util/Logging.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <int N>
using Shape = std::array<int, N>;
template <int N>
const std::vector<int64_t>& shape(Shape<N> vs) {
static thread_local std::vector<int64_t> cache;
cache.resize(vs.size());
for (const auto i : c10::irange(vs.size())) {
cache[i] = vs[i];
}
return cache;
}
inline const std::vector<int64_t>& shape(int i) {
return shape<1>(Shape<1>({i}));
}
inline const std::vector<int64_t>& shape(int i, int j) {
return shape<2>(Shape<2>({i, j}));
}
template <typename T, class Context>
void MaskMatrix(const T* mask, T* mat, int M, int N);
template <typename T, class Context>
void MaskMatrix_Inc(T* mask_seq, T* mat, int M, int N, int seq_len, T target);
template <typename T, class Context>
void AggrDW(T* ag_dw, const T* dw, int N, int K, Context* context);
template <typename T>
int MatrixCompare_LT(const T* mat, float thres, T* mask_seq, int M, int N);
// TODO(wyiming): write an incremental Mask.
// An incremental mask only records the newly masked positions, assuming that
// weights, once masked, are never masked again.
// The incremental mask could also be used to update the mask matrix,
// but that would require instantiating the template for both bool and float.
template <>
void MaskMatrix<float, CPUContext>(
const float* mask,
float* mat,
int M,
int N) {
int offset = 0;
for (int i = 0; i < M; ++i) {
for (int j = 0; j < N; ++j) {
mat[offset] = mask[offset] ? mat[offset] : 0;
offset++;
}
}
}
template <>
void MaskMatrix_Inc<float, CPUContext>(
float* mask_seq,
float* mat,
int /*M*/,
int /*N*/,
int seq_len,
float target) {
for (const auto i : c10::irange(seq_len)) {
    // mask_seq is assumed to hold fewer entries than the matrix itself.
    // Random access may look slow, but seq is kept in increasing order,
    // so the writes remain mostly sequential.
mat[static_cast<int>(mask_seq[i])] = target;
}
}
template <>
void AggrDW<float, CPUContext>(
float* ag_dw,
const float* dw,
int N,
int K,
CPUContext* context) {
math::Add<float, CPUContext>(N * K, dw, ag_dw, ag_dw, context);
}
template <>
int MatrixCompare_LT<float>(
const float* mat,
float thres,
float* mask_seq,
int M,
int N) {
int seq_len = 0;
int offset = 0;
for (int i = 0; i < M; ++i) {
for (int j = 0; j < N; ++j) {
if (mat[offset] != 0 && (mat[offset] < thres && mat[offset] > -thres)) {
mask_seq[seq_len++] = static_cast<float>(offset);
}
offset++;
}
}
return seq_len;
}
} // namespace
// This is Caffe's InnerProductOp, with a name that fits its purpose better.
template <typename T, class Context, class Engine = DefaultEngine>
class FullyConnectedOpPrune final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FullyConnectedOpPrune(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
~FullyConnectedOpPrune() {}
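  // Computes Y = X W^T + b. When a second output is requested, it also emits
  // the mean of Mask, i.e. the fraction of unpruned weights for a 0/1 mask.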
bool RunOnDevice() override {
const auto& X = Input(0);
const auto& W = Input(1);
const auto& Mask = Input(2);
const auto& b = Input(3);
CAFFE_ENFORCE_GE(X.dim(), 1);
CAFFE_ENFORCE_GE(W.dim(), 2);
if (X.dim() > 2 || W.dim() > 2) {
VLOG(1) << "Using legacy support for arbitrary input and weight "
"dimensions.";
}
CAFFE_ENFORCE_EQ(b.dim(), 1);
// batch size
int M = X.dim() > 1 ? X.dim32(0) : 1;
// Feature dimension
int K = X.numel() / M;
// number of outputs.
int N = W.dim32(0);
CAFFE_ENFORCE_EQ(K, W.numel() / W.dim32(0));
CAFFE_ENFORCE_EQ(N, b.dim32(0));
std::vector<int64_t> dims;
if (X.dim() > 1) {
dims = {M, N};
} else {
dims = {N};
}
auto* Y = Output(0, dims, at::dtype<T>());
// W * x
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasTrans,
M,
N,
K,
1,
X.template data<T>(),
W.template data<T>(),
0,
Y->template mutable_data<T>(),
&context_);
// Add bias term
if (bias_multiplier_.numel() != M) {
// If the helper bias multiplier is not M,
// reshape and fill it with one.
bias_multiplier_.Resize(M);
math::Set<T, Context>(
M,
static_cast<T>(1),
bias_multiplier_.template mutable_data<T>(),
&context_);
}
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
N,
1,
1,
bias_multiplier_.template data<T>(),
b.template data<T>(),
1,
Y->template mutable_data<T>(),
&context_);
if (OutputSize() == 2) {
auto* Comp_rate = Output(1, vector<int64_t>(), at::dtype<T>());
T* comp_data = Comp_rate->template mutable_data<T>();
math::Sum<T, Context>(
Mask.numel(), Mask.template data<T>(), comp_data, &context_);
math::Scale<float, T, Context>(
1,
static_cast<T>(1.) / Mask.numel(),
comp_data,
comp_data,
&context_);
}
return true;
}
protected:
Tensor bias_multiplier_{Context::GetDeviceType()};
};
template <typename T, class Context, class Engine = DefaultEngine>
class FullyConnectedPruneGradientOp : public Operator<Context> {
public:
int iter_offset;
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FullyConnectedPruneGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {
iter_offset = 0;
}
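  // Pruning flow implemented below: dW is first masked with the current Mask;
  // while the fraction of unpruned weights stays above comp_lb, gradients are
  // accumulated into Ag_dW, and every `window_size` iterations the positions
  // whose accumulated gradient is non-zero but smaller than `thres` in
  // magnitude are pruned (zeroed in W, dW and Mask) and Ag_dW is reset.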
~FullyConnectedPruneGradientOp() {}
bool RunOnDevice() override {
const auto& X = Input(0);
// const auto& W = Input(1);
auto* W_ptr = Output(2);
auto& W = *W_ptr;
// const auto& Mask = Input(2);
auto* Mask_ptr = Output(3);
auto& Mask = *Mask_ptr;
const auto& dY = Input(3);
// const auto& Ag_dW = Input(4);
auto* Ag_dW_ptr = Output(4);
auto& Ag_dW = *Ag_dW_ptr;
    // Ag_dW is also passed in as Input(5).
    // Input(6) provides the pruning threshold.
    auto& thres = Input(6);
// TODO(wyiming): check comp_lb is a float
auto& comp_lb = Input(7);
TORCH_DCHECK_GE(X.dim(), 1);
TORCH_DCHECK_GE(W.dim(), 2);
TORCH_DCHECK_LE(dY.dim(), 2);
// batch size
int M = X.dim() > 1 ? X.dim32(0) : 1;
// Feature dimension
int K = X.numel() / M;
// number of outputs.
int N = W.dim32(0);
// TODO(wyiming): add this window_size to workspace?
int window_size = 100;
// TODO(wyiming): this threshold should be
// based on distribution of the layer weight
float thr = 0.01;
TORCH_DCHECK_EQ(Mask.dim32(0), W.dim32(0));
TORCH_DCHECK_EQ(Mask.dim32(1), W.dim32(1));
TORCH_DCHECK_EQ(Ag_dW.dim32(0), W.dim32(0));
TORCH_DCHECK_EQ(Ag_dW.dim32(1), W.dim32(1));
TORCH_DCHECK_EQ(K, W.numel() / W.dim32(0));
if (dY.dim() > 1) {
TORCH_DCHECK_EQ(M, dY.dim32(0));
TORCH_DCHECK_EQ(N, dY.dim32(1));
} else {
TORCH_DCHECK_EQ(X.dim(), 1);
TORCH_DCHECK_EQ(N, dY.numel());
}
auto* dW = Output(0, W.sizes(), at::dtype<T>());
auto* db = Output(1, {N}, at::dtype<T>());
// Compute dW
math::Gemm<T, Context, Engine>(
CblasTrans,
CblasNoTrans,
N,
K,
M,
1,
dY.template data<T>(),
X.template data<T>(),
0,
dW->template mutable_data<T>(),
&context_);
comp_r_buf_.Resize(vector<int64_t>());
T* comp_data = comp_r_buf_.template mutable_data<T>();
math::Sum<T, Context>(
Mask.numel(), Mask.template data<T>(), comp_data, &context_);
math::Scale<float, T, Context>(
1, static_cast<T>(1.) / Mask.numel(), comp_data, comp_data, &context_);
    // Update W once per window of `window_size` iterations.
    // Note that this op maintains state (iter_offset, Ag_dW) across runs,
    // which is new for Caffe2 operators and may need further discussion.
    // At most half of the matrix is masked at a time.
    // 1. Mask dW with the previous mask.
MaskMatrix<T, Context>(
Mask.template mutable_data<T>(), dW->template mutable_data<T>(), N, K);
if (*comp_data > *(comp_lb.template data<T>())) {
iter_offset++;
if (iter_offset % window_size == 0) {
// TODO(wyiming):do the prune here;
sum_buffer_.ResizeLike(W);
math::Add<T, Context>(
W.numel(),
W.template mutable_data<T>(),
Ag_dW.template mutable_data<T>(),
sum_buffer_.template mutable_data<T>(),
&context_);
auto* mask_seq_auto = Output(5, W.sizes(), at::dtype<T>());
T* mask_seq = mask_seq_auto->template mutable_data<T>();
math::Set<T, Context>(
N * K,
static_cast<T>(0),
mask_seq_auto->template mutable_data<T>(),
&context_);
// 2. find dw below thres but not eq 0
int seq_len = MatrixCompare_LT<T>(
Ag_dW_ptr->template mutable_data<T>(),
*thres.template data<T>(),
mask_seq,
N,
K);
// 3. use the mask_seq to update W and dw
MaskMatrix_Inc<T, Context>(
mask_seq, dW->template mutable_data<T>(), N, K, seq_len, 0);
MaskMatrix_Inc<T, Context>(
mask_seq, W.template mutable_data<T>(), N, K, seq_len, 0);
MaskMatrix_Inc<T, Context>(
mask_seq, Mask.template mutable_data<T>(), N, K, seq_len, 0);
math::Set<T, Context>(
N * K,
static_cast<T>(0),
Ag_dW.template mutable_data<T>(),
&context_);
} else {
// add dW to Aggregate dW.
AggrDW<T, Context>(
Ag_dW.template mutable_data<T>(),
dW->template mutable_data<T>(),
N,
K,
&context_);
}
}
if (bias_multiplier_.numel() != M) {
// If the helper bias multiplier is not M,
// reshape and fill it with one.
bias_multiplier_.Resize(M);
math::Set<T, Context>(
M,
static_cast<T>(1),
bias_multiplier_.template mutable_data<T>(),
&context_);
}
// Compute dB
math::Gemv<T, Context>(
CblasTrans,
M,
N,
1,
dY.template data<T>(),
bias_multiplier_.template data<T>(),
0,
db->template mutable_data<T>(),
&context_);
// Compute dX if necessary.
if (OutputSize() == 7) {
auto* dX = Output(6, X.sizes(), at::dtype<T>());
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
K,
N,
1,
dY.template data<T>(),
W.template data<T>(),
0,
dX->template mutable_data<T>(),
&context_);
}
return true;
}
protected:
Tensor bias_multiplier_{Context::GetDeviceType()};
Tensor sum_buffer_{Context::GetDeviceType()};
Tensor comp_r_buf_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_PRUNE_H_
| 11,801
| 27.926471
| 79
|
h
|
null |
pytorch-main/caffe2/experiments/operators/fully_connected_op_sparse.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_OPERATORS_FULLY_CONNECTED_OP_SPARSE_H_
#define CAFFE2_OPERATORS_FULLY_CONNECTED_OP_SPARSE_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#ifdef CAFFE2_USE_MKL
#include <mkl.h>
#endif // CAFFE2_USE_MKL
namespace caffe2 {
namespace {
template <int N>
using Shape = std::array<int, N>;
template <int N>
const std::vector<int64_t>& shape(Shape<N> vs) {
static thread_local std::vector<int64_t> cache;
cache.resize(vs.size());
for (const auto i : c10::irange(vs.size())) {
cache[i] = vs[i];
}
return cache;
}
inline const std::vector<int64_t>& shape(int i) {
return shape<1>(Shape<1>({i}));
}
inline const std::vector<int64_t>& shape(int i, int j) {
return shape<2>(Shape<2>({i, j}));
}
template <typename T, class Context>
void Sparse_mm(
const T* acsr,
const int* ia,
const int* ja,
int m,
int k,
int n,
const T* b,
T* c,
Context* context);
template <typename T, class Context>
void trans_mat(const T* o, T* t, int m, int n, Context* context);
template <>
void trans_mat<float, CPUContext>(
const float* o,
float* t,
int m,
int n,
CPUContext* /*context*/) {
for (const auto i : c10::irange(m)) {
for (const auto j : c10::irange(n)) {
t[j * m + i] = o[i * n + j];
}
}
}
// C = A(sparse) * B
// No transpose;
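// Inputs follow the usual CSR layout: `acsr` holds the non-zero values of the
// m x k matrix A, `ia` is the row-pointer array (m + 1 entries), `ja` the
// per-value column indices; `b` and `c` are dense row-major k x n and m x n.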
template <>
void Sparse_mm<float, CPUContext>(
const float* acsr,
const int* ia,
const int* ja,
int m,
int k,
int n,
const float* b,
float* c,
CPUContext* /*context*/) {
#ifdef CAFFE2_USE_MKL
float alpha = 1.0, beta = 0.;
mkl_scsrmm(
"N",
&m,
&n,
&k,
&alpha,
"GLNC",
acsr,
ja,
ia,
ia + 1,
b,
&n,
&beta,
c,
&n);
#else
throw std::runtime_error("Not compiled with MKL");
#endif
}
} // namespace
// This is Caffe's InnerProductOp, with a name that fits its purpose better.
template <typename T, class Context, class Engine = DefaultEngine>
class FullyConnectedOp_SPARSE final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FullyConnectedOp_SPARSE(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
~FullyConnectedOp_SPARSE() {}
bool RunOnDevice() override {
const auto& Xt = Input(0); // transposed X
const auto& Wcsr = Input(1);
const auto& iw = Input(2);
const auto& jw = Input(3);
// Notice that we do not need to transpose b
const auto& b = Input(4);
    // The output is the transposed Y (N x M); the input Xt is assumed to be
    // X transposed, i.e. K x M.
CAFFE_ENFORCE_EQ(Xt.dim(), 2);
CAFFE_ENFORCE_EQ(b.dim(), 1);
// batch size
int K = Xt.dim() > 1 ? Xt.dim32(0) : 1;
// Feature dimension
int M = Xt.numel() / K;
// number of outputs.
int N = iw.dim32(0) - 1;
CAFFE_ENFORCE_EQ(N, b.dim32(0));
auto* Yt = Output(0, shape(N, M), at::dtype<T>());
// Y' = W * X';
Sparse_mm<T, Context>(
Wcsr.template data<T>(),
iw.template data<int>(),
jw.template data<int>(),
N,
K,
M,
Xt.template data<T>(),
Yt->template mutable_data<T>(),
&context_);
// Add bias term
if (bias_multiplier_.numel() != M) {
// If the helper bias multiplier is not M, reshape and fill it with one.
bias_multiplier_.Resize(shape(M));
math::Set<T, Context>(
M,
static_cast<T>(1),
bias_multiplier_.template mutable_data<T>(),
&context_);
}
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
N,
M,
1,
1,
b.template data<T>(),
bias_multiplier_.template data<T>(),
1,
Yt->template mutable_data<T>(),
&context_);
return true;
}
protected:
Tensor bias_multiplier_{Context::GetDeviceType()};
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FULLY_CONNECTED_OP_H_
| 4,601
| 22.84456
| 78
|
h
|
null |
pytorch-main/caffe2/experiments/operators/funhash_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_OPERATORS_FUNHASH_OP_H_
#define CAFFE2_OPERATORS_FUNHASH_OP_H_
#include <xxhash.h>
#include <array>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#define SIGN_MAGIC 0x9e3779b97f4a7c15
#define INDEX_MAGIC 0xf39cc0605cedc834
#define USE_SIGN
namespace caffe2 {
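// FunHash is effectively the feature-hashing trick applied to a weight matrix:
// all learnable parameters live in one flat `weight` vector, and the weight
// used for a given (feature key, output index, alpha index) triple is looked
// up by hashing the triple plus a magic constant into [0, num_weight). With
// USE_SIGN a second hash decides whether the retrieved weight is negated.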
template <typename T, class Context>
class FunHashOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FunHashOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
num_outputs_(
OperatorBase::GetSingleArgument<int64_t>("num_outputs", -1)),
num_segments_(
OperatorBase::GetSingleArgument<int64_t>("num_segments", -1)),
seed_(OperatorBase::GetSingleArgument<uint64_t>("seed", 0)) {
CAFFE_ENFORCE(
OperatorBase::HasArgument("num_outputs"),
"Argument `num_outputs` is missing.");
// If alpha is provided, use adaptive hashing parameterized by alpha.
adaptive_ = (InputSize() == 5);
}
bool RunOnDevice() override {
const auto& val = Input(0);
const auto& key = Input(1);
const auto& seg = Input(2);
const auto& weight = Input(3);
int64_t num_alpha = 1;
if (adaptive_) {
const auto& alpha = Input(4);
num_alpha = alpha.size(0);
}
const auto* seg_data = seg.template data<int>();
int64_t num_weight = weight.size(0);
int64_t num_nz_ent = seg.size(0);
int64_t n_segments = num_segments_;
if (num_segments_ == -1) {
for (const auto i : c10::irange(num_nz_ent)) {
if (seg_data[i] > n_segments) {
n_segments = seg_data[i];
}
}
++n_segments;
}
auto* output = Output(0, {n_segments, num_outputs_}, at::dtype<T>());
T* output_data = output->template mutable_data<T>();
memset(output_data, 0, sizeof(T) * n_segments * num_outputs_);
const auto* weight_data = weight.template data<T>();
const auto* alpha_data = adaptive_ ? Input(4).template data<T>() : 0;
const auto* val_data = val.template data<T>();
const auto* key_data = key.template data<int64_t>();
for (const auto j : c10::irange(num_nz_ent)) {
int64_t cur_seg = seg_data[j];
int64_t cur_key = key_data[j];
T cur_val = val_data[j];
int64_t output_stride = cur_seg * num_outputs_;
for (const auto i : c10::irange(num_outputs_)) {
T sum = 0;
for (const auto k : c10::irange(num_alpha)) {
uint64_t hash;
// The hash function takes as input four integers:
// 1. feature index
// 2. output index
// 3. alpha index
// 4. magic number: SIGN_MAGIC for sign (-1/+1)
// INDEX_MAGIC for weight index
hash_data[0] = cur_key;
hash_data[1] = i;
hash_data[2] = k;
hash_data[3] = INDEX_MAGIC;
hash = XXH64(hash_data.data(), hash_data.size(), seed_);
int64_t index = hash % num_weight;
T cur_weight = weight_data[index];
#ifdef USE_SIGN
hash_data[3] = SIGN_MAGIC;
hash = XXH64(hash_data.data(), hash_data.size(), seed_);
if (hash % 2) {
cur_weight = -cur_weight;
}
#endif // USE_SIGN
if (adaptive_) {
sum += cur_weight * alpha_data[k];
} else {
sum += cur_weight;
}
}
output_data[output_stride + i] += sum * cur_val;
}
}
return true;
}
protected:
int64_t num_outputs_;
int64_t num_segments_;
uint64_t seed_;
std::array<uint64_t, 4> hash_data;
bool adaptive_;
};
template <typename T, class Context>
class FunHashGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FunHashGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
num_outputs_(
OperatorBase::GetSingleArgument<int64_t>("num_outputs", -1)),
seed_(OperatorBase::GetSingleArgument<uint64_t>("seed", 0)) {
adaptive_ = (InputSize() == 6);
}
bool RunOnDevice() override {
const auto& grad_out = Input(0);
const auto& val = Input(1);
const auto& key = Input(2);
const auto& seg = Input(3);
const auto& weight = Input(4);
int64_t num_alpha = 1;
T* grad_alpha_data = 0;
if (adaptive_) {
const auto& alpha = Input(5);
num_alpha = alpha.size(0);
auto* grad_alpha = Output(1, alpha.sizes(), at::dtype<T>());
grad_alpha_data = grad_alpha->template mutable_data<T>();
memset(grad_alpha_data, 0, sizeof(T) * num_alpha);
}
const auto* seg_data = seg.template data<int>();
int64_t num_weight = weight.size(0);
int64_t num_nz_ent = seg.size(0);
auto* grad_weight = Output(0, weight.sizes(), at::dtype<T>());
T* grad_weight_data = grad_weight->template mutable_data<T>();
const auto* grad_out_data = grad_out.template data<T>();
const auto* weight_data = weight.template data<T>();
const auto* alpha_data = adaptive_ ? Input(5).template data<T>() : 0;
const auto* val_data = val.template data<T>();
const auto* key_data = key.template data<int64_t>();
memset(grad_weight_data, 0, sizeof(T) * num_weight);
for (const auto j : c10::irange(num_nz_ent)) {
int64_t cur_seg = seg_data[j];
int64_t cur_key = key_data[j];
T cur_val = val_data[j];
int64_t grad_out_stride = cur_seg * num_outputs_;
for (const auto i : c10::irange(num_outputs_)) {
T grad_out_scale = grad_out_data[grad_out_stride + i] * cur_val;
for (const auto k : c10::irange(num_alpha)) {
uint64_t hash;
hash_data[0] = cur_key;
hash_data[1] = i;
hash_data[2] = k;
hash_data[3] = INDEX_MAGIC;
hash = XXH64(hash_data.data(), hash_data.size(), seed_);
int64_t index = hash % num_weight;
T cur_grad_out_scale = grad_out_scale;
#ifdef USE_SIGN
hash_data[3] = SIGN_MAGIC;
hash = XXH64(hash_data.data(), hash_data.size(), seed_);
if (hash % 2) {
cur_grad_out_scale = -cur_grad_out_scale;
}
#endif // USE_SIGN
if (adaptive_) {
grad_alpha_data[k] += cur_grad_out_scale * weight_data[index];
grad_weight_data[index] += alpha_data[k] * cur_grad_out_scale;
} else {
grad_weight_data[index] += cur_grad_out_scale;
}
}
}
}
return true;
}
protected:
int64_t num_outputs_;
uint64_t seed_;
std::array<uint64_t, 4> hash_data;
bool adaptive_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_FUNHASH_OP_H_
| 7,342
| 29.983122
| 75
|
h
|
null |
pytorch-main/caffe2/experiments/operators/sparse_funhash_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_OPERATORS_SPARSE_FUNHASH_OP_H_
#define CAFFE2_OPERATORS_SPARSE_FUNHASH_OP_H_
#include <xxhash.h>
#include <array>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#define HASH_MAGIC 0x9e3779b97f4a7c15
#define USE_SIGN
namespace caffe2 {
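// Same hashed weight-sharing scheme as FunHash, with two differences visible
// below: under USE_SIGN the sign comes from the lowest bit of a single hash
// (the remaining bits select the weight index), and the gradient op emits its
// weight gradient in sparse form as (value, index) pairs.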
template <typename T, class Context>
class SparseFunHashOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
SparseFunHashOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
num_outputs_(
OperatorBase::GetSingleArgument<int64_t>("num_outputs", -1)),
num_segments_(
OperatorBase::GetSingleArgument<int64_t>("num_segments", -1)),
seed_(OperatorBase::GetSingleArgument<uint64_t>("seed", 0)) {
CAFFE_ENFORCE(
OperatorBase::HasArgument("num_outputs"),
"Argument `num_outputs` is missing.");
// If alpha is provided, use adaptive hashing parameterized by alpha.
adaptive_ = (InputSize() == 5);
}
bool RunOnDevice() override {
const auto& val = Input(0);
const auto& key = Input(1);
const auto& seg = Input(2);
const auto& weight = Input(3);
int64_t num_alpha = 1;
if (adaptive_) {
const auto& alpha = Input(4);
num_alpha = alpha.size(0);
}
const auto* seg_data = seg.template data<int>();
int64_t num_weight = weight.size(0);
int64_t num_nz_ent = seg.size(0);
int64_t n_segments = num_segments_;
if (num_segments_ == -1) {
for (const auto i : c10::irange(num_nz_ent)) {
if (seg_data[i] > n_segments) {
n_segments = seg_data[i];
}
}
++n_segments;
}
auto* output = Output(0, {n_segments, num_outputs_}, at::dtype<T>());
T* output_data = output->template mutable_data<T>();
memset(output_data, 0, sizeof(T) * n_segments * num_outputs_);
const auto* weight_data = weight.template data<T>();
const auto* alpha_data = adaptive_ ? Input(4).template data<T>() : 0;
const auto* val_data = val.template data<T>();
const auto* key_data = key.template data<int64_t>();
for (const auto j : c10::irange(num_nz_ent)) {
int64_t cur_seg = seg_data[j];
int64_t cur_key = key_data[j];
T cur_val = val_data[j];
int64_t output_stride = cur_seg * num_outputs_;
for (const auto i : c10::irange(num_outputs_)) {
T sum = 0;
for (const auto k : c10::irange(num_alpha)) {
          // The hash function takes as input four integers:
// 1. feature index
// 2. output index
// 3. alpha index
// 4. magic number to improve hashing
hash_data[0] = cur_key;
hash_data[1] = i;
hash_data[2] = k;
hash_data[3] = HASH_MAGIC;
uint64_t hash = XXH64(hash_data.data(), hash_data.size(), seed_);
#ifdef USE_SIGN
// Use the least significant bit for sign, the rest for weights.
int64_t index = (hash >> 1) % num_weight;
T cur_weight = weight_data[index];
if (hash & 1) {
cur_weight = -cur_weight;
}
#else
int64_t index = hash % num_weight;
T cur_weight = weight_data[index];
#endif
if (adaptive_) {
sum += cur_weight * alpha_data[k];
} else {
sum += cur_weight;
}
}
output_data[output_stride + i] += sum * cur_val;
}
}
return true;
}
protected:
int64_t num_outputs_;
int64_t num_segments_;
uint64_t seed_;
std::array<uint64_t, 4> hash_data;
bool adaptive_;
};
template <typename T, class Context>
class SparseFunHashGradientOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
SparseFunHashGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
num_outputs_(
OperatorBase::GetSingleArgument<int64_t>("num_outputs", -1)),
seed_(OperatorBase::GetSingleArgument<uint64_t>("seed", 0)) {
adaptive_ = (InputSize() == 6);
}
bool RunOnDevice() override {
const auto& grad_out = Input(0);
const auto& val = Input(1);
const auto& key = Input(2);
const auto& seg = Input(3);
const auto& weight = Input(4);
int64_t num_alpha = 1;
T* grad_alpha_data = 0;
if (adaptive_) {
const auto& alpha = Input(5);
num_alpha = alpha.size(0);
auto* grad_alpha = Output(2, alpha.sizes(), at::dtype<T>());
grad_alpha_data = grad_alpha->template mutable_data<T>();
memset(grad_alpha_data, 0, sizeof(T) * num_alpha);
}
const auto* seg_data = seg.template data<int>();
int64_t num_weight = weight.size(0);
int64_t num_nz_ent = seg.size(0);
int64_t grad_weight_size = num_nz_ent * num_outputs_ * num_alpha;
auto* grad_weight_val = Output(0, {grad_weight_size}, at::dtype<T>());
T* grad_weight_val_data = grad_weight_val->template mutable_data<T>();
auto* grad_weight_ind = Output(1, {grad_weight_size}, at::dtype<int64_t>());
auto* grad_weight_ind_data =
grad_weight_ind->template mutable_data<int64_t>();
const auto* grad_out_data = grad_out.template data<T>();
const auto* weight_data = weight.template data<T>();
const auto* alpha_data = adaptive_ ? Input(5).template data<T>() : 0;
const auto* val_data = val.template data<T>();
const auto* key_data = key.template data<int64_t>();
int64_t w_ind = 0;
for (const auto j : c10::irange(num_nz_ent)) {
int64_t cur_seg = seg_data[j];
int64_t cur_key = key_data[j];
T cur_val = val_data[j];
int64_t grad_out_stride = cur_seg * num_outputs_;
for (const auto i : c10::irange(num_outputs_)) {
T grad_out_scale = grad_out_data[grad_out_stride + i] * cur_val;
for (const auto k : c10::irange(num_alpha)) {
hash_data[0] = cur_key;
hash_data[1] = i;
hash_data[2] = k;
hash_data[3] = HASH_MAGIC;
uint64_t hash = XXH64(hash_data.data(), hash_data.size(), seed_);
T cur_grad_out_scale = grad_out_scale;
#ifdef USE_SIGN
int64_t index = (hash >> 1) % num_weight;
if (hash & 1) {
cur_grad_out_scale = -cur_grad_out_scale;
}
#else
int64_t index = hash % num_weight;
#endif
if (adaptive_) {
grad_alpha_data[k] += cur_grad_out_scale * weight_data[index];
grad_weight_val_data[w_ind] = alpha_data[k] * cur_grad_out_scale;
} else {
grad_weight_val_data[w_ind] = cur_grad_out_scale;
}
grad_weight_ind_data[w_ind] = index;
++w_ind;
}
}
}
return true;
}
protected:
int64_t num_outputs_;
uint64_t seed_;
std::array<uint64_t, 4> hash_data;
bool adaptive_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SPARSE_FUNHASH_OP_H_
| 7,548
| 30.194215
| 80
|
h
|
null |
pytorch-main/caffe2/experiments/operators/sparse_matrix_reshape_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_OPERATORS_SPARSE_MATRIX_RESHAPE_H_
#define CAFFE2_OPERATORS_SPARSE_MATRIX_RESHAPE_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <class Context>
class SparseMatrixReshapeOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
SparseMatrixReshapeOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {
CAFFE_ENFORCE(
OperatorBase::HasArgument("old_shape"),
"Argument `old_shape` is missing.");
CAFFE_ENFORCE(
OperatorBase::HasArgument("new_shape"),
"Argument `new_shape` is missing.");
vector<int64_t> old_shape =
OperatorBase::GetRepeatedArgument<int64_t>("old_shape");
vector<int64_t> new_shape =
OperatorBase::GetRepeatedArgument<int64_t>("new_shape");
CAFFE_ENFORCE(
old_shape.size() == 2,
"Argument `old_shape` must contain exactly two integers.");
CAFFE_ENFORCE(
new_shape.size() == 2,
"Argument `new_shape` must contain exactly two integers.");
CAFFE_ENFORCE(
old_shape[1] > 0,
"The second dimension in argument `old_shape` must be positive.");
old_stride_ = old_shape[1];
if (old_shape[0] == -1) {
CAFFE_ENFORCE(
new_shape[1] > 0,
"The second dimension in `new_shape` must be positive.");
} else {
CAFFE_ENFORCE(
old_shape[0] > 0,
"The first dimension in `old_shape` must be positive.");
int64_t matrix_size = old_shape[0] * old_shape[1];
if (new_shape[0] == -1) {
CAFFE_ENFORCE(
new_shape[1] > 0,
"Only one dimension in argument `new_shape` can be -1.");
CAFFE_ENFORCE(
matrix_size % new_shape[1] == 0,
"Argument `new_shape` does not agree with `old_shape`.");
} else {
CAFFE_ENFORCE(
new_shape[0] > 0 && (new_shape[1] == -1 || new_shape[1] > 0),
"Dimensions in argument `new_shape` must be positive or -1.");
if (new_shape[1] == -1) {
CAFFE_ENFORCE(
matrix_size % new_shape[0] == 0,
"Argument `new_shape` does not agree with `old_shape`.");
new_shape[1] = matrix_size / new_shape[0];
} else {
CAFFE_ENFORCE(
new_shape[0] * new_shape[1] == matrix_size,
"Argument `new_shape` does not agree with `old_shape`.");
}
}
}
new_stride_ = new_shape[1];
}
bool RunOnDevice() override {
auto& old_col = Input(0);
    CAFFE_ENFORCE(old_col.dim() == 1, "Column index tensor must be 1-D.");
    auto& old_row = Input(1);
    CAFFE_ENFORCE(old_row.dim() == 1, "Row index tensor must be 1-D.");
const auto nnz = old_col.numel();
CAFFE_ENFORCE(
old_row.numel() == nnz,
"Column and row tensors must have the same size.");
auto* new_col = Output(0, {nnz}, at::dtype<int64_t>());
auto* new_row = Output(1, {nnz}, at::dtype<int>());
const auto* old_col_data = old_col.template data<int64_t>();
const auto* old_row_data = old_row.template data<int>();
auto* new_col_data = new_col->template mutable_data<int64_t>();
auto* new_row_data = new_row->template mutable_data<int>();
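    // Each non-zero entry is remapped by flattening (row, col) into a linear
    // offset with the old stride and re-expanding it with the new stride.
    // For example, reshaping {4, 6} to {3, 8} maps (row 1, col 2) -> offset 8
    // -> (row 1, col 0).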
for (const auto i : c10::irange(nnz)) {
int64_t offset = old_row_data[i] * old_stride_ + old_col_data[i];
new_row_data[i] = offset / new_stride_;
new_col_data[i] = offset % new_stride_;
}
return true;
}
private:
int64_t old_stride_;
int64_t new_stride_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_SPARSE_MATRIX_RESHAPE_H_
| 4,305
| 31.870229
| 75
|
h
|
null |
pytorch-main/caffe2/experiments/operators/tt_contraction_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_OPERATORS_TT_CONTRACTION_OP_H_
#define CAFFE2_OPERATORS_TT_CONTRACTION_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context, class Engine = DefaultEngine>
class TTContractionOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
TTContractionOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
K_(OperatorBase::GetSingleArgument<int64_t>("K", 0)),
M_(OperatorBase::GetSingleArgument<int64_t>("M", 0)),
N_(OperatorBase::GetSingleArgument<int64_t>("N", 0)) {
CAFFE_ENFORCE(OperatorBase::HasArgument("K"), "Argument `K` is missing.");
CAFFE_ENFORCE(OperatorBase::HasArgument("M"), "Argument `M` is missing.");
CAFFE_ENFORCE(OperatorBase::HasArgument("N"), "Argument `N` is missing.");
}
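  // A is a K x M matrix and B is treated as D contiguous slices of shape
  // K x N (with D = B.numel() / (K * N)); the output C stacks the D products
  // A^T * B_d, each of shape M x N.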
bool RunOnDevice() override {
const auto& A = Input(0);
const auto& B = Input(1);
CAFFE_ENFORCE(A.dim() == 2, A.dim());
int64_t A_size = A.numel();
int64_t B_size = B.numel();
CAFFE_ENFORCE(
K_ * M_ == A_size,
"Argument `K` and `M` do not agree with the size of A.");
CAFFE_ENFORCE(
B_size % (K_ * N_) == 0,
"Argument `K` and `N` do not agree with the size of B.");
int64_t D_ = B_size / (K_ * N_);
int64_t C_size = D_ * M_ * N_;
auto* C = Output(0, vector<int64_t>{C_size}, at::dtype<T>());
int64_t B_stride = K_ * N_;
int64_t C_stride = M_ * N_;
const T* A_data = A.template data<T>();
const T* B_data = B.template data<T>();
T* C_data = C->template mutable_data<T>();
for (int64_t B_index = 0; B_index < B_size; B_index += B_stride) {
math::Gemm<T, Context, Engine>(
CblasTrans,
CblasNoTrans,
M_,
N_,
K_,
1,
A_data,
B_data + B_index,
0,
C_data,
&context_);
C_data += C_stride;
}
return true;
}
protected:
int64_t K_;
int64_t M_;
int64_t N_;
};
template <typename T, class Context, class Engine = DefaultEngine>
class TTContractionGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
TTContractionGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
K_(OperatorBase::GetSingleArgument<int64_t>("K", 0)),
M_(OperatorBase::GetSingleArgument<int64_t>("M", 0)),
N_(OperatorBase::GetSingleArgument<int64_t>("N", 0)) {}
bool RunOnDevice() override {
const auto& G = Input(0);
const auto& A = Input(1);
const auto& B = Input(2);
int64_t G_size = G.numel();
int64_t D_ = G_size / (M_ * N_);
int64_t dB_size = D_ * K_ * N_;
auto* dA = Output(0, A.sizes(), at::dtype<T>());
auto* dB = Output(1, B.sizes(), at::dtype<T>());
int64_t B_stride = K_ * N_;
int64_t G_stride = M_ * N_;
const T* G_data = G.template data<T>();
const T* A_data = A.template data<T>();
const T* B_data = B.template data<T>();
T* dA_data = dA->template mutable_data<T>();
T* dB_data = dB->template mutable_data<T>();
const T* G_ptr = G_data;
for (int64_t B_index = 0; B_index < dB_size; B_index += B_stride) {
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasTrans,
K_,
M_,
N_,
1,
B_data + B_index,
G_ptr,
B_index == 0 ? 0 : 1,
dA_data,
&context_);
G_ptr += G_stride;
}
G_ptr = G_data;
for (int64_t B_index = 0; B_index < dB_size; B_index += B_stride) {
math::Gemm<T, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
K_,
N_,
M_,
1,
A_data,
G_ptr,
0,
dB_data + B_index,
&context_);
G_ptr += G_stride;
}
return true;
}
protected:
int64_t K_;
int64_t M_;
int64_t N_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_TT_CONTRACTION_OP_H_
| 4,750
| 26.462428
| 78
|
h
|
null |
pytorch-main/caffe2/experiments/operators/tt_pad_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_OPERATORS_TT_PAD_OP_H_
#define CAFFE2_OPERATORS_TT_PAD_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context, class Engine = DefaultEngine>
class TTPadOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
TTPadOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
scale_(OperatorBase::GetSingleArgument<int64_t>("scale", 0)) {
CAFFE_ENFORCE(
OperatorBase::HasArgument("scale"), "Argument `scale` is missing.");
}
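  // Pads the first dimension of X in place (the output aliases X) up to the
  // next multiple of `scale`, zero-filling the new rows, and records the
  // original dim0 in the second output so TTPadGradientOp can shrink it back.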
bool RunOnDevice() override {
const auto& X = Input(0);
auto* X_pad = Output(0);
CAFFE_ENFORCE(&X == X_pad);
CAFFE_ENFORCE(X.dim() == 2, X.dim());
auto X_dim0 = X.size(0);
auto X_dim1 = X.size(1);
auto* X_orig_dim0 = Output(1, {1}, at::dtype<int64_t>());
*X_orig_dim0->template mutable_data<int64_t>() = X_dim0;
if (X_dim0 % scale_ != 0) {
int64_t padded_dim0 = (X_dim0 / scale_ + 1) * scale_;
auto dim0_diff = padded_dim0 - X_dim0;
// set growthPct to the upper bound percentage: (100 * scale_ / X_dim0)
X_pad->Extend(dim0_diff, 100 * scale_ / X_dim0);
auto* X_pad_data = X_pad->template mutable_data<T>();
int64_t X_size = X_dim0 * X_dim1;
memset(X_pad_data + X_size, 0, dim0_diff * X_dim1 * sizeof(T));
}
return true;
}
protected:
int64_t scale_;
};
template <typename T, class Context, class Engine = DefaultEngine>
class TTPadGradientOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
TTPadGradientOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override {
const auto& G = Input(0);
auto* output = Output(0);
CAFFE_ENFORCE(&G == output);
auto old_dim0 = *Input(1).template data<int64_t>();
auto new_dim0 = G.size(0);
auto dim1 = G.size(1);
if (old_dim0 < new_dim0) {
output->ShrinkTo(old_dim0);
}
return true;
}
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_TT_PAD_OP_H_
| 2,779
| 28.263158
| 77
|
h
|
null |
pytorch-main/caffe2/ideep/ideep_utils.h
|
#pragma once
#include <caffe2/core/macros.h> // For caffe2 macros.
#include <caffe2/utils/eigen_utils.h>
// All caffe2 ideep related headers
#include <ideep.hpp>
#include <caffe2/ideep/utils/ideep_context.h>
#include <caffe2/ideep/utils/ideep_operator.h>
namespace caffe2 {
enum ConvAlgorithm {
CONV_ALGORITHM_AUTO = 0,
CONV_ALGORITHM_WINOGRAD = 1,
CONV_ALGORITHM_MAX
};
enum FusionType {
FUSION_UNKNOWN = 0,
FUSION_CONV_RELU = 1,
FUSION_CONV_SUM = 2,
FUSION_CONV_SUM_RELU = 3,
FUSION_MAX
};
#define USE_IDEEP_DEF_ALIASES() \
/* the tensor type created/handled by iDEEP */ \
using itensor = ideep::tensor; \
/* the date layout of iDEEP tensor */ \
using iformat = ideep::format_tag; \
/* the scales for iDEEP tensor with different data type */ \
using iscale = ideep::scale_t; \
/* the detial algorithm for iDEEP operators, e.g. winograd */ \
using ialgo = ideep::algorithm; \
/* the kind of propagation for iDEEP operators, e.g. forward, training */ \
using iprop = ideep::prop_kind; \
/* the kind of low precision operators, e.g. signed/unsigned activation */ \
using ilowp_kind = ideep::lowp_kind; \
/* the data type of iDEEP tensor, e.g. f32, u8, s8 */ \
using idtype = ideep::tensor::data_type; \
/* the descriptor of iDEEP tensor */ \
using itdesc = ideep::tensor::descriptor; \
/* the attribute for operator to describe the details of inputs&fusion */ \
using iattr = ideep::attr_t; \
/* the detail flags for batch normalization */ \
using ibn_flag = ideep::batch_normalization_flag;
} // namespace caffe2
| 2,209
| 44.102041
| 80
|
h
|
null |
pytorch-main/caffe2/ideep/operators/conv_pool_base_op.h
|
#ifndef CAFFE2_IDEEP_OPERATORS_CONV_POOL_BASE_OP_H_
#define CAFFE2_IDEEP_OPERATORS_CONV_POOL_BASE_OP_H_
#include <vector>
#include "caffe2/ideep/ideep_utils.h"
#include "caffe2/operators/conv_pool_op_base.h"
namespace caffe2 {
class IDEEPConvPoolOpBase : public ConvPoolOpBase<IDEEPContext> {
public:
IDEEPConvPoolOpBase(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<IDEEPContext>(operator_def, ws) {}
~IDEEPConvPoolOpBase() override {}
inline const ideep::tensor& Input(int index) {
return OperatorBase::template Input<ideep::tensor>(index);
}
inline ideep::tensor* Output(int index) {
return OperatorBase::template Output<ideep::tensor>(index);
}
ideep::tensor::dims pad_tl() const {
return {pad_t(), pad_l()};
}
ideep::tensor::dims pad_br() const {
return {pad_b(), pad_r()};
}
ideep::tensor::dims CalcOutputDims(
const ideep::tensor& input,
int output_channel) {
CAFFE_ENFORCE_GT(input.get_size(), 0);
std::vector<int> output_dims;
const auto input_dims = input.get_dims();
std::vector<std::int64_t> input_Tdims(
input_dims.cbegin(), input_dims.cend());
InferOutputSize(
input_Tdims,
output_channel,
StorageOrder::NCHW, //order_,
global_pooling_,
legacy_pad_,
dilation_,
stride_,
&kernel_,
&pads_,
&output_dims);
return {output_dims.begin(), output_dims.end()};
}
bool RunOnDevice() override {
if (!global_pooling_) {
for (const auto dim : c10::irange(kernel_.size())) {
CAFFE_ENFORCE_GT(kernel_[dim], 0);
}
}
try {
return RunOnDeviceWithOrderNCHW();
} catch (ideep::error& e) {
LOG(ERROR) << "IDEEP error:" << e.message;
throw;
}
}
};
#define USE_IDEEP_CONV_POOL_BASE_FUNCTIONS() \
USE_OPERATOR_BASE_FUNCTIONS; \
/* using override */ using IDEEPConvPoolOpBase::Input; \
/* using override */ using IDEEPConvPoolOpBase::Output;
} // namespace caffe2
#endif // CAFFE2_IDEEP_OPERATORS_CONV_POOL_BASE_OP_H_
| 2,115
| 26.128205
| 69
|
h
|
null |
pytorch-main/caffe2/ideep/operators/conv_transpose_unpool_base_op.h
|
#pragma once
#include "caffe2/ideep/ideep_utils.h"
#include "caffe2/proto/caffe2_legacy.pb.h"
using namespace caffe2;
namespace {
class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
public:
USE_IDEEP_DEF_ALIASES();
USE_IDEEP_OPERATOR_FUNCTIONS();
IDEEPConvTransposeUnpoolBase(const OperatorDef& operator_def, Workspace* ws)
: IDEEPOperator(operator_def, ws),
legacy_pad_(
static_cast<LegacyPadding>(OperatorBase::GetSingleArgument<int>(
"legacy_pad",
LegacyPadding::NOTSET))),
kernel_(OperatorBase::GetRepeatedArgument<int>("kernels")),
stride_(OperatorBase::GetRepeatedArgument<int>("strides")),
pads_(OperatorBase::GetRepeatedArgument<int>("pads")),
adj_(OperatorBase::GetRepeatedArgument<int>("adjs")),
shared_buffer_(
OperatorBase::GetSingleArgument<int>("shared_buffer", 0)) {
    // The padding should either use the legacy padding strategy (VALID or
    // SAME) or be given as explicit, non-negative values.
if (legacy_pad_ == LegacyPadding::VALID ||
legacy_pad_ == LegacyPadding::SAME) {
CAFFE_ENFORCE(
!OperatorBase::HasArgument("pads"),
"If you use legacy padding VALID or SAME, you should not specify "
"any specific padding values.");
}
// Get old arguments values.
if (OperatorBase::HasArgument("kernel")) {
kernel_.resize(2, OperatorBase::GetSingleArgument<int>("kernel", 0));
} else if (
OperatorBase::HasArgument("kernel_h") &&
OperatorBase::HasArgument("kernel_w")) {
kernel_.push_back(OperatorBase::GetSingleArgument<int>("kernel_h", 0));
kernel_.push_back(OperatorBase::GetSingleArgument<int>("kernel_w", 0));
}
if (OperatorBase::HasArgument("stride")) {
stride_.resize(2, OperatorBase::GetSingleArgument<int>("stride", 0));
} else if (
OperatorBase::HasArgument("stride_h") &&
OperatorBase::HasArgument("stride_w")) {
stride_.push_back(OperatorBase::GetSingleArgument<int>("stride_h", 0));
stride_.push_back(OperatorBase::GetSingleArgument<int>("stride_w", 0));
}
if (OperatorBase::HasArgument("adj")) {
adj_.resize(2, OperatorBase::GetSingleArgument<int>("adj", 0));
} else if (
OperatorBase::HasArgument("adj_h") &&
OperatorBase::HasArgument("adj_w")) {
adj_.push_back(OperatorBase::GetSingleArgument<int>("adj_h", 0));
adj_.push_back(OperatorBase::GetSingleArgument<int>("adj_w", 0));
}
if (OperatorBase::HasArgument("pad")) {
CAFFE_ENFORCE(
legacy_pad_ != LegacyPadding::VALID &&
legacy_pad_ != LegacyPadding::SAME,
"If you use legacy padding VALID or SAME, you should not specify "
"any specific padding values.");
pads_.resize(4, OperatorBase::GetSingleArgument<int>("pad", 0));
} else if (
OperatorBase::HasArgument("pad_t") &&
OperatorBase::HasArgument("pad_l") &&
OperatorBase::HasArgument("pad_b") &&
OperatorBase::HasArgument("pad_r")) {
CAFFE_ENFORCE(
legacy_pad_ != LegacyPadding::VALID &&
legacy_pad_ != LegacyPadding::SAME,
"If you use legacy padding VALID or SAME, you should not specify "
"any specific padding values.");
pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_t", 0));
pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_l", 0));
pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_b", 0));
pads_.push_back(OperatorBase::GetSingleArgument<int>("pad_r", 0));
}
// Fill default values.
if (kernel_.empty()) {
kernel_.assign({0, 0});
}
if (stride_.empty()) {
stride_.assign(kernel_.size(), 1);
}
if (pads_.empty()) {
pads_.assign(kernel_.size() * 2, 0);
}
if (adj_.empty()) {
adj_.assign(kernel_.size(), 0);
}
CAFFE_ENFORCE_EQ(stride_.size(), kernel_.size());
CAFFE_ENFORCE_EQ(adj_.size(), kernel_.size());
if (legacy_pad_ != LegacyPadding::VALID &&
legacy_pad_ != LegacyPadding::SAME) {
CAFFE_ENFORCE_EQ(pads_.size(), 2 * kernel_.size());
}
for (const auto dim : c10::irange(kernel_.size())) {
CAFFE_ENFORCE_GT(kernel_[dim], 0);
CAFFE_ENFORCE_GT(stride_[dim], 0);
CAFFE_ENFORCE_GE(adj_[dim], 0);
CAFFE_ENFORCE_LE(adj_[dim], stride_[dim]);
}
}
~IDEEPConvTransposeUnpoolBase() override {}
const ideep::tensor& Input(int index) {
return OperatorBase::template Input<ideep::tensor>(index);
}
ideep::tensor* Output(int index) {
return OperatorBase::template Output<ideep::tensor>(index);
}
ideep::tensor::dims pad_tl() const {
return {pad_t(), pad_l()};
}
ideep::tensor::dims pad_br() const {
return {pad_b(), pad_r()};
}
ideep::tensor::dims CalcOutputDims(
const ideep::tensor& input,
int output_channel) {
CAFFE_ENFORCE_GT(input.get_size(), 0);
int N = input.get_dim(0);
ideep::tensor::dims output_dims;
auto input_dims = input.get_dims();
itensor::dims dims;
dims.assign(input_dims.begin() + 2, input_dims.end());
for (const auto dim : c10::irange(dims.size())) {
int dim_size = 0;
ComputeSizeAndPad(
dims[dim],
stride_[dim],
kernel_[dim],
adj_[dim],
&pads_[dim],
&pads_[dim + 2],
&dim_size);
output_dims.push_back(dim_size);
}
output_dims.insert(output_dims.begin(), {N, output_channel});
return output_dims;
}
bool RunOnDevice() override {
try {
return RunOnDeviceWithOrderNCHW();
} catch (ideep::error& e) {
LOG(ERROR) << "IDEEP error:" << e.message;
throw;
}
}
virtual bool RunOnDeviceWithOrderNCHW() {
CAFFE_THROW("Not implemented");
}
private:
LegacyPadding legacy_pad_;
protected:
vector<int> kernel_;
vector<int> stride_;
vector<int> pads_;
vector<int> adj_;
bool shared_buffer_;
// Accessors for 2D conv params.
inline int pad_t() const {
return pads_[0];
}
inline int pad_l() const {
return pads_[1];
}
inline int pad_b() const {
return pads_[2];
}
inline int pad_r() const {
return pads_[3];
}
inline int kernel_h() const {
return kernel_[0];
}
inline int kernel_w() const {
return kernel_[1];
}
inline int stride_h() const {
return stride_[0];
}
inline int stride_w() const {
return stride_[1];
}
inline int adj_h() const {
return adj_[0];
}
inline int adj_w() const {
return adj_[1];
}
inline void ComputeSizeAndPad(
const int in_size,
const int stride,
const int kernel,
const int adj,
int* pad_head,
int* pad_tail,
int* out_size) {
switch (legacy_pad_) {
case LegacyPadding::NOTSET:
CAFFE_ENFORCE_GE(*pad_head, 0);
CAFFE_ENFORCE_GE(*pad_tail, 0);
*out_size =
(in_size - 1) * stride + kernel + adj - *pad_head - *pad_tail;
break;
// We handle cases of LegacyPadding::VALID and LegacyPadding::SAME
// the same way
case LegacyPadding::VALID:
case LegacyPadding::SAME:
*pad_head = 0;
*pad_tail = 0;
*out_size = (in_size - 1) * stride + kernel + adj;
break;
case LegacyPadding::CAFFE_LEGACY_POOLING:
LOG(FATAL) << "CAFFE_LEGACY_POOLING is no longer supported.";
break;
}
}
};
#define USE_IDEEP_CONV_TRANSPOSE_UNPOOL_BASE_FUNCTIONS() \
USE_OPERATOR_BASE_FUNCTIONS; \
/* using override */ using IDEEPConvTransposeUnpoolBase::Input; \
/* using override */ using IDEEPConvTransposeUnpoolBase::Output;
} // namespace
| 7,792
| 28.518939
| 78
|
h
|
null |
pytorch-main/caffe2/ideep/operators/operator_fallback_ideep.h
|
#pragma once
#include <caffe2/core/common.h>
#include <caffe2/core/context.h>
#include <caffe2/core/operator.h>
#include <caffe2/ideep/ideep_utils.h>
#include <caffe2/proto/caffe2_pb.h>
namespace caffe2 {
/**
* @brief A templated class to allow one to wrap a CPU operator as an IDEEP
* operator.
*
* This class can be used when one does not have the IDEEP implementation ready
* yet for an operator. Essentially, what this op does is to automatically
 * deal with data copies for you. Predictably, this adds a lot of overhead and
 * is not optimal, so you should use this operator mostly for quick prototyping
 * purposes.
*
* All the input and output of the original operator should be TensorCPU.
*
* Example usage: if you have a class MyMagicOp that is CPU based, and you use
* the registration code
* REGISTER_CPU_OPERATOR(MyMagic, MyMagicOp);
* to register the CPU side, you can create its corresponding IDEEP operator
* (with performance hits of course) via
* REGISTER_IDEEP_OPERATOR(MyMagic,
* IDEEPFallbackOp<MyMagicOp>);
*
* Advanced usage: if you want to have some specific outputs never copied, you
* can use the SkipOutputCopy template argument to do that. For example, if
* MyMagic produces two outputs and the first output is always going to live on
* the CPU, you can do
* REGISTER_IDEEP_OPERATOR(MyMagic,
* IDEEPFallbackOp<MyMagicOp, SkipIndices<0>>);
*/
template <class CPUOp, typename SkipOutputCopy = SkipIndices<>>
class IDEEPFallbackOp final : public IDEEPOperator {
public:
USE_IDEEP_DEF_ALIASES();
USE_IDEEP_OPERATOR_FUNCTIONS();
IDEEPFallbackOp(const OperatorDef& def, Workspace* ws)
: IDEEPOperator(def, ws) {
CAFFE_ENFORCE_EQ(def.device_option().device_type(), PROTO_IDEEP);
base_def_.CopyFrom(def);
// base_def_ runs on CPU, so we will set its device option to CPU.
// Copy to allow random_seed to be correctly propagated.
base_def_.mutable_device_option()->CopyFrom(def.device_option());
base_def_.mutable_device_option()->set_device_type(PROTO_CPU);
// Create output blobs in parent workspace,
// then forward output blobs to local workspace.
std::unordered_map<string, string> forwarded_output_blobs;
for (const auto i : c10::irange(base_def_.output_size())) {
    // For the in-place case, the in/output tensor for local_ws must be
    // re-created instead of forwarded from the current workspace.
string parent_name(base_def_.output(i));
if (!SkipOutputCopy::Contains(i)) {
parent_name += "_cpu_output_blob_" + base_def_.type();
}
local_output_blobs_.push_back(ws->CreateBlob(parent_name));
TORCH_CHECK_NOTNULL(local_output_blobs_.back());
forwarded_output_blobs[base_def_.output(i)] = parent_name;
output_inplace_.push_back(false);
for (const string &input_name : base_def_.input()) {
if (input_name == base_def_.output(i)) {
output_inplace_[i] = true;
break;
}
}
}
local_ws_.reset(new Workspace(ws, forwarded_output_blobs));
// Set up the symbols for the local workspace.
for (const string& name : base_def_.input()) {
local_input_blobs_.push_back(local_ws_->CreateBlob(name));
TORCH_CHECK_NOTNULL(local_input_blobs_.back());
}
input_share_.resize(local_input_blobs_.size(), false);
base_op_.reset(new CPUOp(base_def_, local_ws_.get()));
}
bool RunOnDevice() override {
for (const auto i : c10::irange(InputSize())) {
if (InputIsType<itensor>(i)
&& (Input(i).has_scale()
|| Input(i).get_data_type() == idtype::f32)) {
auto& input = Input(i);
if (input_share_[i]) {
local_input_blobs_[i]->Reset();
input_share_[i] = false;
}
auto dtensor = BlobGetMutableTensor(local_input_blobs_[i], CPU);
dtensor->Resize(input.get_dims());
        // When falling back from INT8, the public format of the original input
        // is nhwc, while the required format is nchw, so reorder to nchw.
if (input.get_desc().is_nhwc()) {
itensor temp_ten ({input.get_dims(), idtype::f32, iformat::nchw},
dtensor->template mutable_data<float>());
temp_ten.feed_from(input);
} else if (!input.need_reorder()) {
CAFFE_ENFORCE(!input.has_scale(),
"Incorrect invocation of get_data_handle");
dtensor->ShareExternalPointer(
static_cast<float*>(input.get_data_handle()));
} else {
input.to_public(dtensor->template mutable_data<float>());
}
} else {
VLOG(1) << "Input " << i << " is not ideep::tensor. Skipping copy.";
if (OperatorBase::Inputs()[i]->GetRaw() != local_input_blobs_[i]->GetRaw()) {
// Note(jiayq): This removes a const but conceptually
// local_input_blobs will only be used as const blob input for the
// base op so we are still fine.
local_input_blobs_[i]->ShareExternal(
const_cast<void *>(OperatorBase::Inputs()[i]->GetRaw()),
OperatorBase::Inputs()[i]->meta());
}
input_share_[i] = true;
}
}
    // Some CPU ops that inherit directly from OperatorBase, such as
    // 'PrefetchOperator', might need this default input argument '0'.
if (!base_op_->Run(0)) {
LOG(ERROR) << "Base op run failed in IDEEPFallbackOp. Def: "
<< ProtoDebugString(this->debug_def());
return false;
}
for (const auto i : c10::irange(OutputSize())) {
if (SkipOutputCopy::Contains(i)) {
VLOG(1) << "Copy output: index " << i << " skipped.";
continue;
}
CAFFE_ENFORCE(
BlobIsTensorType(*local_output_blobs_[i], CPU),
"IDEEP fallback op currently does not support non-TensorCPU "
"output type who needs copying.");
const auto& src = local_output_blobs_[i]->template Get<TensorCPU>();
auto src_dims = src.sizes().vec();
if (src.template IsType<float>() && src.dim() != 0 && base_op_->type() != "Python") {
Blob* dst = OperatorBase::OutputBlob(i);
        // The output tensor must be an ideep tensor with a public format.
        // If an ideep tensor with a non-public format is reused, its buffer
        // will be interpreted incorrectly.
if (!dst->template IsType<itensor>() ||
!dst->template Get<itensor>().is_public_format()) {
dst->Reset(new itensor());
}
itensor::dims dst_dims (src_dims.begin(), src_dims.end());
auto dtensor = dst->template GetMutable<itensor>();
if (dtensor->get_dims() != dst_dims) {
dtensor->resize(dst_dims, idtype::f32);
}
if (output_inplace_[i]) {
dtensor->feed_from(dst_dims, idtype::f32,
const_cast<void*>(src.raw_data()));
} else {
CAFFE_ENFORCE(!dtensor->has_scale(),
"Incorrect invocation of set_data_handle");
dtensor->set_data_handle(const_cast<void *>(src.raw_data()));
}
} else {
VLOG(2) << "Output " << base_def_.output(i) << " as CPUTensor";
Blob* dst = OperatorBase::OutputBlob(i);
if (output_inplace_[i]) {
auto dtensor = BlobGetMutableTensor(dst, CPU);
dtensor->CopyFrom(src);
} else {
dst->Reset(new Tensor(CPU));
BlobSetTensor(dst, src.Alias());
}
}
}
return true;
}
protected:
vector<Blob*> local_input_blobs_;
vector<Blob*> local_output_blobs_;
vector<bool> output_inplace_;
vector<bool> input_share_;
std::unique_ptr<CPUOp> base_op_;
std::unique_ptr<Workspace> local_ws_;
OperatorDef base_def_;
};
} // namespace caffe2
| 7,776
| 39.717277
| 91
|
h
|
null |
pytorch-main/caffe2/ideep/utils/ideep_context.h
|
#pragma once
#include <cstdlib>
#include <ctime>
#include <random>
#include <caffe2/core/context.h>
namespace caffe2 {
class IDEEPContext final : public BaseContext {
public:
typedef std::mt19937 rand_gen_type;
IDEEPContext() : random_seed_(RandomNumberSeed()) {}
explicit IDEEPContext(const DeviceOption& option)
: random_seed_(
option.has_random_seed() ? option.random_seed()
: RandomNumberSeed()) {
CAFFE_ENFORCE_EQ(option.device_type(), PROTO_IDEEP);
}
explicit IDEEPContext(const at::Device& device)
: IDEEPContext(DeviceToOption(device)) {}
~IDEEPContext() noexcept override {}
inline void SwitchToDevice(int64_t /*stream_id*/) override {}
using BaseContext::SwitchToDevice;
inline void WaitEvent(const Event& ev) override {
ev.Wait(IDEEP, this);
}
inline void Record(Event* ev, const char* err_msg = nullptr) const override {
CAFFE_ENFORCE(ev, "Event must not be null.");
ev->Record(IDEEP, this, err_msg);
}
inline void FinishDeviceComputation() override {}
inline rand_gen_type& RandGenerator() {
if (!random_generator_.get()) {
random_generator_.reset(new rand_gen_type(random_seed_));
}
return *random_generator_.get();
}
inline static at::DataPtr New(size_t nbytes) {
return GetAllocator(CPU)->allocate(nbytes);
}
void CopyBytesSameDevice(size_t nbytes, const void* src, void* dst) override {
if (nbytes == 0) {
return;
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
void CopyBytesFromCPU(size_t nbytes, const void* src, void* dst) override {
CopyBytesSameDevice(nbytes, src, dst);
}
void CopyBytesToCPU(size_t nbytes, const void* src, void* dst) override {
CopyBytesSameDevice(nbytes, src, dst);
}
bool SupportsNonFundamentalTypes() const override {
// IDEEP meta copy is OK
return true;
}
// Two copy functions that deals with cross-device copies.
template <class SrcContext, class DstContext>
inline void CopyBytes(size_t nbytes, const void* src, void* dst);
template <typename T, class SrcContext, class DstContext>
inline void Copy(size_t n, const T* src, T* dst) {
if (c10::guts::is_fundamental<T>::value) {
CopyBytes<SrcContext, DstContext>(
n * sizeof(T),
static_cast<const void*>(src),
static_cast<void*>(dst));
} else {
for (const auto i : c10::irange(n)) {
dst[i] = src[i];
}
}
}
template <class SrcContext, class DstContext>
inline void
CopyItems(const TypeMeta meta, size_t n, const void* src, void* dst) {
if (meta.copy()) {
meta.copy()(src, dst, n);
} else {
CopyBytes<SrcContext, DstContext>(n * meta.itemsize(), src, dst);
}
}
static bool HasAsyncPartDefault() {
return false;
}
static bool SupportsAsyncScheduling() {
return false;
}
static bool IsStreamFree(const DeviceOption& /* unused */, int /* unused */) {
return true;
}
at::Device device() const override {
return at::Device(IDEEP);
}
DeviceType device_type() const override {
return IDEEP;
}
static constexpr DeviceType GetDeviceType() {
return IDEEP;
}
protected:
// TODO(jiayq): instead of hard-coding a generator, make it more flexible.
int random_seed_{1701};
std::unique_ptr<rand_gen_type> random_generator_;
};
template <>
inline void IDEEPContext::CopyBytes<IDEEPContext, IDEEPContext>(
size_t nbytes,
const void* src,
void* dst) {
if (nbytes == 0) {
return;
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
template <>
inline void IDEEPContext::CopyBytes<CPUContext, IDEEPContext>(
size_t nbytes,
const void* src,
void* dst) {
if (nbytes == 0) {
return;
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
template <>
inline void IDEEPContext::CopyBytes<IDEEPContext, CPUContext>(
size_t nbytes,
const void* src,
void* dst) {
if (nbytes == 0) {
return;
}
CAFFE_ENFORCE(src);
CAFFE_ENFORCE(dst);
memcpy(dst, src, nbytes);
}
} // namespace caffe2
| 4,170
| 23.25
| 80
|
h
|
null |
pytorch-main/caffe2/ideep/utils/ideep_operator.h
|
#pragma once
#include <ideep.hpp>
#include <caffe2/core/operator.h>
#include <caffe2/proto/caffe2_pb.h>
namespace caffe2 {
C10_DECLARE_REGISTRY(
IDEEPOperatorRegistry,
OperatorBase,
const OperatorDef&,
Workspace*);
#define REGISTER_IDEEP_OPERATOR_CREATOR(key, ...) \
C10_REGISTER_CREATOR(IDEEPOperatorRegistry, key, __VA_ARGS__)
#define REGISTER_IDEEP_OPERATOR(name, ...) \
C10_REGISTER_CLASS(IDEEPOperatorRegistry, name, __VA_ARGS__)
#define REGISTER_IDEEP_OPERATOR_WITH_ENGINE(name, engine, ...) \
C10_REGISTER_CLASS(IDEEPOperatorRegistry, name##_ENGINE_##engine, __VA_ARGS__)
#define REGISTER_IDEEP_OPERATOR_STR(str_name, ...) \
C10_REGISTER_TYPED_CLASS(IDEEPOperatorRegistry, str_name, __VA_ARGS__)
#define REGISTER_IDEEP_COMPARE_OPERATOR(Op) \
REGISTER_IDEEP_OPERATOR( \
Op, \
IDEEPFallbackOp<BinaryElementwiseOp< \
TensorTypes<bool, int32_t, int64_t, float, double>, \
CPUContext, \
Op##Functor<CPUContext>, \
FixedType<bool>>>)
// IDEEPOperator is the base scaffolding of the operators that use IDEEP. It
// provides a few functions that are useful to IDEEP-specific implementations.
class IDEEPOperator : public OperatorBase {
public:
explicit IDEEPOperator(const OperatorDef& operator_def, Workspace* ws)
: OperatorBase(operator_def, ws),
context_(operator_def.device_option()),
order_(StringToStorageOrder(
OperatorBase::GetSingleArgument<string>("order", "NCHW"))) {
}
~IDEEPOperator() override {}
inline const ideep::tensor& Input(int index) {
return OperatorBase::template Input<ideep::tensor>(index);
}
inline ideep::tensor* Output(int index) {
return OperatorBase::template Output<ideep::tensor>(index);
}
// The run function of Operator switches to the device, and then carries out
// the actual computation with RunOnDevice(). You should implement RunOnDevice
// instead of Run().
bool Run(int /* unused */ /*stream_id*/) final {
    // Since IDEEP does not need to do SwitchToDevice and
// FinishDeviceComputation,
// it is always just a re-route to RunOnDevice().
try {
StartAllObservers();
bool result = RunOnDevice();
StopAllObservers();
return result;
} catch (EnforceNotMet& err) {
TORCH_RETHROW(err, getErrorMsg());
} catch (ideep::error& e) {
LOG(ERROR) << "IDEEP error:" << e.message;
throw;
}
}
// Waits for a previous event. Note that to properly wait and run
// asynchronously, WaitEvent, RunAsync and Record should all be executed
// on the same CPU thread.
void WaitEvent(const Event& ev, int /* unused */) final {
context_.WaitEvent(ev);
}
void WaitEvents(const std::vector<const Event*>& events, int /* unused */)
final {
for (const auto& ev : events) {
context_.WaitEvent(*ev);
}
}
void RecordEvent(const char* err_msg = nullptr) final {
if (event_) {
context_.Record(event_.get(), err_msg);
}
}
virtual bool RunOnDevice() = 0;
protected:
std::string getErrorMsg() {
if (has_debug_def()) {
return "Error from operator: " + ProtoDebugString(debug_def());
} else {
return "Error from operator: no op def";
}
}
IDEEPContext context_;
StorageOrder order_;
};
#define USE_IDEEP_OPERATOR_FUNCTIONS() \
USE_OPERATOR_BASE_FUNCTIONS; \
/* using override */ using IDEEPOperator::Input; \
/* using override */ using IDEEPOperator::Output; \
/* using override */ using IDEEPOperator::order_; \
/* using override */ using IDEEPOperator::context_;
#define USE_SIMPLE_IDEEP_CTOR_DTOR(name) \
name(const OperatorDef& operator_def, Workspace* ws) \
: IDEEPOperator(operator_def, ws) {} \
~name() override {}
// Convert zero_point scales to min_max scales
// NOTE:
// The scales in operator is saved in FBGEMM format,
// while FBGEMM scales are the reciprocals of MKL-DNN scales.
// This function is provided to convert scales from FBGEMM to MKL-DNN
inline ideep::scale_t ConvertScales(
const std::vector<float> scales_z) {
ideep::scale_t scales (scales_z);
for (auto it = scales.begin(); it != scales.end(); it++) {
*it = 1.0f / *it;
}
return scales;
}
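// A minimal usage sketch (hypothetical values): FBGEMM-style scales are
// inverted element-wise before being handed to MKL-DNN primitives, e.g.
//   ConvertScales({0.5f, 2.0f}) returns {2.0f, 0.5f}.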
inline ideep::tensor::dims CanonicalDims(
ideep::tensor::dims adims, int32_t axis) {
CAFFE_ENFORCE(axis < (int32_t)adims.size(), "Invalid axis!");
CAFFE_ENFORCE(axis > (int32_t)-adims.size(), "Invalid axis!");
if (adims.size() == 2 || axis == 1)
return adims;
if (axis < 0) {
axis += (int32_t)adims.size();
}
auto dim0 = std::accumulate(adims.begin(), adims.begin() + axis, 1,
std::multiplies<ideep::tensor::dim_t>());
auto dim1 = std::accumulate(adims.begin() + axis, adims.end(), 1,
std::multiplies<ideep::tensor::dim_t>());
return ideep::tensor::dims({dim0, dim1});
}
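// Example (sketch): CanonicalDims({2, 3, 4, 5}, 2) folds the dimensions on
// either side of axis 2 and returns {6, 20}; 2-D inputs and axis == 1 are
// returned unchanged.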
} // namespace caffe2
| 5,379
| 34.629139
| 80
|
h
|
null |
pytorch-main/caffe2/image/transform_gpu.h
|
#ifndef CAFFE2_IMAGE_TRANSFORM_GPU_H_
#define CAFFE2_IMAGE_TRANSFORM_GPU_H_
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
#include "caffe2/core/context.h"
namespace caffe2 {
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context);
} // namespace caffe2
#endif
| 1,705
| 37.772727
| 82
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/ios/ios_caffe.h
|
#ifdef __cplusplus
#include <string>
#include <vector>
#include "caffe2/mobile/contrib/ios/ios_caffe_defines.h"
#include "caffe2/mobile/contrib/ios/ios_caffe_predictor.h"
#include "caffe2/predictor/predictor.h"
extern "C" {
IOS_CAFFE_EXPORT Caffe2IOSPredictor* MakeCaffe2Predictor(const std::string& init_net_str,
const std::string& predict_net_str,
bool disableMultithreadProcessing,
bool allowMetalOperators,
std::string& errorMessage);
IOS_CAFFE_EXPORT void GenerateStylizedImage(std::vector<float>& originalImage,
const std::string& init_net_str,
const std::string& predict_net_str,
int height,
int width,
std::vector<float>& dataOut);
}
#endif
| 1,102
| 41.423077
| 92
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/ios/ios_caffe_predictor.h
|
#pragma once
#include <string>
#include "caffe2/core/net.h"
#include "caffe2/mobile/contrib/ios/ios_caffe_defines.h"
#include "caffe2/predictor/predictor.h"
struct Tensor {
std::vector<int64_t> dims;
uint8_t* data;
};
class IOS_CAFFE_EXPORT Caffe2IOSPredictor final {
public:
/**
   @allowMetalOperators Allow converting eligible operators to Metal-accelerated
   operators. Setting this flag to true doesn't guarantee the predictor will use
   Metal operators; client code must check the usingMetalOperators flag to
   determine whether they are actually in use.
*/
static Caffe2IOSPredictor* NewCaffe2IOSPredictor(const caffe2::NetDef& init_net,
const caffe2::NetDef& predict_net,
bool disableMultithreadProcessing,
bool allowMetalOperators);
void run(const Tensor& inData, Tensor& outData, std::string& errorMessage);
  ~Caffe2IOSPredictor() {}
const bool usingMetalOperators;
private:
Caffe2IOSPredictor(const caffe2::NetDef& init_net,
const caffe2::NetDef& predict_net,
bool disableMultithreadProcessing,
bool usingMetalOperators);
caffe2::Predictor predictor_;
};
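// A minimal usage sketch (error handling elided; init_net/predict_net are
// assumed to be parsed caffe2::NetDef protobufs and inData to be populated):
//
//   Caffe2IOSPredictor* predictor = Caffe2IOSPredictor::NewCaffe2IOSPredictor(
//       init_net, predict_net, /*disableMultithreadProcessing=*/false,
//       /*allowMetalOperators=*/true);
//   Tensor outData;
//   std::string errorMessage;
//   predictor->run(inData, outData, errorMessage);
//   // predictor->usingMetalOperators reports whether Metal ops are in use.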
| 1,312
| 34.486486
| 98
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/ios/mpscnn/mpscnn.h
|
#pragma once
#include "caffe2/core/net.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
static constexpr const char* kMPSCNNReadCountArg = "__mpscnn_read_count__";
static constexpr const char* kMPSCNNOutputIsTempImageArg = "__mpscnn_output_is_temp_img__";
static constexpr const int kMetalMaxTextureArrLength = 2048;
// We currently only try to convert a fixed set of operators that handle a subset of a full
// CNN. We also only run when MPSCNN is available and provides a speedup.
// On failure, returns false. On success, returns true, and sets the MPSCNN net in the output
// parameter.
bool tryConvertToMPSCNN(const NetDef& initNet, const NetDef& predictNet, NetDef* mpscnnPredictNet);
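// A minimal usage sketch (assumes valid init/predict NetDefs):
//
//   caffe2::NetDef mpscnnPredictNet;
//   if (caffe2::tryConvertToMPSCNN(initNet, predictNet, &mpscnnPredictNet)) {
//     // run mpscnnPredictNet instead of predictNet
//   } else {
//     // fall back to the original predictNet
//   }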
// Exposed for testing.
NetDef annotateDefWithReadCounts(const NetDef& net);
NetDef rewriteForMetal(const NetDef& net);
NetDef runMPSCNNFusion(const NetDef& net);
void dumpDef(const NetDef& d);
void mpscnnRecordExecutionFinish();
} // namespace caffe2
| 948
| 38.541667
| 99
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/ios/mpscnn/mpscnn_context.h
|
#pragma once
#import <Metal/MTLBuffer.h>
#import <Metal/MTLDevice.h>
#import <Metal/MTLLibrary.h>
#include <array>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
namespace caffe2 {
struct MPSCNNContext {
public:
id<MTLDevice> device;
id<MTLCommandQueue> commandQueue;
id<MTLLibrary> library;
id<MTLComputePipelineState> getPipelineState(NSString* kernel);
id<MTLComputePipelineState> getSpecializedPipelineState(NSString* kernel,
const std::vector<ushort>& constants);
private:
std::mutex pipelineCacheMutex_;
std::unordered_map<std::string, id<MTLComputePipelineState>> pipelineCache_;
};
// get the singleton instance.
MPSCNNContext& getMPSCNNContext();
} // namespace caffe2
| 791
| 22.294118
| 96
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/ios/mpscnn/mpscnn_graph_mask.h
|
#pragma once
#include "caffe2/core/net.h"
#include "mpscnn.h"
namespace caffe2 {
// We currently only try to convert a fixed set of operators that handle a subset of a full
// CNN. We also only run when MPSCNN is available, provides a speedup.
// On failure, returns false. On success, returns true, and sets the MPSCNN net in the output
// parameter.
// The rewrite function now supports insertion of copies in intermediate ops.
bool tryConvertToMPSCNNIntermediateCopies(const NetDef& initNet,
const NetDef& predictNet,
NetDef* mpscnnPredictNet);
NetDef setSpecialArgs(const NetDef& def);
} // namespace caffe2
| 698
| 40.117647
| 93
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/libopencl-stub/include/CL/cl_ext.h
|
/*******************************************************************************
* Copyright (c) 2008 - 2012 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
******************************************************************************/
/* $Revision: 11928 $ on $Date: 2010-07-13 09:04:56 -0700 (Tue, 13 Jul 2010) $ */
/* cl_ext.h contains OpenCL extensions which don't have external */
/* (OpenGL, D3D) dependencies. */
#ifndef __CL_EXT_H
#define __CL_EXT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __APPLE__
#include <OpenCL/cl.h>
#include <AvailabilityMacros.h>
#else
#include <CL/cl.h>
#endif
/* cl_khr_fp16 extension - no extension #define since it has no functions */
#define CL_DEVICE_HALF_FP_CONFIG 0x1033
/* Memory object destruction
*
* Apple extension for use to manage externally allocated buffers used with cl_mem objects with CL_MEM_USE_HOST_PTR
*
* Registers a user callback function that will be called when the memory object is deleted and its resources
* freed. Each call to clSetMemObjectCallbackFn registers the specified user callback function on a callback
* stack associated with memobj. The registered user callback functions are called in the reverse order in
* which they were registered. The user callback functions are called and then the memory object is deleted
* and its resources freed. This provides a mechanism for the application (and libraries) using memobj to be
* notified when the memory referenced by host_ptr, specified when the memory object is created and used as
* the storage bits for the memory object, can be reused or freed.
*
* The application may not call CL api's with the cl_mem object passed to the pfn_notify.
*
* Please check for the "cl_APPLE_SetMemObjectDestructor" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
* before using.
*/
#define cl_APPLE_SetMemObjectDestructor 1
cl_int CL_API_ENTRY clSetMemObjectDestructorAPPLE( cl_mem /* memobj */,
void (* /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
void * /*user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
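/* A minimal usage sketch (hypothetical callback; check for the
 * "cl_APPLE_SetMemObjectDestructor" extension before relying on this):
 *
 *   void freeHostPtr(cl_mem memobj, void* user_data) { free(user_data); }
 *   ...
 *   clSetMemObjectDestructorAPPLE(buffer, freeHostPtr, host_ptr);
 */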
/* Context Logging Functions
*
* The next three convenience functions are intended to be used as the pfn_notify parameter to clCreateContext().
* Please check for the "cl_APPLE_ContextLoggingFunctions" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
* before using.
*
* clLogMessagesToSystemLog forwards on all log messages to the Apple System Logger
*/
#define cl_APPLE_ContextLoggingFunctions 1
extern void CL_API_ENTRY clLogMessagesToSystemLogAPPLE( const char * /* errstr */,
const void * /* private_info */,
size_t /* cb */,
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
/* clLogMessagesToStdout sends all log messages to the file descriptor stdout */
extern void CL_API_ENTRY clLogMessagesToStdoutAPPLE( const char * /* errstr */,
const void * /* private_info */,
size_t /* cb */,
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
/* clLogMessagesToStderr sends all log messages to the file descriptor stderr */
extern void CL_API_ENTRY clLogMessagesToStderrAPPLE( const char * /* errstr */,
const void * /* private_info */,
size_t /* cb */,
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
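/* A minimal usage sketch (assumes a device and a populated cl_context_properties
 * list; the logging functions are passed as the pfn_notify argument):
 *
 *   cl_int err;
 *   cl_context ctx = clCreateContext(props, 1, &device,
 *                                    clLogMessagesToStdoutAPPLE, NULL, &err);
 */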
/************************
* cl_khr_icd extension *
************************/
#define cl_khr_icd 1
/* cl_platform_info */
#define CL_PLATFORM_ICD_SUFFIX_KHR 0x0920
/* Additional Error Codes */
#define CL_PLATFORM_NOT_FOUND_KHR -1001
extern CL_API_ENTRY cl_int CL_API_CALL
clIcdGetPlatformIDsKHR(cl_uint /* num_entries */,
cl_platform_id * /* platforms */,
cl_uint * /* num_platforms */);
typedef CL_API_ENTRY cl_int (CL_API_CALL *clIcdGetPlatformIDsKHR_fn)(
cl_uint /* num_entries */,
cl_platform_id * /* platforms */,
cl_uint * /* num_platforms */);
/* Extension: cl_khr_image2D_buffer
*
* This extension allows a 2D image to be created from a cl_mem buffer without a copy.
* The type associated with a 2D image created from a buffer in an OpenCL program is image2d_t.
* Both the sampler and sampler-less read_image built-in functions are supported for 2D images
* and 2D images created from a buffer. Similarly, the write_image built-ins are also supported
* for 2D images created from a buffer.
*
* When the 2D image from buffer is created, the client must specify the width,
* height, image format (i.e. channel order and channel data type) and optionally the row pitch
*
* The pitch specified must be a multiple of CL_DEVICE_IMAGE_PITCH_ALIGNMENT pixels.
* The base address of the buffer must be aligned to CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT pixels.
*/
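/* A minimal sketch of creating such an image (assumes an OpenCL 1.2 context,
 * a valid cl_mem buffer and a matching cl_image_format fmt; field names follow
 * the standard OpenCL 1.2 cl_image_desc):
 *
 *   cl_image_desc desc = {0};
 *   desc.image_type = CL_MEM_OBJECT_IMAGE2D;
 *   desc.image_width = width;
 *   desc.image_height = height;
 *   desc.image_row_pitch = row_pitch;   // multiple of the pitch alignment
 *   desc.buffer = buffer;
 *   cl_int err;
 *   cl_mem image = clCreateImage(context, CL_MEM_READ_WRITE, &fmt, &desc,
 *                                NULL, &err);
 */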
/****************************************
 * cl_khr_initialize_memory extension *
 ****************************************/
#define CL_CONTEXT_MEMORY_INITIALIZE_KHR 0x200E
/**************************************
* cl_khr_terminate_context extension *
**************************************/
#define CL_DEVICE_TERMINATE_CAPABILITY_KHR 0x200F
#define CL_CONTEXT_TERMINATE_KHR 0x2010
#define cl_khr_terminate_context 1
extern CL_API_ENTRY cl_int CL_API_CALL clTerminateContextKHR(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
typedef CL_API_ENTRY cl_int (CL_API_CALL *clTerminateContextKHR_fn)(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
/*
* Extension: cl_khr_spir
*
* This extension adds support to create an OpenCL program object from a
* Standard Portable Intermediate Representation (SPIR) instance
*/
/******************************************
* cl_nv_device_attribute_query extension *
******************************************/
/* cl_nv_device_attribute_query extension - no extension #define since it has no functions */
#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
#define CL_DEVICE_REGISTERS_PER_BLOCK_NV 0x4002
#define CL_DEVICE_WARP_SIZE_NV 0x4003
#define CL_DEVICE_GPU_OVERLAP_NV 0x4004
#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
#define CL_DEVICE_INTEGRATED_MEMORY_NV 0x4006
/*********************************
* cl_amd_device_attribute_query *
*********************************/
#define CL_DEVICE_PROFILING_TIMER_OFFSET_AMD 0x4036
#ifdef CL_VERSION_1_1
/***********************************
* cl_ext_device_fission extension *
***********************************/
#define cl_ext_device_fission 1
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
typedef CL_API_ENTRY cl_int
(CL_API_CALL *clReleaseDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
typedef CL_API_ENTRY cl_int
(CL_API_CALL *clRetainDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
typedef cl_ulong cl_device_partition_property_ext;
extern CL_API_ENTRY cl_int CL_API_CALL
clCreateSubDevicesEXT( cl_device_id /*in_device*/,
const cl_device_partition_property_ext * /* properties */,
cl_uint /*num_entries*/,
cl_device_id * /*out_devices*/,
cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
typedef CL_API_ENTRY cl_int
( CL_API_CALL * clCreateSubDevicesEXT_fn)( cl_device_id /*in_device*/,
const cl_device_partition_property_ext * /* properties */,
cl_uint /*num_entries*/,
cl_device_id * /*out_devices*/,
cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
/* cl_device_partition_property_ext */
#define CL_DEVICE_PARTITION_EQUALLY_EXT 0x4050
#define CL_DEVICE_PARTITION_BY_COUNTS_EXT 0x4051
#define CL_DEVICE_PARTITION_BY_NAMES_EXT 0x4052
#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN_EXT 0x4053
/* clDeviceGetInfo selectors */
#define CL_DEVICE_PARENT_DEVICE_EXT 0x4054
#define CL_DEVICE_PARTITION_TYPES_EXT 0x4055
#define CL_DEVICE_AFFINITY_DOMAINS_EXT 0x4056
#define CL_DEVICE_REFERENCE_COUNT_EXT 0x4057
#define CL_DEVICE_PARTITION_STYLE_EXT 0x4058
/* error codes */
#define CL_DEVICE_PARTITION_FAILED_EXT -1057
#define CL_INVALID_PARTITION_COUNT_EXT -1058
#define CL_INVALID_PARTITION_NAME_EXT -1059
/* CL_AFFINITY_DOMAINs */
#define CL_AFFINITY_DOMAIN_L1_CACHE_EXT 0x1
#define CL_AFFINITY_DOMAIN_L2_CACHE_EXT 0x2
#define CL_AFFINITY_DOMAIN_L3_CACHE_EXT 0x3
#define CL_AFFINITY_DOMAIN_L4_CACHE_EXT 0x4
#define CL_AFFINITY_DOMAIN_NUMA_EXT 0x10
#define CL_AFFINITY_DOMAIN_NEXT_FISSIONABLE_EXT 0x100
/* cl_device_partition_property_ext list terminators */
#define CL_PROPERTIES_LIST_END_EXT ((cl_device_partition_property_ext) 0)
#define CL_PARTITION_BY_COUNTS_LIST_END_EXT ((cl_device_partition_property_ext) 0)
#define CL_PARTITION_BY_NAMES_LIST_END_EXT ((cl_device_partition_property_ext) 0 - 1)
#endif /* CL_VERSION_1_1 */
#ifdef __cplusplus
}
#endif
#endif /* __CL_EXT_H */
| 11,540
| 44.797619
| 121
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/libopencl-stub/include/CL/cl_gl.h
|
/**********************************************************************************
* Copyright (c) 2008 - 2012 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
**********************************************************************************/
#ifndef __OPENCL_CL_GL_H
#define __OPENCL_CL_GL_H
#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef cl_uint cl_gl_object_type;
typedef cl_uint cl_gl_texture_info;
typedef cl_uint cl_gl_platform_info;
typedef struct __GLsync *cl_GLsync;
/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken */
#define CL_GL_OBJECT_BUFFER 0x2000
#define CL_GL_OBJECT_TEXTURE2D 0x2001
#define CL_GL_OBJECT_TEXTURE3D 0x2002
#define CL_GL_OBJECT_RENDERBUFFER 0x2003
#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E
#define CL_GL_OBJECT_TEXTURE1D 0x200F
#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010
#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011
/* cl_gl_texture_info */
#define CL_GL_TEXTURE_TARGET 0x2004
#define CL_GL_MIPMAP_LEVEL 0x2005
#define CL_GL_NUM_SAMPLES 0x2012
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLBuffer(cl_context /* context */,
cl_mem_flags /* flags */,
cl_GLuint /* bufobj */,
int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLTexture(cl_context /* context */,
cl_mem_flags /* flags */,
cl_GLenum /* target */,
cl_GLint /* miplevel */,
cl_GLuint /* texture */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateFromGLRenderbuffer(cl_context /* context */,
cl_mem_flags /* flags */,
cl_GLuint /* renderbuffer */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetGLObjectInfo(cl_mem /* memobj */,
cl_gl_object_type * /* gl_object_type */,
cl_GLuint * /* gl_object_name */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetGLTextureInfo(cl_mem /* memobj */,
cl_gl_texture_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueAcquireGLObjects(cl_command_queue /* command_queue */,
cl_uint /* num_objects */,
const cl_mem * /* mem_objects */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReleaseGLObjects(cl_command_queue /* command_queue */,
cl_uint /* num_objects */,
const cl_mem * /* mem_objects */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
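/* A minimal usage sketch (assumes a context created with GL sharing enabled
 * and a valid GL buffer object id vbo):
 *
 *   cl_int err;
 *   cl_mem mem = clCreateFromGLBuffer(context, CL_MEM_READ_WRITE, vbo, &err);
 *   clEnqueueAcquireGLObjects(queue, 1, &mem, 0, NULL, NULL);
 *   ...enqueue kernels that use mem...
 *   clEnqueueReleaseGLObjects(queue, 1, &mem, 0, NULL, NULL);
 */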
// Deprecated OpenCL 1.1 APIs
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
clCreateFromGLTexture2D(cl_context /* context */,
cl_mem_flags /* flags */,
cl_GLenum /* target */,
cl_GLint /* miplevel */,
cl_GLuint /* texture */,
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
clCreateFromGLTexture3D(cl_context /* context */,
cl_mem_flags /* flags */,
cl_GLenum /* target */,
cl_GLint /* miplevel */,
cl_GLuint /* texture */,
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
/* cl_khr_gl_sharing extension */
#define cl_khr_gl_sharing 1
typedef cl_uint cl_gl_context_info;
/* Additional Error Codes */
#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000
/* cl_gl_context_info */
#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006
#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007
/* Additional cl_context_properties */
#define CL_GL_CONTEXT_KHR 0x2008
#define CL_EGL_DISPLAY_KHR 0x2009
#define CL_GLX_DISPLAY_KHR 0x200A
#define CL_WGL_HDC_KHR 0x200B
#define CL_CGL_SHAREGROUP_KHR 0x200C
extern CL_API_ENTRY cl_int CL_API_CALL
clGetGLContextInfoKHR(const cl_context_properties * /* properties */,
cl_gl_context_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetGLContextInfoKHR_fn)(
const cl_context_properties * properties,
cl_gl_context_info param_name,
size_t param_value_size,
void * param_value,
size_t * param_value_size_ret);
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_GL_H */
| 7,343
| 44.055215
| 107
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/libopencl-stub/include/CL/cl_gl_ext.h
|
/**********************************************************************************
* Copyright (c) 2008-2012 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
**********************************************************************************/
/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
/* cl_gl_ext.h contains vendor (non-KHR) OpenCL extensions which have */
/* OpenGL dependencies. */
#ifndef __OPENCL_CL_GL_EXT_H
#define __OPENCL_CL_GL_EXT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __APPLE__
#include <OpenCL/cl_gl.h>
#else
#include <CL/cl_gl.h>
#endif
/*
* For each extension, follow this template
* cl_VEN_extname extension */
/* #define cl_VEN_extname 1
* ... define new types, if any
* ... define new tokens, if any
* ... define new APIs, if any
*
* If you need GLtypes here, mirror them with a cl_GLtype, rather than including a GL header
* This allows us to avoid having to decide whether to include GL headers or GLES here.
*/
/*
* cl_khr_gl_event extension
* See section 9.9 in the OpenCL 1.1 spec for more information
*/
#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D
extern CL_API_ENTRY cl_event CL_API_CALL
clCreateEventFromGLsyncKHR(cl_context /* context */,
cl_GLsync /* cl_GLsync */,
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1;
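/* A minimal usage sketch (assumes a shared CL/GL context and a GL fence sync
 * object; the resulting event can gate CL work on GL completion):
 *
 *   cl_int err;
 *   cl_event ev = clCreateEventFromGLsyncKHR(context, (cl_GLsync)fence, &err);
 *   clEnqueueAcquireGLObjects(queue, 1, &mem, 1, &ev, NULL);
 */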
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_GL_EXT_H */
| 2,630
| 36.585714
| 94
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/libopencl-stub/include/CL/opencl.h
|
/*******************************************************************************
* Copyright (c) 2008-2012 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
******************************************************************************/
/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
#ifndef __OPENCL_H
#define __OPENCL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __APPLE__
#include <OpenCL/cl.h>
#include <OpenCL/cl_gl.h>
#include <OpenCL/cl_gl_ext.h>
#include <OpenCL/cl_ext.h>
#else
#include <CL/cl.h>
#include <CL/cl_gl.h>
#include <CL/cl_gl_ext.h>
#include <CL/cl_ext.h>
#endif
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_H */
| 1,754
| 30.909091
| 81
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/libvulkan-stub/include/vulkan/vk_platform.h
|
//
// File: vk_platform.h
//
/*
** Copyright (c) 2014-2015 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#ifndef VK_PLATFORM_H_
#define VK_PLATFORM_H_
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
/*
***************************************************************************************************
* Platform-specific directives and type declarations
***************************************************************************************************
*/
/* Platform-specific calling convention macros.
*
* Platforms should define these so that Vulkan clients call Vulkan commands
* with the same calling conventions that the Vulkan implementation expects.
*
* VKAPI_ATTR - Placed before the return type in function declarations.
* Useful for C++11 and GCC/Clang-style function attribute syntax.
* VKAPI_CALL - Placed after the return type in function declarations.
* Useful for MSVC-style calling convention syntax.
* VKAPI_PTR - Placed between the '(' and '*' in function pointer types.
*
* Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void);
* Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
*/
#if defined(_WIN32)
// On Windows, Vulkan commands use the stdcall convention
#define VKAPI_ATTR
#define VKAPI_CALL __stdcall
#define VKAPI_PTR VKAPI_CALL
#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7
#error "Vulkan isn't supported for the 'armeabi' NDK ABI"
#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)
// On Android 32-bit ARM targets, Vulkan functions use the "hardfloat"
// calling convention, i.e. float parameters are passed in registers. This
// is true even if the rest of the application passes floats on the stack,
// as it does by default when compiling for the armeabi-v7a NDK ABI.
#define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
#define VKAPI_CALL
#define VKAPI_PTR VKAPI_ATTR
#else
// On other platforms, use the default calling convention
#define VKAPI_ATTR
#define VKAPI_CALL
#define VKAPI_PTR
#endif
#include <stddef.h>
#if !defined(VK_NO_STDINT_H)
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef signed __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef signed __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef signed __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#endif
#endif // !defined(VK_NO_STDINT_H)
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
// Platform-specific headers required by platform window system extensions.
// These are enabled prior to #including "vulkan.h". The same enable then
// controls inclusion of the extension interfaces in vulkan.h.
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include <android/native_window.h>
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
#include <mir_toolkit/client_types.h>
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
#include <wayland-client.h>
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
#include <windows.h>
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
#include <X11/Xlib.h>
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
#include <xcb/xcb.h>
#endif
#endif
| 3,903
| 31.264463
| 99
|
h
|
null |
pytorch-main/caffe2/mobile/contrib/nnapi/dlnnapi.c
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <dlfcn.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "dlnnapi.h"
#define DLNNAPI_DEBUG_LOG 0
#if DLNNAPI_DEBUG_LOG
#include <android/log.h>
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, "NNAPI", __VA_ARGS__)
#endif
#define TAG_API_27 "\x01"
/* clang-format off */
static const char function_names[] =
TAG_API_27 "ANeuralNetworksMemory_createFromFd\0"
TAG_API_27 "ANeuralNetworksMemory_free\0"
TAG_API_27 "ANeuralNetworksModel_create\0"
TAG_API_27 "ANeuralNetworksModel_finish\0"
TAG_API_27 "ANeuralNetworksModel_free\0"
TAG_API_27 "ANeuralNetworksCompilation_create\0"
TAG_API_27 "ANeuralNetworksCompilation_free\0"
TAG_API_27 "ANeuralNetworksCompilation_setPreference\0"
TAG_API_27 "ANeuralNetworksCompilation_finish\0"
TAG_API_27 "ANeuralNetworksModel_addOperand\0"
TAG_API_27 "ANeuralNetworksModel_setOperandValue\0"
TAG_API_27 "ANeuralNetworksModel_setOperandValueFromMemory\0"
TAG_API_27 "ANeuralNetworksModel_addOperation\0"
TAG_API_27 "ANeuralNetworksModel_identifyInputsAndOutputs\0"
TAG_API_27 "ANeuralNetworksExecution_create\0"
TAG_API_27 "ANeuralNetworksExecution_free\0"
TAG_API_27 "ANeuralNetworksExecution_setInput\0"
TAG_API_27 "ANeuralNetworksExecution_setInputFromMemory\0"
TAG_API_27 "ANeuralNetworksExecution_setOutput\0"
TAG_API_27 "ANeuralNetworksExecution_setOutputFromMemory\0"
TAG_API_27 "ANeuralNetworksExecution_startCompute\0"
TAG_API_27 "ANeuralNetworksEvent_wait\0"
TAG_API_27 "ANeuralNetworksEvent_free\0";
/* clang-format on */
bool dlnnapi_load(struct dlnnapi* nnapi, uint32_t flags) {
if (nnapi == NULL) {
return false;
}
memset(nnapi, 0, sizeof(struct dlnnapi));
if (!(flags & DLNNAPI_FLAG_VERSION_27)) {
/* No supported NNAPI version is requested */
return false;
}
/* Clear libdl error state */
dlerror();
nnapi->handle = dlopen("libneuralnetworks.so", RTLD_LAZY | RTLD_LOCAL);
if (nnapi->handle != NULL) {
#if DLNNAPI_DEBUG_LOG
LOGI("note: loaded libneuralnetworks.so\n");
#endif
uint8_t version_flags = (uint8_t)(flags & DLNNAPI_FLAG_VERSION_MASK);
const char* function_name = function_names;
for (size_t i = 0; i < DLNNAPI_FUNCTION_COUNT; i++) {
const uint8_t tag = (uint8_t)*function_name++;
if ((tag & version_flags) != 0) {
void* function = dlsym(nnapi->handle, function_name);
if (function == NULL) {
#if DLNNAPI_DEBUG_LOG
LOGI(
"note: failed to locate %s in libneuralnetworks.so: %s\n",
function_name,
dlerror());
#endif
version_flags &= ~tag;
if (version_flags == 0) {
goto failed;
}
}
nnapi->functions[i] = function;
}
function_name += strlen(function_name) + 1;
}
nnapi->flags = (uint32_t)version_flags;
return true;
}
#if DLNNAPI_DEBUG_LOG
LOGI("note: failed to load libneuralnetworks.so: %s\n", dlerror());
#endif
failed:
dlnnapi_free(nnapi);
return false;
}
void dlnnapi_free(struct dlnnapi* nnapi) {
if (nnapi != NULL) {
if (nnapi->handle != NULL) {
/* Clear libdl error state */
dlerror();
if (dlclose(nnapi->handle) != 0) {
#if DLNNAPI_DEBUG_LOG
LOGI("note: failed to unload libneuralnetworks.so: %s\n", dlerror());
#endif
}
}
memset(nnapi, 0, sizeof(struct dlnnapi));
}
}
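/* A minimal usage sketch (assumes the struct and flags from dlnnapi.h):
 *
 *   struct dlnnapi nnapi;
 *   if (dlnnapi_load(&nnapi, DLNNAPI_FLAG_VERSION_27)) {
 *     ... call NNAPI through the loaded function pointers ...
 *     dlnnapi_free(&nnapi);
 *   }
 */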
| 4,073
| 30.338462
| 77
|
c
|
null |
pytorch-main/caffe2/mobile/contrib/nnapi/nnapi.h
|
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/proto_utils.h"
#include "NeuralNetworks.h"
#include "dlnnapi.h"
namespace caffe2 {
class NNApi {
public:
using TensorVector = std::vector<TensorCPU*>;
// Three different modes:
// ANEURALNETWORKS_PREFER_LOW_POWER
// ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER
// ANEURALNETWORKS_PREFER_SUSTAINED_SPEED
NNApi(
const NetDef& init_net,
const NetDef& run_net,
Workspace* ws = nullptr,
const PreferenceCode pref = ANEURALNETWORKS_PREFER_SUSTAINED_SPEED)
: preference_(pref),
run_net_(run_net),
ws_(ws) {
if (!loadNNApiLibrary()) {
CAFFE_THROW("NNApi is not supported");
}
CAFFE_ENFORCE(ws_.RunNetOnce(init_net));
}
~NNApi();
bool loadNNApiLibrary();
bool run(const TensorVector& inputs, TensorVector* outputs);
private:
dlnnapi libnnapi_;
ANeuralNetworksModel* model_{nullptr};
ANeuralNetworksCompilation* compilation_{nullptr};
ANeuralNetworksExecution* run_{nullptr};
ANeuralNetworksEvent* run_end_{nullptr};
PreferenceCode preference_;
NetDef run_net_;
Workspace ws_;
OperandCode tensor_type_;
uint32_t operand_idx{0};
std::unordered_map<std::string, uint32_t> operand_map_;
// dimensions for the tensors
std::unordered_map<std::string, std::vector<uint32_t>> tensor_dims_;
// mapping of the operator name "Conv" to OperatorType CONV
enum OperatorType {
AVERAGEPOOL,
CONV,
MAXPOOL,
RELU,
SOFTMAX,
};
std::unordered_map<std::string, OperatorType> operator_map_{
{"AveragePool", AVERAGEPOOL},
{"Conv", CONV},
{"MaxPool", MAXPOOL},
{"Relu", RELU},
{"Softmax", SOFTMAX}};
struct ConvPoolArgs {
int kernel_h{0};
int kernel_w{0};
int stride_x{0};
int stride_y{0};
int pad_t{0};
int pad_l{0};
int pad_b{0};
int pad_r{0};
};
void getConvPoolArgs(const ArgumentHelper& helper, ConvPoolArgs& args);
uint32_t addScalarOperand(int32_t val);
uint32_t addFloatOperand(float val);
uint32_t addTensorOperand(
const std::string& blob,
OperandCode type,
std::vector<uint32_t>& dims,
float scale = 1.0,
int32_t zero_point = 0);
// lazily initialize model_ in run()
void init(const TensorVector& inputs, TensorVector* outputs);
void addConv(const OperatorDef& op, bool fuse_relu = false);
void addPooling(
const OperatorDef& op,
OperationCode op_code,
bool fuse_relu = false);
void addRelu(const OperatorDef& op);
void addSoftmax(const OperatorDef& op);
};
} // namespace caffe2
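// A hedged usage sketch (illustrative only, not part of the original header).
// `init_net`, `run_net` and `input` are assumed to be a loaded init NetDef,
// a predict NetDef and a populated TensorCPU, respectively:
//
//   caffe2::NNApi nnapi(init_net, run_net);
//   std::vector<caffe2::TensorCPU*> inputs{&input};
//   std::vector<caffe2::TensorCPU*> outputs;
//   CAFFE_ENFORCE(nnapi.run(inputs, &outputs));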
// File: pytorch-main/caffe2/mobile/contrib/ulp2/ulp.h
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
constexpr size_t k2b1bXBits = 2;
struct ConvArgs {
int stride_w{1};
int stride_h{1};
int pad_l{0};
int pad_t{0};
int pad_b{0};
int pad_r{0};
};
using ParallelFor = std::function<void(size_t, std::function<void(size_t)>)>;
struct QConvState {
std::vector<std::unique_ptr<TensorCPU>> XQs;
std::vector<std::unique_ptr<TensorCPU>> YQs;
std::unique_ptr<TensorCPU> WQ;
  // Whether packing is used is architecture-dependent.
std::unique_ptr<TensorCPU> WQPacked;
std::unique_ptr<TensorCPU> WQN;
std::unique_ptr<TensorCPU> WQL1Norm;
// Useful for e.g. incomplete tiles
std::unique_ptr<TensorCPU> scratch;
std::unique_ptr<TensorCPU> scratchColBuffer;
std::unique_ptr<TensorCPU> bias;
ParallelFor parallelFor{nullptr};
};
void uniformQuantize2b1b(const TensorCPU& X,
const std::vector<std::unique_ptr<TensorCPU>>& XQ,
float offset,
float inter_center_distance);
void qpad_zero(const ConvArgs& args, const TensorCPU& X, TensorCPU* Y);
inline size_t divRoundUp(size_t x, size_t d) { return (x + d - 1) / d; }
void signQuantize(const TensorCPU& X, TensorCPU* XQ);
void filterNormalization11(const TensorCPU& WQ, TensorCPU* WQN);
void filterNormalizationL1(const TensorCPU& W, TensorCPU* WL1);
std::unique_ptr<QConvState> create2b1bConvState(Workspace* ws,
const TensorCPU& W,
const TensorCPU* b);
void run2b1bConvGeneric(QConvState* state, const ConvArgs& args, const TensorCPU& X, TensorCPU* Y);
void qconv(
const ConvArgs& args, const TensorCPU& X, const TensorCPU& W, const TensorCPU* b, TensorCPU* Y);
void qim2col(const ConvArgs& args, const TensorCPU& XQ, const TensorCPU& WQ, TensorCPU* XQcol);
void run2b1bUnification(QConvState* state,
size_t N,
size_t C,
const float* WQNVdata,
const float* YQs0Vdata,
const float* YQs1Vdata,
size_t YQstride,
float* Ydata,
size_t Ystride,
const float* bias);
} // namespace caffe2
// File: pytorch-main/caffe2/mpi/mpi_common.h
#ifndef CAFFE2_MPI_MPI_COMMON_H_
#define CAFFE2_MPI_MPI_COMMON_H_
#include <mpi.h>
#include <mutex>
#include "caffe2/core/common.h"
#include "caffe2/core/logging.h"
namespace caffe2 {
inline void CheckInitializedMPI() {
int flag;
MPI_Initialized(&flag);
CAFFE_ENFORCE(flag, "MPI does not seem to have been initialized.");
}
template <typename T>
class MPIDataTypeWrapper;
#define MPI_DATATYPE_WRAPPER(c_type, mpi_type) \
template <> \
class MPIDataTypeWrapper<c_type> { \
public: \
inline static MPI_Datatype type() { \
return mpi_type; \
} \
};
MPI_DATATYPE_WRAPPER(char, MPI_CHAR)
MPI_DATATYPE_WRAPPER(float, MPI_FLOAT)
MPI_DATATYPE_WRAPPER(double, MPI_DOUBLE)
// Note(Yangqing): as necessary, add more specializations.
#undef MPI_DATATYPE_WRAPPER
// For all Caffe2 MPI calls, we wrap them inside an MPI mutex lock guard.
TORCH_API std::mutex& MPIMutex();
#define MPI_CHECK(condition) \
do { \
std::lock_guard<std::mutex> guard(::caffe2::MPIMutex()); \
int error = (condition); \
CAFFE_ENFORCE( \
error == MPI_SUCCESS, \
"Caffe2 MPI Error at: ", \
__FILE__, \
":", \
__LINE__, \
": ", \
error); \
} while (0)
/**
 * @brief Gets the global MPI communicator used by Caffe2. By default, this
* is MPI_COMM_WORLD unless you call SetGlobalMPIComm().
*/
TORCH_API MPI_Comm GlobalMPIComm();
/**
* @brief Sets the global MPI communicator. Caffe2 takes over the ownership
* of the passed in communicator.
*/
TORCH_API void SetGlobalMPIComm(MPI_Comm new_comm);
/**
* @brief A helper function to return the size of the given communicator.
*/
TORCH_API int MPICommSize(MPI_Comm comm);
/**
* @brief A helper function to return the rank of the given communicator.
*/
TORCH_API int MPICommRank(MPI_Comm comm);
/**
* @brief A simple wrapper over an MPI common world.
*/
class MPICommonWorldWrapper {
public:
/**
* @brief Creates a common world wrapper.
*
* The new common world is created by taking the existing communicator
* passed in as src_comm, and splitting it using the color and the rank
   * specified. By default, we split from Caffe2's global communicator,
* and use color 0 as well as rank implicitly given by src_comm. As a result,
* the default constructor basically creates a comm identical to the source
* comm world.
*/
explicit MPICommonWorldWrapper(
MPI_Comm src_comm = MPI_COMM_NULL,
int color = 0,
int rank = -1) {
if (src_comm == MPI_COMM_NULL) {
src_comm = GlobalMPIComm();
}
if (rank == -1) {
MPI_CHECK(MPI_Comm_rank(src_comm, &rank));
}
MPI_CHECK(MPI_Comm_split(src_comm, color, rank, &comm_));
MPI_CHECK(MPI_Comm_size(comm_, &size_));
MPI_CHECK(MPI_Comm_rank(comm_, &rank_));
}
~MPICommonWorldWrapper() {
int ret;
MPI_CHECK(MPI_Finalized(&ret));
if (!ret) {
MPI_Comm_free(&comm_);
}
}
/**
* @brief Returns the common world held by the wrapper.
*/
inline MPI_Comm comm() const {
return comm_;
}
/**
* @brief Returns the size of the world.
*/
inline int size() const {
return size_;
}
/**
* @brief Returns the rank of this process in the world.
*/
inline int rank() const {
return rank_;
}
private:
MPI_Comm comm_;
int size_;
int rank_;
};
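// A hedged usage sketch (illustrative only, not part of the original header):
// splitting a private communicator off Caffe2's global one and querying it.
//
//   caffe2::MPICommonWorldWrapper world; // default: split of GlobalMPIComm()
//   MPI_CHECK(MPI_Barrier(world.comm()));
//   LOG(INFO) << "rank " << world.rank() << " of " << world.size();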
/**
* A function used to perform peer setup so one does not need to use
* mpirun / mpiexec to run the binary. Note that if you use mpirun or mpiexec
* to set up the common world, do not use this function - MPI_Init would have
* already set that up.
*
* This also assumes that you have a common path (like NFS) that multiple
* instances can read from.
*
* Inputs:
* replicas (int): the number of replicas that mpi will run with.
* role (string): the role of this process, "server" or "client".
* job_path (string): a file name that the server will write its port into
* and the clients will read the server's port from.
*/
void MPISetupPeers(
const int replicas,
const string& role,
const string& job_path);
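// A hedged usage sketch (illustrative only; the path below is a placeholder).
// One process passes role "server", the remaining replicas pass "client", and
// all of them must see the same job_path (e.g. on NFS):
//
//   caffe2::MPISetupPeers(4, "server", "/shared/mpi_job"); // on the server host
//   caffe2::MPISetupPeers(4, "client", "/shared/mpi_job"); // on the other hosts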
} // namespace caffe2
#endif // CAFFE2_MPI_MPI_COMMON_H_
// File: pytorch-main/caffe2/mpi/mpi_ops.h
#ifndef CAFFE2_MPI_MPI_OPS_H_
#define CAFFE2_MPI_MPI_OPS_H_
#include <mpi.h>
#include "caffe2/core/operator.h"
#include "caffe2/mpi/mpi_common.h"
namespace caffe2 {
// TODO(jiayq): if needed, write up the use of color and key with MPI split.
// Currently, the operator simply creates a communicator that has the
// same topology as the Caffe2 global communicator.
template <class Context>
class MPICreateCommonWorldOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
MPICreateCommonWorldOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override {
OperatorBase::Outputs()[0]->Reset(new MPICommonWorldWrapper());
return true;
}
};
template <class Context>
class MPIBroadcastOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
MPIBroadcastOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
root_(OperatorBase::template GetSingleArgument<int>("root", 0)) {}
~MPIBroadcastOp() override {}
bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();
CAFFE_ENFORCE(
OperatorBase::OutputIsTensorType(0, Context::GetDeviceType()),
"Output is of wrong type.");
auto* output = Output(0);
// Make sure that output is already allocated.
CAFFE_ENFORCE(
output->numel() > 0,
"Broadcast op uses in-place operation so the output "
"should be already allocated.");
MPI_CHECK(MPI_Bcast(
output->raw_mutable_data(),
output->nbytes(),
MPIDataTypeWrapper<char>::type(),
root_,
comm));
return true;
}
protected:
int root_;
};
// MPIReduceOp does Reduce using MPI. Currently, only SUM is supported.
template <typename T, class Context>
class MPIReduceOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
MPIReduceOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
root_(OperatorBase::template GetSingleArgument<int>("root", 0)) {}
~MPIReduceOp() override {}
bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();
auto& input = Input(1);
auto* output = Output(0, input.sizes(), at::dtype<T>());
MPI_CHECK(MPI_Reduce(
const_cast<T*>(input.template data<T>()),
output->template mutable_data<T>(),
input.numel(),
MPIDataTypeWrapper<T>::type(),
MPI_SUM,
root_,
comm));
return true;
}
protected:
int root_;
};
// MPIAllgatherOp does MPIAllgather using MPI.
template <typename T, class Context>
class MPIAllgatherOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(MPIAllgatherOp);
bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();
auto& input = Input(1);
auto* output = Output(0);
vector<int64_t> output_dims = input.sizes().vec();
output_dims[0] *= OperatorBase::Input<MPICommonWorldWrapper>(0).size();
output->Resize(output_dims);
MPI_CHECK(MPI_Allgather(
const_cast<T*>(input.template data<T>()),
input.numel(),
MPIDataTypeWrapper<T>::type(),
output->template mutable_data<T>(),
input.numel(),
MPIDataTypeWrapper<T>::type(),
comm));
return true;
}
};
// MPIAllreduceOp does MPIAllreduce using MPI. Currently, only SUM is supported.
template <typename T, class Context>
class MPIAllreduceOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
USE_SIMPLE_CTOR_DTOR(MPIAllreduceOp);
bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();
auto& input = Input(1);
auto* output = Output(0, input.sizes(), at::dtype<T>());
void* source;
if (output->template mutable_data<T>() == input.template data<T>()) {
// We are doing in-place call. Special case handling.
source = MPI_IN_PLACE;
} else {
// Normal allreduce takes the source from the input.
source = const_cast<T*>(input.template data<T>());
}
MPI_CHECK(MPI_Allreduce(
source,
output->template mutable_data<T>(),
input.numel(),
MPIDataTypeWrapper<T>::type(),
MPI_SUM,
comm));
return true;
}
};
template <class Context>
class MPISendTensorOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
MPISendTensorOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
OP_SINGLE_ARG(int, "dst", dst_, MPI_ANY_SOURCE),
OP_SINGLE_ARG(int, "tag", tag_, MPI_ANY_TAG),
OP_SINGLE_ARG(bool, "raw_buffer", raw_buffer_, false) {
CAFFE_ENFORCE(raw_buffer_, "non-raw-buffer transfer not supported yet.");
CAFFE_ENFORCE(
dst_ != MPI_ANY_SOURCE || def.input_size() == 4,
"You should explicitly specify the to rank either via "
"argument or via input blobs.");
CAFFE_ENFORCE(
tag_ != MPI_ANY_TAG || def.input_size() == 4,
"You should explicitly specify the tag either via "
"argument or via input blobs.");
}
bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(COMM).comm();
auto& input = Input(INPUT);
if (InputSize() == 4) {
dst_ = OperatorBase::Input<Tensor>(DST, CPU).template data<int>()[0];
tag_ = OperatorBase::Input<Tensor>(TAG, CPU).template data<int>()[0];
}
if (raw_buffer_) {
// We need to do a const cast to cope with the fact that, before OpenMPI
// 1.7, MPI_Send expects a non-const pointer although it uses it in a
// const way.
MPI_CHECK(MPI_Send(
const_cast<void*>(input.raw_data()),
input.nbytes(),
MPI_CHAR,
dst_,
tag_,
comm));
} else {
CAFFE_NOT_IMPLEMENTED;
}
return true;
}
protected:
int dst_;
int tag_;
bool raw_buffer_;
INPUT_TAGS(COMM, INPUT, DST, TAG);
};
template <class Context>
class MPIReceiveTensorOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
MPIReceiveTensorOp(const OperatorDef& def, Workspace* ws)
: Operator<Context>(def, ws),
OP_SINGLE_ARG(int, "src", src_, MPI_ANY_SOURCE),
OP_SINGLE_ARG(int, "tag", tag_, MPI_ANY_TAG),
OP_SINGLE_ARG(bool, "raw_buffer", raw_buffer_, false) {
CAFFE_ENFORCE(raw_buffer_, "non-raw-buffer transfer not supported yet.");
}
bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(COMM).comm();
if (InputSize() == 4) {
src_ = OperatorBase::Input<Tensor>(SRC_IN, CPU).template data<int>()[0];
tag_ = OperatorBase::Input<Tensor>(TAG_IN, CPU).template data<int>()[0];
}
MPI_Status status;
if (raw_buffer_) {
auto* output = Output(OUTPUT);
MPI_CHECK(MPI_Recv(
output->raw_mutable_data(),
output->nbytes(),
MPI_CHAR,
src_,
tag_,
comm,
&status));
} else {
CAFFE_NOT_IMPLEMENTED;
}
auto* src_out = OperatorBase::Output<Tensor>(SRC_OUT, CPU);
src_out->Resize();
src_out->template mutable_data<int>()[0] = status.MPI_SOURCE;
auto* tag_out = OperatorBase::Output<Tensor>(TAG_OUT, CPU);
tag_out->Resize();
tag_out->template mutable_data<int>()[0] = status.MPI_TAG;
return true;
}
protected:
int src_;
int tag_;
bool raw_buffer_;
INPUT_TAGS(COMM, INPUT, SRC_IN, TAG_IN);
OUTPUT_TAGS(OUTPUT, SRC_OUT, TAG_OUT);
};
} // namespace caffe2
#endif // CAFFE2_MPI_MPI_OPS_H_
// File: pytorch-main/caffe2/observers/operator_attaching_net_observer.h
#pragma once
#include "caffe2/core/net.h"
#include "caffe2/core/observer.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
// Thin class that attaches the observer to all operators in the net
template <typename TOpObserver, typename TNetObserver>
class OperatorAttachingNetObserver : public ObserverBase<NetBase> {
public:
explicit OperatorAttachingNetObserver(
NetBase* subject_,
TNetObserver* netObserver)
: ObserverBase<NetBase>(subject_) {
const auto& operators = subject_->GetOperators();
for (auto* op : operators) {
auto observer = std::make_unique<TOpObserver>(op, netObserver);
const auto* ob = observer.get();
op->AttachObserver(std::move(observer));
operator_observers_.push_back(ob);
}
}
virtual ~OperatorAttachingNetObserver(){};
protected:
std::vector<const TOpObserver*> operator_observers_;
};
} // namespace caffe2
// File: pytorch-main/caffe2/observers/profile_observer.h
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <unordered_map>
#include "caffe2/core/common.h"
#include "caffe2/core/event.h"
#include "caffe2/core/net.h"
#include "caffe2/core/observer.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/timer.h"
#include "caffe2/observers/operator_attaching_net_observer.h"
namespace caffe2 {
/**
* This observer displays a description of each operator executed in a network.
 * This includes input and output tensors (name, size, type), arguments, and execution
* time. This can be used to analyze different performance characteristics.
* NOTE: Currently this observer only supports synchronized computation
**/
class ProfileObserver;
class ProfileCounter {
public:
explicit ProfileCounter() {}
protected:
Timer timer_;
float start_time_ = 0.0f;
float run_time_ = 0.0f;
};
class TORCH_API ProfileOperatorObserver final
: public ProfileCounter,
public ObserverBase<OperatorBase> {
public:
explicit ProfileOperatorObserver(OperatorBase* subject) = delete;
explicit ProfileOperatorObserver(
OperatorBase* subject,
ProfileObserver* netObserver)
: ObserverBase<OperatorBase>(subject), netObserver_(netObserver) {
if (subject) {
net_position_ = subject->net_position();
}
}
explicit ProfileOperatorObserver(
OperatorBase* subject,
ProfileObserver* netObserver,
int net_position,
int rnn_order)
: ProfileOperatorObserver(subject, netObserver) {
net_position_ = net_position;
rnn_order_ = rnn_order;
}
std::unique_ptr<ObserverBase<OperatorBase>> rnnCopy(
OperatorBase* subject,
int rnn_order) const override;
void Dump() const;
virtual std::string getId() const {
std::stringstream ss;
ss << net_position_;
if (rnn_order_ != OperatorBase::kNoNetPositionSet) {
ss << "-" << rnn_order_;
}
return ss.str();
}
protected:
ProfileObserver* netObserver_;
int net_position_; // Needed because this is not visible in RNN Executor
int rnn_order_ = OperatorBase::kNoNetPositionSet;
private:
void Start() override;
void Stop() override;
};
class TORCH_API ProfileObserver final : public OperatorAttachingNetObserver<
ProfileOperatorObserver,
ProfileObserver> {
public:
explicit ProfileObserver(NetBase* subject)
: OperatorAttachingNetObserver<ProfileOperatorObserver, ProfileObserver>(
subject,
this) {}
void Start() override{};
void Stop() override{};
private:
vector<const ProfileOperatorObserver*> operator_observers_;
};
} // namespace caffe2
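// A hedged usage sketch (illustrative only, not part of the original header).
// The net takes ownership of the observer; per-operator descriptions and
// timings are reported as the net runs. `ws` and the net name are assumptions.
//
//   caffe2::NetBase* net = ws.GetNet("predict_net");
//   net->AttachObserver(std::make_unique<caffe2::ProfileObserver>(net));
//   ws.RunNet("predict_net");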
// File: pytorch-main/caffe2/observers/runcnt_observer.h
#pragma once
#include "caffe2/core/net.h"
#include "caffe2/core/observer.h"
#include "caffe2/core/operator.h"
#include "caffe2/observers/operator_attaching_net_observer.h"
namespace caffe2 {
class RunCountNetObserver;
class TORCH_API RunCountOperatorObserver final
: public ObserverBase<OperatorBase> {
public:
explicit RunCountOperatorObserver(OperatorBase* op) = delete;
RunCountOperatorObserver(OperatorBase* op, RunCountNetObserver* netObserver);
~RunCountOperatorObserver() {}
std::unique_ptr<ObserverBase<OperatorBase>> rnnCopy(
OperatorBase* subject,
int rnn_order) const override;
private:
void Start() override;
void Stop() override;
private:
RunCountNetObserver* netObserver_;
};
class TORCH_API RunCountNetObserver final : public OperatorAttachingNetObserver<
RunCountOperatorObserver,
RunCountNetObserver> {
public:
explicit RunCountNetObserver(NetBase* subject_)
: OperatorAttachingNetObserver<
RunCountOperatorObserver,
RunCountNetObserver>(subject_, this),
cnt_(0) {}
~RunCountNetObserver() {}
std::string debugInfo() override;
friend class RunCountOperatorObserver;
private:
void Start() override;
void Stop() override;
protected:
std::atomic<int> cnt_;
};
} // namespace caffe2
// File: pytorch-main/caffe2/observers/time_observer.h
#ifndef CAFFE2_CONTRIB_OBSERVERS_TIME_OBSERVER_H_
#define CAFFE2_CONTRIB_OBSERVERS_TIME_OBSERVER_H_
#include <unordered_map>
#include "caffe2/core/common.h"
#include "caffe2/core/net.h"
#include "caffe2/core/observer.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/timer.h"
#include "caffe2/observers/operator_attaching_net_observer.h"
namespace caffe2 {
class TimeObserver;
class TORCH_API TimeCounter {
public:
explicit TimeCounter() {}
inline float average_time() const {
return total_time_ / iterations_;
}
protected:
Timer timer_;
float start_time_ = 0.0f;
float total_time_ = 0.0f;
int iterations_ = 0;
};
class TORCH_API TimeOperatorObserver final : public TimeCounter,
public ObserverBase<OperatorBase> {
public:
explicit TimeOperatorObserver(OperatorBase* subject) = delete;
explicit TimeOperatorObserver(
OperatorBase* subject,
TimeObserver* /* unused */)
: ObserverBase<OperatorBase>(subject) {}
std::unique_ptr<ObserverBase<OperatorBase>> rnnCopy(
OperatorBase* subject,
int rnn_order) const override;
private:
void Start() override;
void Stop() override;
};
class TORCH_API TimeObserver final
: public TimeCounter,
public OperatorAttachingNetObserver<TimeOperatorObserver, TimeObserver> {
public:
explicit TimeObserver(NetBase* subject)
: OperatorAttachingNetObserver<TimeOperatorObserver, TimeObserver>(
subject,
this) {}
float average_time_children() const {
float sum = 0.0f;
for (const auto* observer : operator_observers_) {
sum += observer->average_time();
}
return sum / subject_->GetOperators().size();
}
private:
void Start() override;
void Stop() override;
};
} // namespace caffe2
#endif // CAFFE2_CONTRIB_OBSERVERS_TIME_OBSERVER_H_
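// A hedged usage sketch (illustrative only, not part of the original header).
// After one or more runs, average_time() reports the mean net run time and
// average_time_children() the mean per-operator time. `ws` and `net` are
// assumed to be an existing Workspace and a net created in it.
//
//   auto observer = std::make_unique<caffe2::TimeObserver>(net);
//   const auto* ob = observer.get();
//   net->AttachObserver(std::move(observer));
//   ws.RunNet(net->Name());
//   LOG(INFO) << "avg net time: " << ob->average_time()
//             << ", avg op time: " << ob->average_time_children();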
// File: pytorch-main/caffe2/onnx/backend.h
#pragma once
#include "caffe2/onnx/backend_rep.h"
#include "caffe2/onnx/device.h"
#include "caffe2/onnx/helper.h"
#include "caffe2/proto/caffe2_pb.h"
#include "onnx/onnx_pb.h"
#include <functional>
#include <string>
#include <unordered_map>
#include <unordered_set>
constexpr int kKnownOpsetVersion = 9;
namespace caffe2 {
namespace onnx {
using ::ONNX_NAMESPACE::AttributeProto;
using ::ONNX_NAMESPACE::GraphProto;
using ::ONNX_NAMESPACE::ModelProto;
using ::ONNX_NAMESPACE::NodeProto;
using ::ONNX_NAMESPACE::TensorProto;
using ::ONNX_NAMESPACE::ValueInfoProto;
using ValueInfoMap = std::unordered_map<std::string, ValueInfoProto>;
class TORCH_API ConversionContext {
public:
ConversionContext(const ValueInfoMap& value_infos, int opset_version)
: value_infos_(value_infos), opset_version_(opset_version) {}
const ValueInfoMap& value_infos() const {
return value_infos_;
}
int opset_version() const {
return opset_version_;
}
private:
const ValueInfoMap& value_infos_;
const int opset_version_;
};
// \brief This struct holds the converted ops after the onnx->c2 conversion.
// Notice that for RNN ops, it may create ops in init_net. Hence we have the
// `init_ops` field.
struct TORCH_API Caffe2Ops {
::google::protobuf::RepeatedPtrField<caffe2::OperatorDef> init_ops;
::google::protobuf::RepeatedPtrField<caffe2::OperatorDef> ops;
::google::protobuf::RepeatedPtrField<std::string> interface_blobs;
};
// A convenient class to query attributes of a NodeProto. Note that the
// NodeProto cannot be modified while it is being queried through an
// OnnxAttributes object.
class TORCH_API OnnxAttributes {
public:
OnnxAttributes(const NodeProto& node);
bool HasAttribute(const std::string& key) const {
return onnx_attrs_.count(key);
}
AttributeProto* AddRewrittenAttribute(const std::string& key) {
auto tmp = rewritten_onnx_attrs_.emplace(key, AttributeProto());
auto& attr = tmp.first->second;
attr.set_name(key);
return &attr;
}
::google::protobuf::RepeatedPtrField<caffe2::Argument> OnnxAttrToCaffe2Arg(
std::function<std::string(const std::string&)> mapper) const;
  // Get an attribute by name, specialized on data type T. Note that
// the return value is copied
template <typename T>
T get(const std::string& key) const;
template <typename T>
T get(const std::string& key, const T& default_value) const {
if (onnx_attrs_.count(key)) {
return get<T>(key);
} else {
return default_value;
}
}
const AttributeProto* remove(const std::string& key) {
const AttributeProto* result = nullptr;
auto iter = onnx_attrs_.find(key);
if (iter != onnx_attrs_.end()) {
result = iter->second;
onnx_attrs_.erase(iter);
}
return result;
}
private:
std::unordered_map<std::string, const AttributeProto*> onnx_attrs_;
std::unordered_map<std::string, AttributeProto> rewritten_onnx_attrs_;
};
template <>
int64_t OnnxAttributes::get(const std::string& key) const;
template <>
float OnnxAttributes::get(const std::string& key) const;
template <>
::google::protobuf::RepeatedPtrField<std::string> OnnxAttributes::get(
const std::string& key) const;
template <>
::google::protobuf::RepeatedField<::google::protobuf::int64>
OnnxAttributes::get(const std::string& key) const;
template <>
::google::protobuf::RepeatedField<float> OnnxAttributes::get(
const std::string& key) const;
template <>
const TensorProto* OnnxAttributes::get(const std::string& key) const;
// convenient class for onnx node
struct TORCH_API OnnxNode {
OnnxNode(const NodeProto& node_in) : node(node_in), attributes(node_in) {}
const NodeProto& node;
OnnxAttributes attributes;
};
class TORCH_API Caffe2Backend {
public:
// Since we still have this Python-C++ hybrid flow, we will need to take the
// DummyName generator from Python as a pointer. In this case, Python env owns
  // the DummyName object and we don't need to keep track of its lifetime in
  // C++. Therefore, in this case, we use a no-op deleter to prevent the C++
  // shared_ptr from releasing the object.
Caffe2Backend(DummyName* dummy = nullptr) {
if (dummy) {
dummy_ = std::shared_ptr<DummyName>(dummy, [](DummyName*) {});
} else {
dummy_ = std::make_shared<DummyName>();
}
}
Caffe2BackendRep* Prepare(
const std::string& onnx_model_str,
const std::string& device,
const std::vector<Caffe2Ops>& extras);
  bool SupportOp(const std::string type) const;
Caffe2Ops ConvertNode(
const std::string& node_str,
const ConversionContext& ctx);
void BuildTensorFillingOp(
caffe2::OperatorDef* c2_op,
const TensorProto& onnx_tensor,
const std::string& output_name = "",
const std::string& shape_name = "");
private:
using SpecialOpConverter =
Caffe2Ops (Caffe2Backend::*)(OnnxNode*, const ConversionContext&);
void OnnxToCaffe2(
caffe2::NetDef* init_net,
caffe2::NetDef* pred_net,
const ModelProto& onnx_model,
const std::string& device,
int opset_version,
bool include_initializers,
const std::vector<Caffe2Ops>& extras);
void CheckOpSchemaArguments(
const caffe2::OpSchema& schema,
const caffe2::OperatorDef& op);
Caffe2Ops OnnxNodeToCaffe2Ops(
const ModelProto& init_model,
const ModelProto& pred_model,
const ConversionContext& ctx,
OnnxNode* onnx_node);
std::unordered_set<std::string> AllNamesInGraph(const GraphProto& graph);
Caffe2Ops CommonOnnxNodeToCaffe2Ops(
OnnxNode* onnx_node,
const ConversionContext& ctx);
Caffe2Ops CreateArgMaxMin(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateCast(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateConstant(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateConstantOfShape(
OnnxNode* onnx_node,
const ConversionContext& ctx);
Caffe2Ops CreateConvPoolOpBase(
OnnxNode* onnx_node,
const ConversionContext& ctx);
Caffe2Ops CreatePadPool(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateReshape(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateGather(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateGemm(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreatePad(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateConcat(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateLogSoftmax(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateSlice(OnnxNode* onnx_node, const ConversionContext& ctx);
std::string PreprocessSliceIndexTensor(
OnnxNode* onnx_node,
Caffe2Ops& ret,
std::string indices_tensor,
std::string axes_tensor,
std::string rank_tensor,
std::string zero_tensor,
std::string one_tensor,
int default_value);
Caffe2Ops CreateDynamicSlice(
OnnxNode* onnx_node,
const ConversionContext& ctx);
Caffe2Ops CreateSplit(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateReciprocal(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateRandomNormal(
OnnxNode* onnx_node,
const ConversionContext& ctx);
Caffe2Ops CreateWhereOp(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateNonZeroOp(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateMultinomialOp(
OnnxNode* onnx_node,
const ConversionContext& ctx);
Caffe2Ops CreateBatchNormalization(
OnnxNode* onnx_node,
const ConversionContext& ctx);
Caffe2Ops CreateMatMul(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateUpsample(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateDropout(OnnxNode* onnx_node, const ConversionContext& ctx);
Caffe2Ops CreateLRN(OnnxNode* onnx_node, const ConversionContext& ctx);
// LUT related getters
const std::unordered_map<std::string, std::string>& get_renamed_operators()
const;
const std::unordered_set<std::string>& get_rnn_operators() const;
const std::unordered_map<std::string, int>& get_broken_operators() const;
const std::unordered_map<std::string, std::string>& get_renamed_attrs() const;
const std::
unordered_map<std::string, std::unordered_map<std::string, std::string>>&
get_per_op_renamed_attrs() const;
const std::unordered_map<std::string, Caffe2Backend::SpecialOpConverter>&
get_special_operators() const;
// Dummy name generator
std::shared_ptr<DummyName> dummy_;
};
} // namespace onnx
} // namespace caffe2
// File: pytorch-main/caffe2/onnx/backend_rep.h
#pragma once
#include "caffe2/predictor/predictor.h"
#include "caffe2/proto/caffe2_pb.h"
#include <memory>
#include <string>
#include <vector>
namespace caffe2 {
namespace onnx {
class TORCH_API Caffe2BackendRep {
public:
void Run(
const caffe2::Predictor::TensorList& inputs,
caffe2::Predictor::TensorList* outputs);
void RunMap(
const caffe2::Predictor::TensorMap& inputs,
caffe2::Predictor::TensorList* outputs);
caffe2::NetDef& init_net() {
return init_net_;
}
caffe2::NetDef& pred_net() {
return pred_net_;
}
std::vector<std::string>& uninitialized_inputs() {
return uninitialized_inputs_;
}
const caffe2::NetDef& init_net() const {
return init_net_;
}
const caffe2::NetDef& pred_net() const {
return pred_net_;
}
const std::vector<std::string>& uninitialized_inputs() const {
return uninitialized_inputs_;
}
private:
void CheckInit();
caffe2::NetDef init_net_;
caffe2::NetDef pred_net_;
std::vector<std::string> uninitialized_inputs_;
std::unique_ptr<caffe2::Predictor> predictor_{nullptr};
};
} // namespace onnx
} // namespace caffe2
// File: pytorch-main/caffe2/onnx/helper.h
#pragma once
#include "caffe2/core/common.h"
#include "onnx/onnx_pb.h"
#include <set>
#include <string>
#include <unordered_set>
namespace caffe2 {
namespace onnx {
using ::ONNX_NAMESPACE::AttributeProto;
using ::ONNX_NAMESPACE::NodeProto;
// \brief This class generates unique dummy names
class TORCH_API DummyName {
public:
std::string NewDummyName();
void Reset(const std::unordered_set<std::string>& used_names);
void AddName(const std::string& new_used) {
used_names_.insert(new_used);
}
private:
std::unordered_set<std::string> used_names_;
size_t counter_{0};
};
::ONNX_NAMESPACE::TypeProto ExtraTypeProto(
const ::ONNX_NAMESPACE::TensorProto& tensor);
inline AttributeProto MakeAttribute(
const std::string& name,
const std::vector<int64_t>& vals) {
AttributeProto attr;
attr.set_name(name);
for (const auto v : vals) {
attr.add_ints(v);
}
attr.set_type(AttributeProto::INTS);
return attr;
}
inline AttributeProto MakeAttribute(
const std::string& name,
const std::vector<float>& vals) {
AttributeProto attr;
attr.set_name(name);
for (const auto v : vals) {
attr.add_floats(v);
}
attr.set_type(AttributeProto::FLOATS);
return attr;
}
inline AttributeProto MakeAttribute(const std::string& name, int64_t val) {
AttributeProto attr;
attr.set_name(name);
attr.set_i(val);
attr.set_type(AttributeProto::INT);
return attr;
}
inline AttributeProto MakeAttribute(
const std::string& name,
const std::string& val) {
AttributeProto attr;
attr.set_name(name);
attr.set_s(val);
attr.set_type(AttributeProto::STRING);
return attr;
}
inline AttributeProto MakeAttribute(
const std::string& name,
::ONNX_NAMESPACE::TensorProto& val) {
AttributeProto attr;
attr.set_name(name);
attr.mutable_t()->CopyFrom(val);
attr.set_type(AttributeProto::TENSOR);
return attr;
}
template <class T>
::ONNX_NAMESPACE::TensorProto MakeTensor(
const string& name,
const std::vector<T>& v,
const ::ONNX_NAMESPACE::TensorProto_DataType& data_type_) {
::ONNX_NAMESPACE::TensorProto ret;
ret.set_name(name);
ret.add_dims(v.size());
ret.set_data_type(data_type_);
ret.mutable_raw_data()->assign(
reinterpret_cast<const char*>(v.data()), v.size() * sizeof(T));
return ret;
}
TORCH_API NodeProto MakeNode(
const std::string& type,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs,
const std::vector<AttributeProto>& attributes,
const std::string& name = "");
inline NodeProto MakeNode(
const std::string& type,
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs,
const std::string& name = "") {
return MakeNode(type, inputs, outputs, {}, name);
}
} // namespace onnx
} // namespace caffe2
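// A hedged usage sketch (illustrative only, not part of the original header):
// building a small ONNX node with the helpers above. The op type, blob names
// and attribute value are arbitrary placeholders.
//
//   caffe2::onnx::DummyName dummy;
//   auto node = caffe2::onnx::MakeNode(
//       "Concat",
//       {"X0", "X1"},
//       {dummy.NewDummyName()},
//       {caffe2::onnx::MakeAttribute("axis", static_cast<int64_t>(1))},
//       "concat_node");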
// File: pytorch-main/caffe2/onnx/offline_tensor.h
#pragma once
#include <c10/core/Storage.h>
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
namespace caffe2 {
#ifndef C10_MOBILE
struct TORCH_API OfflineTensor {
// A shell tensor to record shape and dtype
Tensor shape_tensor{CPU};
void setShapeAndType(
const std::vector<int>& sizes,
at::Device device,
caffe2::TypeMeta data_type) {
shape_tensor.unsafeGetTensorImpl()->set_storage_and_dtype(
at::Storage::create_legacy(device), data_type);
shape_tensor.Resize(sizes);
CHECK(!shape_tensor.storage_initialized());
CHECK(shape_tensor.dtype_initialized());
}
};
class OfflineTensorShapeFunctions : public ExternalTensorFunctionsBase {
public:
explicit OfflineTensorShapeFunctions() : ExternalTensorFunctionsBase() {}
~OfflineTensorShapeFunctions() override {}
bool isQuantized() const override {
return false;
}
bool IsSameMetaType(TypeIdentifier id) override;
void SetupExternalTensorDescriptor(
const Blob* blob,
std::vector<std::vector<uint64_t>>* shapes,
std::vector<std::vector<float>>* all_scales,
std::vector<std::vector<int32_t>>* all_offsets,
ExternalTensorDescriptor* desc) override;
void LoadInfoOfBlob(
const Blob* /* unused */,
std::vector<float>* /* unused */,
std::vector<float>* /* unused */,
uint32_t* /* unused */) override {}
TypeIdentifier GetTypeMetaId() override;
TypeMeta GetExternalTensorType(const void* c) override;
vector<int64_t> GetExternalTensorInfo(
const void* c,
size_t* capacity,
DeviceOption* device) override;
};
#endif
} // namespace caffe2
// File: pytorch-main/caffe2/onnx/onnx_exporter.h
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/core/tensor.h"
#include "caffe2/onnx/helper.h"
#include "caffe2/proto/caffe2_pb.h"
#include "onnx/onnx_pb.h"
#include <string>
#include <unordered_map>
#include <vector>
namespace caffe2 {
namespace onnx {
namespace {
using ::ONNX_NAMESPACE::AttributeProto;
using ::ONNX_NAMESPACE::GraphProto;
using ::ONNX_NAMESPACE::ModelProto;
using ::ONNX_NAMESPACE::NodeProto;
using ::ONNX_NAMESPACE::TensorProto;
} // namespace
using ConvertedResult =
std::pair<std::vector<NodeProto>, std::vector<TensorProto>>;
// Useful utility function
void rewriteSubnet(
Argument* arg,
std::map<std::string, std::string> oldname_to_newname);
// Rewrite Caffe2 nets into SSA form. Note that we preserve the external
// output names of the predict net.
TORCH_API std::unordered_map<std::string, std::string> SsaRewrite(
caffe2::NetDef* init_net,
caffe2::NetDef* pred_net,
bool PreserveInPlaceOps = true);
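// A hedged usage sketch (illustrative only): a typical call before export,
// where `init_net` and `pred_net` are the NetDefs to be rewritten in place
// and the returned map records the original-to-SSA blob renaming.
//
//   std::unordered_map<std::string, std::string> renamed =
//       caffe2::onnx::SsaRewrite(&init_net, &pred_net);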
::ONNX_NAMESPACE::TensorProto::DataType Caffe2TypeToOnnxType(
caffe2::TensorProto::DataType t);
class TORCH_API OnnxExporter {
using SpecialOpConverter = ConvertedResult (OnnxExporter::*)(
const caffe2::OperatorDef&,
const std::unordered_map<std::string, caffe2::TensorShape>&);
public:
OnnxExporter(DummyName* dummy = nullptr) {
if (dummy) {
dummy_ = std::shared_ptr<DummyName>(dummy, [](DummyName*) {});
} else {
dummy_ = std::make_shared<DummyName>();
}
}
ConvertedResult Caffe2OpToOnnxNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
void InitOpToTensorProto(const caffe2::OperatorDef& def, TensorProto* tensor);
private:
ConvertedResult CommonCaffe2OpToOnnxNodes(const caffe2::OperatorDef& def);
ConvertedResult CreateArgMaxMinOpNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateBinaryElementwiseOpNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateCastNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateElementwiseLinearNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateConvPoolNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateGemmNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateReshapeNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateSliceNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateChannelShuffleNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateReduceMeanNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateConcatNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateMergeDimNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateLrnNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
ConvertedResult CreateUpsampleNodes(
const caffe2::OperatorDef& def,
const std::unordered_map<std::string, caffe2::TensorShape>& shapes);
  // \brief Check for blocklisted arguments that we won't pass down when
  // converting to an ONNX node
bool IsBlockListed(const caffe2::Argument& arg);
// \brief Convert Caffe2 argument to Onnx attribute
void CopyCaffe2ArgToOnnxAttr(
AttributeProto* attr,
const std::string& op_type,
const caffe2::Argument& arg);
// LUT getters
const std::unordered_map<std::string, std::string>& get_renamed_operators()
const;
const std::unordered_map<std::string, std::string>& get_renamed_attrs() const;
const std::
unordered_map<std::string, std::unordered_map<std::string, std::string>>&
get_per_op_renamed_attrs() const;
const std::unordered_map<std::string, OnnxExporter::SpecialOpConverter>&
get_special_operators() const;
// Dummy name generator
std::shared_ptr<DummyName> dummy_;
};
} // namespace onnx
} // namespace caffe2
// File: pytorch-main/caffe2/onnx/onnxifi_graph_info.h
#pragma once
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_map>
#include "caffe2/core/logging.h"
#include "caffe2/opt/shape_info.h"
#include "foxi/onnxifi_loader.h"
namespace caffe2 {
namespace onnx {
struct BackendGraphInfo {
onnxBackendID backend_id;
onnxBackend backend;
onnxGraph graph;
onnxifi_library* lib{nullptr};
std::unordered_map<std::string, ShapeInfo> weight_shape_info;
BackendGraphInfo(
onnxBackendID backend_id,
onnxBackend backend,
onnxGraph graph,
onnxifi_library* lib,
std::unordered_map<std::string, ShapeInfo>&& s)
: backend_id(backend_id),
backend(backend),
graph(graph),
lib(lib),
weight_shape_info(std::move(s)) {}
BackendGraphInfo(const BackendGraphInfo& other) = delete;
BackendGraphInfo& operator=(const BackendGraphInfo& other) = delete;
BackendGraphInfo(BackendGraphInfo&& other) noexcept {
backend_id = other.backend_id;
backend = other.backend;
graph = other.graph;
lib = other.lib;
weight_shape_info = std::move(other.weight_shape_info);
other.backend_id = other.backend = other.graph = other.lib = nullptr;
}
BackendGraphInfo& operator=(BackendGraphInfo&& other) {
backend_id = other.backend_id;
backend = other.backend;
graph = other.graph;
lib = other.lib;
weight_shape_info = std::move(other.weight_shape_info);
other.backend_id = other.backend = other.graph = other.lib = nullptr;
return *this;
}
~BackendGraphInfo() {
if (lib) {
onnxStatus err;
if (graph) {
err = lib->onnxReleaseGraph(graph);
if (err != ONNXIFI_STATUS_SUCCESS) {
LOG(ERROR) << "Error when calling onnxReleaseGraph";
}
}
if (backend) {
err = lib->onnxReleaseBackend(backend);
if (err != ONNXIFI_STATUS_SUCCESS) {
LOG(ERROR) << "Error when calling onnxReleaseBackend";
}
}
if (backend_id) {
err = lib->onnxReleaseBackendID(backend_id);
if (err != ONNXIFI_STATUS_SUCCESS) {
LOG(ERROR) << "Error when calling onnxReleaseBackendID";
}
}
}
}
};
using SharedPtrBackendGraphInfo = std::shared_ptr<BackendGraphInfo>;
// This class maintains a map of already created graph for nets+ops
class OnnxBackendGraphMap {
public:
OnnxBackendGraphMap() {}
// Make class noncopyable and nomovable.
OnnxBackendGraphMap(const OnnxBackendGraphMap&) = delete;
OnnxBackendGraphMap(OnnxBackendGraphMap&&) = delete;
OnnxBackendGraphMap operator=(const OnnxBackendGraphMap&) = delete;
OnnxBackendGraphMap operator=(OnnxBackendGraphMap&&) = delete;
SharedPtrBackendGraphInfo lookup(const std::string& key);
// If corresponding BackendGraphInfo already exists, return it directly.
// Otherwise we use creator to create the BackendGraphInfo shared_ptr and
// insert it into the map and return it. The whole process should be guarded
// by a lock. Note that since it will create the backend while holding the
// lock, expect latency during initialization phase when there are lots of
// models to compile.
SharedPtrBackendGraphInfo insert(
const std::string& key,
std::function<SharedPtrBackendGraphInfo()> creator);
void remove(const std::string& key);
private:
std::mutex backend_graph_map_lock_;
std::unordered_map<std::string, SharedPtrBackendGraphInfo> backend_graph_map_;
};
OnnxBackendGraphMap* getOnnxBackendGraphMap();
} // namespace onnx
} // namespace caffe2
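// A hedged usage sketch (illustrative only, not part of the original header).
// The key and the handles captured by the creator lambda are placeholders; the
// lambda only runs if no graph is cached under the key, and it runs while the
// map's lock is held.
//
//   auto* graph_map = caffe2::onnx::getOnnxBackendGraphMap();
//   auto graph_info = graph_map->insert("my_net_key", [&]() {
//     return std::make_shared<caffe2::onnx::BackendGraphInfo>(
//         backend_id, backend, graph, &lib, std::move(weight_shapes));
//   });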
// File: pytorch-main/caffe2/onnx/torch_ops/operator_sets.h
#pragma once
#include "onnx/defs/schema.h"
namespace ONNX_NAMESPACE {
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch,
1,
SparseLengthsSumFused8BitRowwise);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, SparseLengthsSum);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, SparseLengthsWeightedSum);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, BatchGather);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, DotProduct);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, FCTransposed);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, BatchMatMul);
class ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(PyTorch, 1, ExpandDims);
// Iterate over schema from ai.onnx.pytorch domain opset 1
class OpSet_PyTorch_ver1 {
public:
static void ForEachSchema(std::function<void(OpSchema&&)> fn) {
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, SparseLengthsSumFused8BitRowwise)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, SparseLengthsSum)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, SparseLengthsWeightedSum)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, BatchGather)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, DotProduct)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, FCTransposed)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, BatchMatMul)>());
fn(GetOpSchema<ONNX_OPERATOR_SET_SCHEMA_CLASS_NAME(
PyTorch, 1, ExpandDims)>());
}
};
inline void RegisterPyTorchOperatorSetSchema() {
RegisterOpSetSchema<OpSet_PyTorch_ver1>();
}
} // namespace ONNX_NAMESPACE
// File: pytorch-main/caffe2/operators/accumulate_op.h
#ifndef CAFFE2_OPERATORS_ACCUMULATE_OP_H_
#define CAFFE2_OPERATORS_ACCUMULATE_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
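// AccumulateOp computes output = input + gamma * output in place, i.e. it
// accumulates the input into the (already allocated) output, scaling the
// previous contents by gamma.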
template <typename T, class Context>
class AccumulateOp final : public Operator<Context> {
public:
template <class... Args>
explicit AccumulateOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
gamma_(static_cast<T>(
this->template GetSingleArgument<float>("gamma", 1.0))) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
auto& input = Input(0);
// TODO: the operator depends on output being set to 0 before the run
auto* output = Output(0, input.sizes(), at::dtype<T>());
math::Axpby<T, T, Context>(
input.numel(),
static_cast<T>(1),
input.template data<T>(),
gamma_,
output->template mutable_data<T>(),
&context_);
return true;
}
protected:
T gamma_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_ACCUMULATE_OP_H_