Dataset schema: repo (string, 1–152 chars, nullable) | file (string, 14–221 chars) | code (string, 501–25k chars) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes)
repo: null
file: pytorch-main/aten/src/ATen/detail/MPSHooksInterface.h
// Copyright © 2022 Apple Inc.
#pragma once
#include <c10/core/Allocator.h>
#include <ATen/core/Generator.h>
#include <c10/util/Exception.h>
#include <c10/util/Registry.h>
#include <cstddef>
#include <functional>
#include <string>
namespace at {
class Context;
}
namespace at {
struct TORCH_API MPSHooksInterface {
// Fails with an error if an MPSHooks function is called while the
// MPS backend is not present.
#define FAIL_MPSHOOKS_FUNC(func) \
TORCH_CHECK(false, "Cannot execute ", func, "() without MPS backend.");
virtual ~MPSHooksInterface() = default;
// Initialize the MPS library state
virtual void initMPS() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual bool hasMPS() const {
return false;
}
virtual bool isOnMacOS13orNewer(unsigned minor = 0) const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual const Generator& getDefaultMPSGenerator() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual Allocator* getMPSDeviceAllocator() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual void deviceSynchronize() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual void commitStream() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual void* getCommandBuffer() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual void* getDispatchQueue() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual void emptyCache() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual size_t getCurrentAllocatedMemory() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual size_t getDriverAllocatedMemory() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual void setMemoryFraction(double /*ratio*/) const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const {
FAIL_MPSHOOKS_FUNC(__func__);
}
virtual void profilerStopTrace() const {
FAIL_MPSHOOKS_FUNC(__func__);
}
#undef FAIL_MPSHOOKS_FUNC
};
struct TORCH_API MPSHooksArgs {};
TORCH_DECLARE_REGISTRY(MPSHooksRegistry, MPSHooksInterface, MPSHooksArgs);
#define REGISTER_MPS_HOOKS(clsname) \
C10_REGISTER_CLASS(MPSHooksRegistry, clsname, clsname)
namespace detail {
TORCH_API const MPSHooksInterface& getMPSHooks();
} // namespace detail
} // namespace at
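The hooks pattern above (shared by the MTIA/ORT/XPU headers that follow) is wired up through c10's registry. Below is a minimal sketch of the backend-side registration; the class name and member bodies are invented here for illustration, but the registration macro and the constructor-from-args convention come from the header itself.

// Illustrative only: roughly what an MPS-enabled build would provide.
#include <ATen/detail/MPSHooksInterface.h>

namespace at::mps {

struct MPSHooks : public at::MPSHooksInterface {
  // The registry constructs hooks from the (empty) args struct.
  explicit MPSHooks(at::MPSHooksArgs) {}
  void initMPS() const override { /* set up MPS library state */ }
  bool hasMPS() const override { return true; }
  // ... remaining overrides ...
};

// Registers the class so at::detail::getMPSHooks() can lazily create it;
// when nothing is registered, callers get the base interface above, whose
// methods error out.
REGISTER_MPS_HOOKS(MPSHooks);

} // namespace at::mps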
file_length: 2254 | avg_line_length: 24.055556 | max_line_length: 91 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/detail/MTIAHooksInterface.h
#pragma once
#include <c10/core/Device.h>
#include <c10/util/Exception.h>
#include <c10/util/Registry.h>
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
namespace at {
class Context;
}
// We use forward declaration here instead of #include <ATen/dlpack.h> to avoid
// leaking DLPack implementation detail to every project that includes `ATen/Context.h`, which in turn
// would lead to a conflict when linked with another project using DLPack (for example TVM)
struct DLDevice_;
namespace at {
constexpr const char* MTIA_HELP =
"The MTIA backend requires the MTIA extension for PyTorch; "
"this error has occurred because you are trying "
"to use some MTIA functionality without the MTIA extension included.";
struct TORCH_API MTIAHooksInterface {
virtual ~MTIAHooksInterface() = default;
virtual void initMTIA() const {
TORCH_CHECK(
false,
"Cannot initialize MTIA without MTIA Extension for PyTorch. ",
MTIA_HELP);
}
virtual bool hasMTIA() const {
return false;
}
virtual std::string showConfig() const {
TORCH_CHECK(
false,
"Cannot query detailed MTIA version without MTIA Extension for PyTorch. ",
MTIA_HELP);
}
};
struct TORCH_API MTIAHooksArgs {};
C10_DECLARE_REGISTRY(MTIAHooksRegistry, MTIAHooksInterface, MTIAHooksArgs);
#define REGISTER_MTIA_HOOKS(clsname) \
C10_REGISTER_CLASS(MTIAHooksRegistry, clsname, clsname)
namespace detail {
TORCH_API const MTIAHooksInterface& getMTIAHooks();
} // namespace detail
} // namespace at
file_length: 1538 | avg_line_length: 24.65 | max_line_length: 102 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/detail/ORTHooksInterface.h
#pragma once
#include <c10/util/Exception.h>
#include <c10/util/Registry.h>
#include <string>
constexpr const char* ORT_HELP =
" You need to 'import torch_ort' to use the 'ort' device in PyTorch. "
"The 'torch_ort' module is provided by the ONNX Runtime itself "
"(https://onnxruntime.ai).";
// NB: Class must live in `at` due to limitations of Registry.h.
namespace at {
struct TORCH_API ORTHooksInterface {
// This should never actually be implemented, but it is used to
// squelch -Werror=non-virtual-dtor
virtual ~ORTHooksInterface() = default;
virtual std::string showConfig() const {
TORCH_CHECK(false, "Cannot query detailed ORT version information.", ORT_HELP);
}
};
// NB: dummy argument to suppress "ISO C++11 requires at least one argument
// for the "..." in a variadic macro"
struct TORCH_API ORTHooksArgs {};
TORCH_DECLARE_REGISTRY(ORTHooksRegistry, ORTHooksInterface, ORTHooksArgs);
#define REGISTER_ORT_HOOKS(clsname) \
C10_REGISTER_CLASS(ORTHooksRegistry, clsname, clsname)
namespace detail {
TORCH_API const ORTHooksInterface& getORTHooks();
} // namespace detail
} // namespace at
file_length: 1113 | avg_line_length: 29.108108 | max_line_length: 83 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/detail/XPUHooksInterface.h
#pragma once
#include <c10/core/Device.h>
#include <c10/util/Exception.h>
#include <c10/util/Registry.h>
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
namespace at {
class Context;
}
// We use forward declaration here instead of #include <ATen/dlpack.h> to avoid
// leaking DLPack implementation detail to every project that includes `ATen/Context.h`, which in turn
// would lead to a conflict when linked with another project using DLPack (for example TVM)
struct DLDevice_;
namespace at {
constexpr const char* XPU_HELP =
"The XPU backend requires Intel Extension for PyTorch; "
"this error has occurred because you are trying "
"to use some XPU functionality, but the Intel Extension for PyTorch has not been "
"loaded for some reason. The Intel Extension for PyTorch MUST "
"be loaded, EVEN IF you don't directly use any symbols from it!";
struct TORCH_API XPUHooksInterface {
virtual ~XPUHooksInterface() = default;
virtual void initXPU() const {
TORCH_CHECK(
false,
"Cannot initialize XPU without Intel Extension for PyTorch. ",
XPU_HELP);
}
virtual bool hasXPU() const {
return false;
}
virtual std::string showConfig() const {
TORCH_CHECK(
false,
"Cannot query detailed XPU version without Intel Extension for PyTorch. ",
XPU_HELP);
}
virtual Device getATenDeviceFromDLPackDevice(
const DLDevice_& dl_device,
void* data) const {
TORCH_CHECK(
false,
"Cannot get XPU device without Intel Extension for PyTorch. ",
XPU_HELP);
}
virtual DLDevice_& getDLPackDeviceFromATenDevice(
DLDevice_& dl_device,
const Device& aten_device,
void* data) const {
TORCH_CHECK(
false,
"Cannot get XPU DL device without Intel Extension for PyTorch. ",
XPU_HELP);
}
};
struct TORCH_API XPUHooksArgs {};
C10_DECLARE_REGISTRY(XPUHooksRegistry, XPUHooksInterface, XPUHooksArgs);
#define REGISTER_XPU_HOOKS(clsname) \
C10_REGISTER_CLASS(XPUHooksRegistry, clsname, clsname)
namespace detail {
TORCH_API const XPUHooksInterface& getXPUHooks();
} // namespace detail
} // namespace at
file_length: 2176 | avg_line_length: 25.876543 | max_line_length: 102 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/ADInterpreters.h
#pragma once
#include <ATen/functorch/Interpreter.h>
namespace at::functorch {
// These are the interpreters for our AD transforms
// (grad, vjp and jvp).
// See NOTE: [functorch interpreter stack] for more details.
struct TORCH_API GradInterpreterPtr {
explicit GradInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Grad); }
TransformType key() const { return base_->key(); }
int64_t level() const { return base_->level(); }
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
bool prevGradMode() const {
return c10::get<GradInterpreterMeta>(base_->meta()).prevGradMode_;
}
Tensor lift(const Tensor& tensor) const;
private:
const Interpreter* base_;
};
struct TORCH_API JvpInterpreterPtr {
explicit JvpInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Jvp); }
TransformType key() const { return base_->key(); }
int64_t level() const { return base_->level(); }
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
bool prevFwdGradMode() const {
return c10::get<JvpInterpreterMeta>(base_->meta()).prevFwdGradMode_;
}
Tensor lift(const Tensor& tensor) const;
private:
const Interpreter* base_;
};
} // namespace at::functorch
file_length: 1560 | avg_line_length: 39.025641 | max_line_length: 130 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/BatchedFallback.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/ATen.h>
#include <ATen/core/op_registration/op_registration.h>
#include <torch/library.h>
namespace at::functorch {
// This file contains code for the vmap fallback (also known as the
// BatchedTensor fallback or the Batched fallback), which runs when an
// operation doesn't have a batching rule implemented. The fallback doesn't
// work on out= variants or view operations; that is, it works for
// out-of-place operations and in-place non-view operations.
//
// For out-of-place operations, the fallback effectively takes all of the
// BatchedTensors in `stack`, slices them, and runs `op` on all of the
// corresponding slices to produce slices of the outputs. The output slices
// then get `torch.stack`ed to create the
// final returns.
//
// The performance of the fallback is not very good because it introduces an
// extra copy from stacking the sliced outputs. Because of this, we prefer to
// write batching rules for operators whenever possible.
void batchedTensorForLoopFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
void vmapErrorFallback(const c10::OperatorHandle& op, torch::jit::Stack* stack);
// The vmap fallback emits a warning by default, but it may be disabled if
// the user finds it to be too annoying.
TORCH_API bool isVmapFallbackWarningEnabled();
TORCH_API void setVmapFallbackWarningEnabled(bool enabled);
// Used for testing. The vmap fallback is enabled by default. When it is disabled,
// it raises an error.
TORCH_API bool isVmapFallbackEnabled();
TORCH_API void setVmapFallbackEnabled(bool enabled);
template <typename A> A vector_to_result(const std::vector<IValue>& buffer) {
return buffer[0].to<A>();
}
template <typename A, typename B> std::tuple<A, B> vector_to_result(const std::vector<IValue>& buffer) {
return std::make_tuple(buffer[0].to<A>(), buffer[1].to<B>());
}
template <typename A, typename B, typename C> std::tuple<A, B, C> vector_to_result(const std::vector<IValue>& buffer) {
return std::make_tuple(buffer[0].to<A>(), buffer[1].to<B>(), buffer[2].to<C>());
}
// slow_fallback is a way to call the vmap fallback inside some boxed kernel.
// There is probably some better way to metaprogram this.
template <typename Ret>
Ret slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
std::vector<IValue> stack(args.begin(), args.end());
batchedTensorForLoopFallback(op, &stack);
return vector_to_result<Ret>(stack);
}
template <typename A, typename B>
std::tuple<A, B> slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
std::vector<IValue> stack(args.begin(), args.end());
batchedTensorForLoopFallback(op, &stack);
return vector_to_result<A, B>(stack);
}
template <typename A, typename B, typename C>
std::tuple<A, B, C> slow_fallback(const c10::OperatorHandle& op, ArrayRef<IValue> args) {
std::vector<IValue> stack(args.begin(), args.end());
batchedTensorForLoopFallback(op, &stack);
return vector_to_result<A, B, C>(stack);
}
} // namespace at::functorch
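To make the shape of these helpers concrete, here is a hedged sketch of calling the fallback from inside a boxed kernel; the function name is hypothetical, and a real caller would obtain `op` from the dispatcher.

// Hypothetical helper that gives up on writing a batching rule and defers a
// single-output op (e.g. aten::add.Tensor) to the for-each-slice fallback.
Tensor add_via_fallback_sketch(
    const c10::OperatorHandle& op,
    const Tensor& self,
    const Tensor& other,
    const Scalar& alpha) {
  // Boxes the arguments, runs batchedTensorForLoopFallback over the stack,
  // then unboxes the single Tensor result via vector_to_result<Tensor>.
  return slow_fallback<Tensor>(op, {self, other, alpha});
}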
file_length: 3341 | avg_line_length: 40.259259 | max_line_length: 119 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/BatchedTensorImpl.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <bitset>
#include <utility>
#include <ATen/ArrayRef.h>
#include <ATen/SmallVector.h>
#include <ATen/Tensor.h>
namespace at::functorch {
using Tensor = at::Tensor;
// We assume this in a few other places in the codebase,
// but there isn't a centralized definition.
constexpr int64_t kVmapMaxTensorDims = 64;
// The valid vmap levels range from [0, 64). This effectively means that we
// support a maximum of 64 nested vmaps.
constexpr int64_t kVmapNumLevels = 64;
// Store this number of elements of BatchDims on the stack. Most people will
// probably use <= 5 nested vmaps, but adjust this number as necessary.
constexpr int64_t kBatchDimsStackSize = 5;
// A BatchedTensorImpl holds an underlying Tensor and a single batch dim.
// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
// BatchedTensorImpl.
//
// The batch dimensions are treated as being "private"; they are not user-visible.
// For example, in the following Tensor,
// bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0)
// dimension 0 is batch dimension.
//
// bt.sizes() returns (3, 5, 7); bt.sum(0) performs a reduction over the (public)
// dim 0, which is equivalent to dim 1 in the underlying ones(2, 3, 5, 7) tensor.
struct TORCH_API BatchedTensorImpl : public c10::TensorImpl {
explicit BatchedTensorImpl(at::DispatchKeySet key_set, Tensor value, int64_t dim, int64_t level);
// Returns batch dimension of this tensor
int64_t bdim() const { return bdim_; }
// Returns the vmap level of this tensor
int64_t level() const { return level_; }
// BatchedTensorImpl wraps a Tensor
const Tensor& value() const { return value_; }
// Given a public dimension index, return the dimension index in the underlying
// value() tensor.
// For example, if we have
// bt = BatchedTensorImpl(ones(2, 3, 5, 7), lvl=1, dim=0)
// bt.actualDim(0) -> 1
// bt.actualDim(1) -> 2
// bt.actualDim(2) -> 3
// bt.actualDim(3) -> Error
int64_t actualDim(int64_t dim, bool wrap_dim = true) const;
// We have to override this because we opted into CustomStrides
IntArrayRef strides_custom() const override;
SymIntArrayRef sym_strides_custom() const override;
// Override a bunch of methods inherited from TensorImpl to return error messages.
bool is_contiguous_custom(at::MemoryFormat memory_format=at::MemoryFormat::Contiguous) const override;
void set_size(int64_t dim, int64_t new_size) override;
void set_stride(int64_t dim, int64_t new_stride) override;
void set_storage_offset(int64_t storage_offset) override;
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
const c10::VariableVersion& version_counter,
bool allow_tensor_metadata_change) const override;
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
c10::VariableVersion&& version_counter,
bool allow_tensor_metadata_change) const override;
void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
#ifdef DEBUG
bool has_storage() const override;
#endif
void refreshTensorMetadata();
// Used in torchdim. torchdim uses non-lexical BatchedTensor; the way it
// accomplishes this is a hack where it is able to modify the levels of
// BatchedTensor to match the level of the current vmap transform.
void _unsafe_set_level(int64_t level) {
level_ = level;
}
// Used in batching rule for in-place view operations that can change
// the index of the bdim (think squeeze_, unsqueeze_)
void unsafe_set_bdim(int64_t bdim) {
// NB: you MUST call refreshTensorMetadata after doing this.
bdim_ = bdim;
}
private:
// see NOTE: [BatchedTensorImpl levels invariant]
void checkInvariants() const;
const char* tensorimpl_type_name() const override;
Tensor value_;
int64_t level_;
int64_t bdim_;
};
// NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a
// BatchedTensorImpl.
inline bool isBatchedTensor(const Tensor& tensor) {
return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::FuncTorchBatched);
}
// It is unsafe to call this on a Tensor that is not backed by a
// BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible.
inline BatchedTensorImpl* unsafeGetBatchedImpl(Tensor tensor) {
return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl());
}
inline BatchedTensorImpl* maybeGetBatchedImpl(Tensor tensor) {
if (!isBatchedTensor(tensor)) {
return nullptr;
}
return unsafeGetBatchedImpl(std::move(tensor));
}
// Returns a bitset. If bit i is set, then that means dim i is a batchdim.
inline std::bitset<kVmapMaxTensorDims> createBatchDimBitset(int64_t dim) {
std::bitset<kVmapMaxTensorDims> is_bdim;
is_bdim.set(dim);
return is_bdim;
}
// Creates a bitset for the given level
inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(int64_t level) {
std::bitset<kVmapNumLevels> result;
result.set(level);
return result;
}
// Use this to construct a BatchedTensor from a regular Tensor
TORCH_API Tensor makeBatched(const Tensor& tensor, int64_t dim, int64_t level);
// Adds a batch dim to `tensor`, returning a BatchedTensor
TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t dim, int64_t level);
// Certain dispatch keys must be propagated to the BatchedTensor (or, in general,
// any wrapper Tensor subclasses). This is because there are methods on Tensor
// that skip dispatch and check for the presence of a dispatch key (e.g. is_cpu()).
// TODO: should probably contain more (or all?) backend keys
constexpr DispatchKeySet kKeysToPropagateToWrapper({
DispatchKey::Negative,
DispatchKey::Conjugate,
DispatchKey::XLA,
DispatchKey::CUDA,
DispatchKey::CPU,
});
inline DispatchKeySet getKeysToPropagateToWrapper(const Tensor& tensor, DispatchKeySet to_propagate=kKeysToPropagateToWrapper) {
auto key_set = tensor.unsafeGetTensorImpl()->key_set();
return key_set & to_propagate;
}
} // namespace at::functorch
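A small sketch tying the helpers together, following the actualDim example in the comments above; the shapes and level are illustrative, and this assumes it runs while a vmap transform at level 1 is active.

// Wrap ones(2, 3, 5, 7) with a batch dim at physical dim 0, vmap level 1.
// Batching rules then see a logical tensor of sizes (3, 5, 7).
Tensor bt = makeBatched(at::ones({2, 3, 5, 7}), /*dim=*/0, /*level=*/1);
BatchedTensorImpl* impl = maybeGetBatchedImpl(bt);
TORCH_INTERNAL_ASSERT(impl != nullptr);
TORCH_INTERNAL_ASSERT(impl->bdim() == 0 && impl->level() == 1);
// Logical dim 0 sits behind the private batch dim, so it is physical dim 1.
TORCH_INTERNAL_ASSERT(impl->actualDim(0) == 1);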
file_length: 6166 | avg_line_length: 35.928144 | max_line_length: 128 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/BatchingMetaprogramming.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/Tensor.h>
#include <ATen/VmapGeneratedPlumbing.h>
// This file contains template metaprogramming things that are used for our
// batching rules.
//
// See NOTE: [vmap plumbing] for more details on why this is necessary.
// The plumbing has a bunch of metaprogramming hacks for determining the signature
// of a batching rule from the signature of the operator, many of which use the
// helper functions in this file.
namespace at::functorch {
// Metaprogramming things
template <class... Items> using typelist = c10::guts::typelist::typelist<Items...>;
template <class TypeList> using head_t = c10::guts::typelist::head_t<TypeList>;
template <class TL1, class TL2> using concat_t = c10::guts::typelist::concat_t<TL1, TL2>;
template <typename T> class debug_t;
// tail operation
template<class TypeList>
struct tail final {
static_assert(c10::guts::false_t<TypeList>::value,
"In typelist::tail<T>, the T argument must be typelist<...>.");
};
template<class Head, class... Tail>
struct tail<typelist<Head, Tail...>> final {
using type = typelist<Tail...>;
};
template<class TypeList> using tail_t = typename tail<TypeList>::type;
template <class First, class Second, class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext {
using type = Next;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<Tensor, optional<int64_t>, Next, Tail> {
using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<const Tensor&, optional<int64_t>, Next, Tail> {
using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<Tensor&, optional<int64_t>, Next, Tail> {
using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<optional<Tensor>, optional<int64_t>, Next, Tail> {
using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<const optional<Tensor>&, optional<int64_t>, Next, Tail> {
using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<optional<Tensor>&, optional<int64_t>, Next, Tail> {
using type = Tail;
};
template <class Next, class Tail>
struct IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<std::vector<Tensor>, optional<int64_t>, Next, Tail> {
using type = Tail;
};
template <class TypeList> struct RemoveBatchDimAfterTensor {
using first = head_t<TypeList>;
using next = tail_t<TypeList>;
using second = head_t<next>;
using tail = tail_t<next>;
using type = concat_t<
typelist<first>,
typename RemoveBatchDimAfterTensor<
typename IfFirstIsTensorAndSecondisBatchDimThenTailElseNext<first, second, next, tail>::type
>::type
>;
};
template <class Type> struct RemoveBatchDimAfterTensor<typelist<Type>> {
using type = typelist<Type>;
};
template <> struct RemoveBatchDimAfterTensor<typelist<>> {
using type = typelist<>;
};
template<class TypeList> using remove_batch_dim_after_tensor_t = typename RemoveBatchDimAfterTensor<TypeList>::type;
template <typename T> struct UnpackSingleItemTuple {
using type = T;
};
template <typename T> struct UnpackSingleItemTuple<std::tuple<T>> {
using type = T;
};
template <typename T> using unpack_single_item_tuple_t = typename UnpackSingleItemTuple<T>::type;
template <typename Return, typename TupleArgs> struct BuildFunctionHelper;
template <typename Return, typename... Args> struct BuildFunctionHelper<Return, std::tuple<Args...>> {
using type = Return(Args...);
};
template <typename Return, typename TL>
struct BuildFunction {
using type = typename BuildFunctionHelper<Return, c10::guts::typelist::to_tuple_t<TL>>::type;
};
template <typename Return, typename TL> using build_function_t = typename BuildFunction<Return, TL>::type;
template <typename batch_rule_t> struct ToOperatorType {
using batch_rule_return_type = typename c10::guts::function_traits<batch_rule_t>::return_type;
using batch_rule_parameter_types = typename c10::guts::function_traits<batch_rule_t>::parameter_types;
using operator_parameter_types = remove_batch_dim_after_tensor_t<batch_rule_parameter_types>;
using operator_return_type =
unpack_single_item_tuple_t<
c10::guts::typelist::to_tuple_t<
remove_batch_dim_after_tensor_t<
c10::guts::typelist::from_tuple_t<batch_rule_return_type>>>>;
using type = build_function_t<operator_return_type, operator_parameter_types>;
};
template <typename batch_rule_t> using to_operator_t = typename ToOperatorType<batch_rule_t>::type;
} // namespace at::functorch
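To see what the machinery above computes, a sketch (assumed to live inside namespace at::functorch, like the header); the batch rule type is hypothetical but follows the sin_batch_rule convention from NOTE: [vmap plumbing].

// A batch rule for a unary op takes (tensor, optional batch dim) and
// returns (tensor, optional batch dim):
using sin_batch_rule_t =
    std::tuple<Tensor, optional<int64_t>> (const Tensor&, optional<int64_t>);

// RemoveBatchDimAfterTensor drops the optional<int64_t> trailing each Tensor,
// and UnpackSingleItemTuple unwraps the single-element result tuple, so the
// recovered operator type matches the unbatched C++ signature of aten::sin:
static_assert(
    std::is_same<to_operator_t<sin_batch_rule_t>, Tensor(const Tensor&)>::value,
    "the plumbing recovers the operator signature from the batch rule");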
file_length: 4940 | avg_line_length: 37.905512 | max_line_length: 116 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/DynamicLayer.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/functorch/Macros.h>
#include <c10/core/DispatchKey.h>
#include <ATen/core/function_schema.h>
#include <c10/util/Optional.h>
#include <c10/util/variant.h>
#include <unordered_map>
#include <mutex>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <ATen/functorch/Interpreter.h>
#include <ATen/functorch/VmapInterpreter.h>
#include <ATen/functorch/ADInterpreters.h>
#include <ATen/functorch/FunctionalizeInterpreter.h>
// Forward declared
namespace c10 { struct AutogradMetaInterface; }
namespace at::functorch {
// This file contains the implementation of functorch's interpreter stack.
// See NOTE: [functorch interpreter stack] first before reading on.
//
// NB: the functorch interpreter stack is also referred to as:
// - the "dynamic layer stack" -- an older name for "interpreter" was
// "dynamic layer".
// - the "functorch mode stack". You can think of each functorch transform as a
// "mode" (in the same sense as torch_dispatch mode or torch_function mode),
// and functorch as an implementation of a "mode stack" where the modes
// may be arbitrarily composed.
// DynamicLayer is basically the same thing as an Interpreter.
// It represents a functorch transform and it holds an Interpreter,
// which contains metadata related to the transform and instructions on
// how to perform the transform.
//
// TODO: we can excise DynamicLayer in favor of Interpreter,
// but I am going to leave it for now as a compatibility shim to avoid
// needing to refactor a lot of callsites...
struct TORCH_API DynamicLayer {
explicit DynamicLayer(
TransformType transform_type,
int64_t layerId,
optional<c10::SymInt> batchSize = nullopt,
optional<RandomnessType> randomness = nullopt,
optional<bool> prev_grad_mode = nullopt,
optional<bool> prev_fwd_grad_mode = nullopt,
optional<bool> functionalize_add_back_views = nullopt);
TransformType key() const;
int64_t layerId() const;
const Interpreter& interpreter() const { return interpreter_; }
Interpreter& interpreter() { return interpreter_; }
// Only valid for vmap
c10::SymInt batchSize() const;
RandomnessType randomness() const;
private:
Interpreter interpreter_;
};
TORCH_API int64_t initAndPushDynamicLayer(
TransformType transform_type,
optional<c10::SymInt> batch_size = nullopt,
optional<RandomnessType> randomness = nullopt,
optional<bool> prev_grad_mode = nullopt,
optional<bool> prev_fwd_grad_mode = nullopt,
optional<bool> functionalize_add_back_views = nullopt);
TORCH_API DynamicLayer popDynamicLayerAndDeleteMetadata();
TORCH_API c10::optional<DynamicLayer> maybeCurrentDynamicLayer();
TORCH_API const std::vector<DynamicLayer>& getDynamicLayerStack();
TORCH_API void setDynamicLayerStack(const std::vector<DynamicLayer>& stack);
TORCH_API void setDynamicLayerFrontBackKeysIncluded(bool included);
// NOTE: [Life handles and lexically scoped transforms]
// functorch transforms are lexically scoped.
// Given a level, we store a "life handle" that is a boolean that tells us if the
// transform with that level is active or not.
//
// functorch's TensorWrapper (for grad transforms) stores a life handle.
// If a TensorWrapper escapes from the scope of the transform, then somehow
// it must know it escaped; it can tell by querying the life handle.
TORCH_API const std::shared_ptr<bool>& getLifeHandleForLevel(int64_t level);
// Returns whether an operator is in-place. An operator is in-place if:
// 1. The first argument is a Tensor and it is being written to
// 2. The first argument is being returned
// 3. No other arguments are aliased
// Here is an example of an in-place operator:
// add_(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)
TORCH_API bool isInplaceOp(const c10::FunctionSchema& schema);
// Given the index of an unwrapped (immutable) input and the schema, this
// returns the index of the output (if any) that aliases it and so should
// also remain unwrapped
TORCH_API c10::optional<size_t> findAliasedOutput(const FunctionSchema& schema, const int64_t immutable_input);
TORCH_API Tensor unwrapIfDead(const Tensor& tensor);
TORCH_API bool isDeadTensorWrapper(const Tensor& tensor);
// Pretty printers
TORCH_API std::ostream& operator<<(std::ostream& os, const DynamicLayer& layer);
TORCH_API std::ostream& operator<<(std::ostream& os, const std::vector<DynamicLayer>& dynamicLayerStack);
// While a functorch transform is active, torch.autograd.function._SingleLevelFunction
// is disabled by default. The following two APIs toggle whether it is enabled;
// they are not user-facing. We can delete this in the future, but it is
// useful for debugging when something goes wrong with the
// autograd.Function <> functorch interaction, which uses _SingleLevelFunction,
// because it leads to loud errors if something is incorrect.
TORCH_API void setSingleLevelAutogradFunctionAllowed(bool allowed);
TORCH_API bool getSingleLevelAutogradFunctionAllowed();
// While a functorch grad transform is active, Tensor.requires_grad_() gets
// disabled. These two functions are the mechanism for controlling that.
TORCH_API void setInplaceRequiresGradAllowed(bool allowed);
TORCH_API bool getInplaceRequiresGradAllowed();
TORCH_API DynamicLayer popDynamicLayer();
TORCH_API int64_t pushDynamicLayer(DynamicLayer&& layer);
} // namespace at::functorch
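As a hedged sketch of how a transform brackets its scope with the stack APIs above (this is not how any particular transform is literally written; error handling and the call into the user's function are elided):

// Entering a grad transform: push an interpreter that records the previous
// grad mode, run the transformed function, then pop and clean up metadata.
int64_t level = initAndPushDynamicLayer(
    TransformType::Grad,
    /*batch_size=*/nullopt,
    /*randomness=*/nullopt,
    /*prev_grad_mode=*/c10::GradMode::is_enabled());
// ... run the user's function; tensors created inside are wrapped at `level` ...
DynamicLayer layer = popDynamicLayerAndDeleteMetadata();
TORCH_INTERNAL_ASSERT(layer.layerId() == level);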
file_length: 5544 | avg_line_length: 42.320313 | max_line_length: 125 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/Interpreter.h
#pragma once
#include <ATen/functorch/Macros.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <c10/util/Optional.h>
#include <c10/util/variant.h>
#include <bitset>
namespace at::functorch {
// NOTE: [functorch interpreter stack]
//
// functorch's dispatching system uses a stack of interpreters.
// Historically we've referred to this as the "DynamicLayerStack".
//
// An interpreter is something that reads in the code it is passed
// and then executes it. We have a different interpreter per-transform:
// the "VmapInterpreter" is responsible for reading in operators (like aten::mv)
// and executing the batched version of it (the batching rule for aten::mv).
//
// Concretely, each interpreter is responsible for two things:
//
// 1) process(ophandle, stack)
// Given an operator handle and a stack of arguments, the interpreter is
// responsible for figuring out how to execute the operation under the semantics
// of the interpreter. For the VmapInterpreter, e.g., this means figuring out how
// to call the batching rule.
//
// The batching rules are stored as kernels on the FuncTorchBatched key, so the way
// VmapInterpreter calls the batching rule is roughly: (A) exclude all
// dispatch keys aside from the Batched key, (B) redispatch so we get to the
// Batched key.
//
// 2) sendToNextInterpreter(ophandle, stack)
// The VmapInterpreter, when it sees aten::mv, will process it into a call to
// aten::mm. It then needs to send the call to aten::mm to the next interpreter
// in the interpreter stack.
//
// The VmapInterpreter just does this via a call to ophandle.callBoxed(stack)
// and most Interpreters will implement it this way.
enum class RandomnessType {
Error, // always errors when calling a random function
Same, // randomness appears the same across batches
Different, // randomness appears different across batches
END
};
enum class TransformType {
Torch, // Unused
Vmap,
Grad, // reverse-mode AD, aka vjp
Jvp, // forward-mode AD
Functionalize,
};
std::ostream& operator<<(std::ostream& os, const TransformType& t);
// NOTE: [Interpreter "subclassing" design]
//
// How are various Interpreters for different transforms (vmap, grad, ...)
// implemented?
//
// Accessing interpreters is in the hot-path of functorch so we have a constraint
// that this code must be as fast as possible.
//
// As a result, we stay away from virtual methods and this causes our code
// to look a little funny.
//
// `Interpreter` is the struct for Interpreters. It holds ALL of the
// relevant information (what type of interpreter it is and the metadata).
// Metadata for each interpreter is represented as a Union (c10::variant)
// of all possible metadata (VmapInterpreterMeta, GradInterpreterMeta, ...).
//
// Given an Interpreter, how do I get a "VmapInterpreter"? You may wish to do this
// if you want to access the metadata fields (like batchSize and randomness).
//
// Each type of interpreter (e.g. Vmap) has a convenience struct
// (e.g. VmapInterpreterPtr) associated with it.
//
// Construct the convenience struct with VmapInterpreterPtr(Interpreter*),
// and then one can access methods on VmapInterpreterPtr like so:
// >>> VmapInterpreterPtr(&interpreter).batchSize()
//
// Finally, Interpreter::process switches on the type of the interpreter
// and calls one of {Transform}Interpreter::processImpl under the hood.
// Same for Interpreter::sendToNextInterpreter :)
struct VmapInterpreterMeta {
explicit VmapInterpreterMeta(c10::SymInt batchSize, RandomnessType randomness) :
batchSize_(std::move(batchSize)), randomness_(randomness) {}
c10::SymInt batchSize_;
RandomnessType randomness_;
};
struct GradInterpreterMeta {
explicit GradInterpreterMeta(bool prevGradMode): prevGradMode_(prevGradMode) {}
bool prevGradMode_;
};
struct JvpInterpreterMeta {
explicit JvpInterpreterMeta(bool prevFwdGradMode) : prevFwdGradMode_(prevFwdGradMode) {}
bool prevFwdGradMode_;
};
struct FunctionalizeInterpreterMeta {
explicit FunctionalizeInterpreterMeta(bool functionalizeAddBackViews) :
functionalizeAddBackViews_(functionalizeAddBackViews) {}
bool functionalizeAddBackViews_;
};
typedef c10::variant<
int64_t,
GradInterpreterMeta,
JvpInterpreterMeta,
VmapInterpreterMeta,
FunctionalizeInterpreterMeta
> InterpreterMeta;
struct Interpreter {
// factory functions
static Interpreter Vmap(int64_t level, c10::SymInt batchSize, RandomnessType randomness) {
return Interpreter(TransformType::Vmap, level, VmapInterpreterMeta(std::move(batchSize), randomness));
}
static Interpreter Grad(int64_t level, bool prevGradMode) {
return Interpreter(TransformType::Grad, level, GradInterpreterMeta(prevGradMode));
}
static Interpreter Jvp(int64_t level, bool prevFwdGradMode) {
return Interpreter(TransformType::Jvp, level, JvpInterpreterMeta(prevFwdGradMode));
}
static Interpreter Functionalize(int64_t level, bool functionalizeAddBackViews) {
return Interpreter(TransformType::Functionalize, level, FunctionalizeInterpreterMeta(functionalizeAddBackViews));
}
// methods
TransformType key() const { return type_; }
int64_t level() const { return level_; }
const InterpreterMeta& meta() const { return meta_; }
void process(const c10::OperatorHandle& op, torch::jit::Stack* stack);
void sendToNextInterpreter(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
void saveLocalDispatchKeySet(c10::impl::LocalDispatchKeySet keyset) {
TORCH_INTERNAL_ASSERT(!savedLocalDispatchKeySet_.has_value());
savedLocalDispatchKeySet_ = std::move(keyset);
}
void clearSavedLocalDispatchKeySet() {
TORCH_INTERNAL_ASSERT(savedLocalDispatchKeySet_.has_value());
savedLocalDispatchKeySet_ = c10::nullopt;
}
c10::impl::LocalDispatchKeySet getSavedLocalDispatchKeySet() const {
TORCH_INTERNAL_ASSERT(savedLocalDispatchKeySet_.has_value());
return *savedLocalDispatchKeySet_;
}
// An Interpreter is alive if we are currently inside the ongoing transform
// for the interpreter. For example, vmap(f)(x); inside of f, the vmap's
// corresponding Interpreter is alive, even when it is not on the DynamicLayerStack.
bool is_alive() const {
return *is_alive_;
}
const std::shared_ptr<bool>& is_alive_ptr() const {
return is_alive_;
}
void set_is_alive(bool alive) {
*is_alive_ = alive;
}
// Please don't use this
explicit Interpreter() = default;
private:
explicit Interpreter(TransformType type, int64_t level, InterpreterMeta meta):
type_(type), level_(level), is_alive_(std::make_shared<bool>(false)), meta_(meta) {}
// fields
TransformType type_;
int64_t level_;
optional<c10::impl::LocalDispatchKeySet> savedLocalDispatchKeySet_;
std::shared_ptr<bool> is_alive_;
InterpreterMeta meta_;
};
// Applies the following for-loop:
// for i in range(begin, end):
// args[i] = func(args[i])
void foreachTensorInplace(std::vector<IValue>& args, int64_t begin, int64_t end,
std::function<Tensor(const Tensor&)> func);
// Applies the following for-loop:
// for i in range(begin, end):
//   if use_flag_relative[i - begin] == 1:  <-- treats use_flag_relative as a bitset
//     args[i] = func(args[i], /*flag=*/true)
//   else:
//     args[i] = func(args[i], /*flag=*/false)
void foreachTensorInplaceWithFlag(std::vector<IValue>& args, int64_t begin, int64_t end,
const std::bitset<64> use_flag_relative, std::function<Tensor(const Tensor&, bool)> func);
std::vector<int64_t> findUnwrappedInputs(std::vector<IValue>& args, int64_t begin, int64_t end);
DispatchKeySet keysToExcludeWhenEnteringDynamicLayer(TransformType key);
void setup_dispatch_key_tls(TransformType key, DispatchKeySet include);
void sanityCheckStack(const c10::OperatorHandle& op, torch::jit::Stack* stack);
} // namespace at::functorch
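Tying the factory functions to the convenience-pointer pattern from the NOTE above, a small sketch (VmapInterpreterPtr is declared in VmapInterpreter.h; the level and batch size are illustrative):

// Build a vmap interpreter at level 1 with batch size 8, then view it through
// the convenience struct to read the vmap-specific metadata out of the variant.
Interpreter interp = Interpreter::Vmap(
    /*level=*/1, /*batchSize=*/c10::SymInt(8), RandomnessType::Error);
TORCH_INTERNAL_ASSERT(interp.key() == TransformType::Vmap);
TORCH_INTERNAL_ASSERT(VmapInterpreterPtr(&interp).batchSize() == 8);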
file_length: 7874 | avg_line_length: 36.679426 | max_line_length: 117 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/LegacyVmapTransforms.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/functorch/Macros.h>
#include <ATen/functorch/BatchedTensorImpl.h>
namespace at::functorch {
// This file contains the legacy (now-deprecated) batching rule API.
// Please try to use the new-style batching rule API (see writing_batch_rules.md)
// instead. This file contains abstractions used for transforming *logical* vmap
// arguments into *physical* arguments. (Keep reading for definitions of these terms.)
// NOTE: [Logical vs physical args]
// Consider the following vmap.
// vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4))
// This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4],
// with batch dims 0 and 2:
// BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)])
//
// We say the *logical* view of the tensor has size [3] -- tensors inside
// `func` appear to have size [3].
// However, the *physical* underlying tensor (the one passed to vmap) has size
// [2, 3, 4].
//
// This notion of logical vs physical also extends to non-tensor arguments.
// Consider the previous tensor; let's assume the user called
// `torch.sum(tensor, dim=0)` inside of `func`. Then the logical
// dimension they are reducing over is dim 0 but the physical dim is dim 1
// (the first non-batch dimension)
// Forward declared; see NOTE: [What is a VmapPhysicalView?]
struct VmapPhysicalView;
// Most PyTorch operators take 4 or fewer inputs.
constexpr int64_t kVmapTransformStaticInputSize = 4;
using VmapPhysicalViewVec = SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>;
// PyTorch generally advertises good performance for <= 5 dims.
// (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap
// dimensions to get 8. Adjust this number as necessary.
constexpr int64_t kVmapStaticDimVecSize = 8;
using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>;
using VmapSymDimVector = SmallVector<c10::SymInt, kVmapStaticDimVecSize>;
// NOTE: [What is a VmapTransform?]
// A *VmapTransform* converts logical views of tensors to physical views.
//
// Batching rules use VmapTransforms to convert logical arguments to
// physical arguments, call one or more at:: operators that handle the
// physical arguments, and then convert the physical results back to
// logical arguments.
// VmapTransform for operators that take tensors with multiple batch dims.
// Given one or more logical views on Tensors, `logicalToPhysical`
// permutes all of the batch dims to the front of the tensor, aligns
// and expands the batch dims to match each other (according to their `level`),
// and returns a VmapPhysicalView on the tensor(s).
struct TORCH_API MultiBatchVmapTransform {
static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor);
static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors);
};
// VmapTransform for operators that broadcast all inputs.
// Given some logical views on Tensors, `logicalToPhysical`:
// - permutes all of the batch dims to the front of the tensors
// - aligns all the batch dims to the collective levels of all of the tensors.
// If a tensor does not have a batch dim for a vmap level, then it receives
// a size-one dimension for said level.
// - aligns the non-batch dims to have the same dimensionality, adding extra
// size-1 dimensions in between the batch dimensions and the non-batch dimensions
// so that the batch dimensions are lined up from the right.
//
// For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch
// dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap tensors
// of size (B, 1, 2) and (B, 3, 2).
//
// Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns
// VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't
// actually *need* to return a tensor of size (1, 2) for the second tensor
// because the broadcasting operation takes care of that for us, but we do
// it anyway to keep things simple.
struct TORCH_API BroadcastingVmapTransform {
static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors);
};
// Forward declared, if you're reading this file head to toe, don't worry about
// it yet.
struct VmapPhysicalToLogicalMap;
// NOTE: [What is a VmapPhysicalView?]
// VmapPhysicalView represents a physical view on a Tensor.
//
// One can use it to further convert logical dimension indices, logical shapes,
// and more to their physical variants, or convert a new (physical) tensor into
// a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented).
//
// VmapPhysicalView stores a physical tensor with all of its batch dimensions at
// the front and some levels that correspond to said batch dimensions.
//
// The levels bitset specifies which vmap levels correspond to the batch
// dimensions at the front of the tensor. In particular, the number of set bits
// corresponds to the number of batch dimensions on `tensor` and the rightmost
// bit of `levels` specifies the maximum number of nested vmaps we are in at
// this point in time.
// For example, given:
// physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3})
//
// The rightmost set bit of `levels` is at position 3, indicating that the
// number of nested vmaps is less than or equal to 3.
// bitset: 010100
//            ^
//            |
// levels: 012345
struct TORCH_API VmapPhysicalView {
VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels)
: levels_(levels), tensor_(std::move(tensor)) {
// TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor));
}
Tensor& tensor() { return tensor_; }
const Tensor& tensor() const { return tensor_; }
// Maps logical dim indices to physical dim indices. Also does dim wrapping.
//
// For example, given:
// physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3})
//
// Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}.
// This is because the number of set bits in `levels` tells us that the first
// two dimensions of `tensor_` are batch dimensions, so a logical dim of `n`
// is actually a physical dim of `n + 2`.
VmapDimVector getPhysicalDims(IntArrayRef logical_dims) const;
int64_t getPhysicalDim(int64_t logical_dim) const;
// Returns a VmapPhysicalToLogicalMap object. This can be used for
// mapping a physical tensor to a new logical tensor (BatchedTensor)
VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const;
// Maps a logical shape to a physical shape by pre-pending the batch
// sizes to the logical shape.
VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const;
SymDimVector getPhysicalShape(c10::SymIntArrayRef logical_shape) const;
int64_t numBatchDims() const;
private:
int64_t numLogicalDims() const;
std::bitset<kVmapNumLevels> levels_;
Tensor tensor_;
};
// Convenience struct used for mapping a physical tensor (a non-BatchedTensor)
// to a logical one (BatchedTensor). It holds some levels that are used to do the
// mapping and assumes that the batch dimensions in the physical tensor all
// occur at the front of the tensor.
struct TORCH_API VmapPhysicalToLogicalMap {
VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels): levels_(levels) {}
// Maps a physical tensor to a new logical tensor (BatchedTensor).
// Assumes that all of the "batch dimensions" are at the front
// of the physical tensor. For example, given:
// - x = rank-4 Tensor with size 2, 3, 5, 7
// - levels = (2, 4)
// Returns:
// - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)])
Tensor apply(const Tensor& physical_tensor) const;
// Given a vector of physical tensors,
// 1. maps each tensor to a new logical tensor. Assumes that all of the
// "batch dimensions" are at the front of the physical tensors.
// 2. stores the new logical tensors back into the passed-in vector. This is
// to avoid additional dynamic allocations.
void applyInplace(std::vector<Tensor>& physical_tensors) const;
std::bitset<kVmapNumLevels> levels_;
};
} // namespace at::functorch
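A hedged sketch of the logical -> physical -> logical round trip described above, in the style of a legacy batching rule (the rule itself is hypothetical; real ones live in LegacyBatchingRegistrations.cpp):

// Reduce over a user-visible (logical) dim of a BatchedTensor.
Tensor sum_batching_rule_sketch(const Tensor& self, int64_t dim) {
  // Move all batch dims to the front and get a physical view.
  auto self_physical = MultiBatchVmapTransform::logicalToPhysical(self);
  // Translate the logical dim the user asked for into a physical dim.
  auto dim_physical = self_physical.getPhysicalDim(dim);
  auto result = self_physical.tensor().sum(dim_physical);
  // Re-wrap the physical result as a logical BatchedTensor.
  return self_physical.getPhysicalToLogicalMap().apply(result);
}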
file_length: 8241 | avg_line_length: 42.840426 | max_line_length: 89 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/PlumbingHelper.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/Tensor.h>
#include <ATen/functorch/BatchedTensorImpl.h>
#include <ATen/functorch/DynamicLayer.h>
// NOTE: [vmap plumbing]
//
// Here's how "batching rules" work.
// - we register kernels to the Batched key
// - these kernels have the same signatures as the original operators.
// For example, at::sin(Tensor self) accepts a Tensor, and the batched kernel
// must also accept a Tensor
// - However, it is more natural for users to write a batching rule like the
// following: sin_batch_rule(Tensor self, optional<int> self_bdim)
// - There is some code-generated layer (the "plumbing") that wraps the
// user-defined batching rule (e.g. sin_batch_rule) in a kernel that can be
// registered to the Batched key.
//
// The plumbing is responsible for wrapping a batching rule into a form that
// may be registered as the kernel for the Batched key.
namespace at::functorch {
void vmap_check_escaped(const optional<DynamicLayer> &layer, const char* what);
// Create a BatchedTensor given a tensor, bdim, and level
TORCH_API Tensor makeBatched(const Tensor& tensor, optional<int64_t> bdim, int64_t level);
// Given a Tensor that may or may not be a BatchedTensor, unwrap it.
// If `tensor` is not a BatchedTensor, or is a BatchedTensor but the level
// doesn't match, then this returns (tensor, nullopt).
// Otherwise, it returns (unwrap(tensor), bdim).
TORCH_API std::tuple<Tensor, c10::optional<int64_t>> unwrapTensorAtLevel(const Tensor& tensor, int64_t level);
// Creates a vector of BatchedTensor
TORCH_API std::vector<Tensor> makeBatchedVector(const std::vector<Tensor>& tensors, optional<int64_t> bdim, int64_t level);
// Returns true if ANY tensor in tensors is batched at level
TORCH_API bool isBatchedAtLevel(ITensorListRef tensors, int64_t level);
TORCH_API bool isBatchedAtLevel(const c10::List<c10::optional<Tensor>>& maybe_tensors, int64_t level);
TORCH_API bool isBatchedAtLevel(const Tensor& tensor, int64_t level);
TORCH_API bool isBatchedAtLevel(const c10::optional<Tensor>& maybe_tensor, int64_t level);
// Convenience helper. Returns true if any tensor is batched at level
TORCH_API bool areAnyBatchedAtLevel(ArrayRef<optional<Tensor>> maybe_tensors, int64_t level);
inline bool ivalueParticipatesInCurrentLevel(const IValue& ivalue) {
if (ivalue.isTensor()) {
auto maybe_level = maybeCurrentDynamicLayer();
TORCH_INTERNAL_ASSERT(maybe_level.has_value());
auto current_level = maybe_level->layerId();
return isBatchedAtLevel(ivalue.toTensor(), current_level);
}
// TODO: should really check this
return false;
}
} // namespace at::functorch
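A sketch of what the code-generated plumbing looks like for a unary op, using only the helpers declared above; sin_batch_rule is the user-written batching rule assumed by the NOTE, and this simplified version omits the dispatch-key bookkeeping the real codegen emits:

// Assumed to exist, per NOTE: [vmap plumbing].
std::tuple<Tensor, optional<int64_t>> sin_batch_rule(
    const Tensor& self, optional<int64_t> self_bdim);

Tensor sin_plumbing_sketch(const Tensor& self) {
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "sin_plumbing_sketch");
  int64_t cur_level = maybe_layer->layerId();
  // Unwrap the input at the current level...
  auto [self_value, self_bdim] = unwrapTensorAtLevel(self, cur_level);
  // ...run the user's batching rule on the unwrapped value...
  auto [result, result_bdim] = sin_batch_rule(self_value, self_bdim);
  // ...and re-wrap the result as a BatchedTensor at this level.
  return makeBatched(result, result_bdim, cur_level);
}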
file_length: 2838 | avg_line_length: 43.359375 | max_line_length: 123 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/TensorWrapper.h
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <ATen/functorch/Macros.h>
#include <ATen/Tensor.h>
#include <ATen/functorch/Interpreter.h>
namespace at::functorch {
// NOTE: [functorch's TensorWrapper]
//
// Taking better suggestions for a name. TensorWrapper is the wrapper Tensor
// Subclass for functorch's grad-based transforms (grad, vjp, jvp). It is
// analogous to how vmap uses BatchedTensor as the wrapper Tensor subclass.
//
// If you're familiar with the Tensor-Variable merge, TensorWrapper is effectively
// another Variable.
//
// Consider grad(grad(torch.sin))(x). This wraps `x` as TensorWrapper(TensorWrapper(x)).
// The reason why is so that each TensorWrapper can hold its own AutogradMeta and
// participate in a **separate** autograd graph.
//
// There are alternative designs we could have chosen (e.g. each grad transform
// stores a weak map of Tensor -> AutogradMeta); the benefit of the TensorWrapper
// design is that we can re-use existing VariableType kernels (i.e. Autograd kernels)
// without much modification. Since a TensorWrapper looks like a regular Tensor,
// the VariableType kernel can pull out the AutogradMeta struct from where it
// expects and extend the autograd graph.
struct TORCH_API TensorWrapper : public c10::TensorImpl {
explicit TensorWrapper(
c10::DispatchKeySet key_set,
Tensor value,
int64_t level,
std::shared_ptr<bool> is_alive,
bool is_immutable = false, // if true, this came from an operation that aliases an immutable tensor
bool use_value_sizes_strides = true);
void refreshMetadata();
const Tensor& value() const {
return value_;
}
optional<int64_t> level() const {
if (is_alive()) {
return level_;
}
return {};
}
bool is_immutable() const {
return is_immutable_;
}
bool is_alive() const;
// Overrides necessary for autograd
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
const c10::VariableVersion& version_counter,
bool allow_tensor_metadata_change) const override;
c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
c10::VariableVersion&& version_counter,
bool allow_tensor_metadata_change) const override;
void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override;
private:
const char* tensorimpl_type_name() const override;
Tensor value_;
int64_t level_;
bool is_immutable_;
// TensorWrapper receives a boolean flag indicating whether the Grad Interpreter
// that created it is still alive.
// If the Grad Interpreter is no longer alive then it attempts to behave like
// a regular Tensor.
//
// When we exit the level, this wrapper may be marked as "not alive".
// Wrappers that are not alive:
// 1) May still have autograd metadata on them
// 2) Forward dispatches to the underlying value()
std::shared_ptr<bool> is_alive_;
};
// There are two variants of makeTensorWrapper: one that accepts a level
// and one that accepts an Interpreter.
//
// The one that accepts a level tries to automatically get the life handle from the
// interpreter on the DynamicLayerStack.
// It needs to be used with caution: if the interpreter is not on the
// DynamicLayerStack, then we won't be able to find the life handle.
//
// In practice this isn't a problem: when we're constructing TensorWrapper in
// Python, the corresponding interpreter is on the stack.
TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, int64_t level, bool is_immutable=false);
TORCH_API Tensor makeTensorWrapper(const Tensor& tensor, const Interpreter& interpreter, bool is_immutable=false);
TORCH_API TensorWrapper* maybeGetTensorWrapper(const Tensor& tensor);
TORCH_API void dumpTensor(std::ostream & ss, const Tensor& tensor);
TORCH_API void dumpTensorCout(const Tensor& tensor);
} // namespace at::functorch
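A sketch of the nesting described above for grad(grad(f))(x); this assumes grad interpreters at levels 1 and 2 are currently on the DynamicLayerStack, since the level-based makeTensorWrapper overload looks the life handle up there:

Tensor x = at::randn({3});
// Each transform level wraps once, so each wrapper gets its own AutogradMeta
// and participates in a separate autograd graph.
Tensor inner = makeTensorWrapper(x, /*level=*/1);
Tensor outer = makeTensorWrapper(inner, /*level=*/2);
TensorWrapper* w = maybeGetTensorWrapper(outer);
TORCH_INTERNAL_ASSERT(w != nullptr && w->level().value() == 2);
TORCH_INTERNAL_ASSERT(maybeGetTensorWrapper(w->value())->level().value() == 1);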
file_length: 4021 | avg_line_length: 37.673077 | max_line_length: 114 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/functorch/VmapInterpreter.h
#pragma once
#include <ATen/functorch/Interpreter.h>
namespace at::functorch {
// This is the interpreter that handles the vmap() transform.
// See NOTE: [functorch interpreter stack] for more details.
struct VmapInterpreterPtr {
explicit VmapInterpreterPtr(const Interpreter* base): base_(base) { TORCH_INTERNAL_ASSERT(base->key() == TransformType::Vmap); }
TransformType key() const { return base_->key(); }
int64_t level() const { return base_->level(); }
void processImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack);
void sendToNextInterpreterImpl(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool grad_special_case);
c10::SymInt batchSize() const {
return c10::get<VmapInterpreterMeta>(base_->meta()).batchSize_;
}
RandomnessType randomness() const {
return c10::get<VmapInterpreterMeta>(base_->meta()).randomness_;
}
private:
const Interpreter* base_;
};
} // namespace at::functorch
file_length: 957 | avg_line_length: 35.846154 | max_line_length: 130 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h
#pragma once
#include <c10/core/Allocator.h>
#include <c10/core/DeviceType.h>
// Use of c10::hip namespace here makes hipification easier, because
// I don't have to also fix namespaces. Sorry!
namespace c10 { namespace hip {
// Takes a valid HIPAllocator (of any sort) and turns it into
// an allocator pretending to be a CUDA allocator. See
// Note [Masquerading as CUDA]
class HIPAllocatorMasqueradingAsCUDA final : public Allocator {
Allocator* allocator_;
public:
explicit HIPAllocatorMasqueradingAsCUDA(Allocator* allocator)
: allocator_(allocator) {}
DataPtr allocate(size_t size) const override {
DataPtr r = allocator_->allocate(size);
r.unsafe_set_device(Device(c10::DeviceType::CUDA, r.device().index()));
return r;
}
DeleterFnPtr raw_deleter() const override {
return allocator_->raw_deleter();
}
};
}} // namespace c10::hip
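A short usage sketch: any HIP allocator wrapped this way hands out DataPtrs that claim to live on a CUDA device, so device-checking code written for CUDA keeps working on ROCm. Using GetAllocator as the source of the raw HIP allocator is an assumption for illustration.

// Illustrative only.
Allocator* hip_allocator = GetAllocator(c10::DeviceType::HIP); // assumed source
static HIPAllocatorMasqueradingAsCUDA masquerading(hip_allocator);
DataPtr p = masquerading.allocate(1024);
// The memory still comes from HIP, but its device masquerades as CUDA:
TORCH_INTERNAL_ASSERT(p.device().type() == c10::DeviceType::CUDA);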
file_length: 875 | avg_line_length: 29.206897 | max_line_length: 75 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h
#pragma once
#include <c10/hip/HIPCachingAllocator.h>
#include <ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
namespace c10 {
// forward declaration
class DataPtr;
namespace hip {
namespace HIPCachingAllocatorMasqueradingAsCUDA {
C10_HIP_API Allocator* get();
C10_HIP_API void recordStreamMasqueradingAsCUDA(const DataPtr& ptr, HIPStreamMasqueradingAsCUDA stream);
} // namespace HIPCachingAllocatorMasqueradingAsCUDA
} // namespace hip
} // namespace c10
file_length: 517 | avg_line_length: 26.263158 | max_line_length: 104 | extension_type: h

repo: null
file: pytorch-main/aten/src/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h
#pragma once
#include <ATen/hip/HIPConfig.h>
// The includes of HIPGuard.h
#include <c10/hip/impl/HIPGuardImpl.h>
#include <c10/hip/HIPMacros.h>
#include <c10/core/DeviceType.h>
#include <c10/core/impl/InlineDeviceGuard.h>
#include <c10/core/impl/InlineStreamGuard.h>
#include <c10/util/Exception.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
// Use of c10::hip namespace here makes hipification easier, because
// I don't have to also fix namespaces. Sorry!
namespace c10 { namespace hip {
// Note [Masquerading as CUDA]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
// c10_hip is very easy to understand: it is HIPified from c10_cuda,
// and anywhere you said CUDA, the source code now says HIP. HIPified
// PyTorch is much harder to understand: it is HIPified from regular
// PyTorch, yes, but NO source-to-source translation from CUDA to
// HIP occurs; instead, anywhere we see "CUDA", it actually means "HIP".
// For example, when you use HIPified PyTorch, you say x.cuda() to
// move a tensor onto ROCm device. We call this situation "HIP
// masquerading as CUDA".
//
// This leads to a very awkward situation when we want to call c10_hip
// code from PyTorch, since c10_hip is expecting things to be called
// HIP, but PyTorch is calling them CUDA (masquerading as HIP). To
// fix this impedance mismatch, we have MasqueradingAsCUDA variants
// for all c10_hip classes. These translate between the "HIP" and "CUDA
// masquerading as HIP" worlds. For example,
// HIPGuardImplMasqueradingAsCUDA (this file) provides something like a
// HIPGuardImpl, but it reports its DeviceType as CUDA (e.g., type()
// returns CUDA, getDevice() reports the current HIP device as a CUDA
// device.)
//
// We should be able to delete all of these classes entirely once
// we switch PyTorch to calling a HIP a HIP.
//
// When you add a new MasqueradingAsCUDA class/function, you need to
// also update the rewrite rules in torch/utils/hipify/cuda_to_hip_mappings.py
//
// By the way, note that the cpp file associated with this also
// *overwrites* the entry in the DeviceGuardImpl registry for CUDA with
// this HIP implementation.
struct HIPGuardImplMasqueradingAsCUDA final : public c10::impl::DeviceGuardImplInterface {
static constexpr c10::DeviceType static_type = c10::DeviceType::CUDA;
HIPGuardImplMasqueradingAsCUDA() {}
HIPGuardImplMasqueradingAsCUDA(c10::DeviceType t) {
TORCH_INTERNAL_ASSERT(t == c10::DeviceType::CUDA);
}
c10::DeviceType type() const override {
return c10::DeviceType::CUDA;
}
Device exchangeDevice(Device d) const override {
TORCH_INTERNAL_ASSERT(d.is_cuda());
Device old_device = getDevice();
if (old_device.index() != d.index()) {
C10_HIP_CHECK(hipSetDevice(d.index()));
}
return old_device;
}
Device getDevice() const override {
int device;
C10_HIP_CHECK(hipGetDevice(&device));
return Device(c10::DeviceType::CUDA, device);
}
void setDevice(Device d) const override {
TORCH_INTERNAL_ASSERT(d.is_cuda());
C10_HIP_CHECK(hipSetDevice(d.index()));
}
void uncheckedSetDevice(Device d) const noexcept override {
C10_HIP_CHECK_WARN(hipSetDevice(d.index()));
}
Stream getStream(Device d) const noexcept override {
return getCurrentHIPStreamMasqueradingAsCUDA(d.index()).unwrap();
}
Stream getDefaultStream(Device d) const override {
return getDefaultHIPStreamMasqueradingAsCUDA(d.index());
}
Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) const override {
return getStreamFromPoolMasqueradingAsCUDA(isHighPriority, d.index());
}
Stream exchangeStream(Stream s) const noexcept override {
HIPStreamMasqueradingAsCUDA cs(s);
auto old_stream = getCurrentHIPStreamMasqueradingAsCUDA(s.device().index());
setCurrentHIPStreamMasqueradingAsCUDA(cs);
return old_stream.unwrap();
}
DeviceIndex deviceCount() const noexcept override {
    int deviceCnt = 0;
    hipError_t _err = hipGetDeviceCount(&deviceCnt);
#if defined(USE_ROCM) && (ROCM_VERSION < 50201)
    if (_err == hipErrorInvalidDevice)
        return 0;
#endif
    if (_err != hipErrorNoDevice && _err != hipSuccess)
        C10_HIP_CHECK(_err);
return deviceCnt;
}
// Event-related functions
// Note: hipEventCreateWithFlags should be called on the same device as
// the recording stream's device.
void createEvent(
hipEvent_t* hip_event,
const EventFlag flag) const {
// Maps PyTorch's Event::Flag to HIP flag
auto hip_flag = hipEventDefault;
switch (flag) {
case EventFlag::PYTORCH_DEFAULT:
case EventFlag::HIP_EVENT_DISABLE_TIMING:
hip_flag = hipEventDisableTiming;
break;
case EventFlag::BACKEND_DEFAULT:
case EventFlag::HIP_EVENT_DEFAULT:
hip_flag = hipEventDefault;
break;
default:
TORCH_CHECK(false, "HIP event received unknown flag");
}
C10_HIP_CHECK(hipEventCreateWithFlags(hip_event, hip_flag));
}
void destroyEvent(
void* event,
const DeviceIndex device_index) const noexcept override {
if (!event) return;
auto hip_event = static_cast<hipEvent_t>(event);
int orig_device;
C10_HIP_CHECK_WARN(hipGetDevice(&orig_device));
C10_HIP_CHECK_WARN(hipSetDevice(device_index));
C10_HIP_CHECK_WARN(hipEventDestroy(hip_event));
C10_HIP_CHECK_WARN(hipSetDevice(orig_device));
}
void record(void** event,
const Stream& stream,
const DeviceIndex device_index,
const EventFlag flag) const override {
TORCH_CHECK(device_index == -1 || device_index == stream.device_index(),
"Event device index ",
device_index,
" does not match recording stream's device index ",
stream.device_index(),
".");
hipEvent_t hip_event = static_cast<hipEvent_t>(*event);
HIPStreamMasqueradingAsCUDA hip_stream{stream};
// Moves to stream's device to record
const auto orig_device = getDevice();
setDevice(stream.device());
// Creates the event (lazily)
if (!hip_event) createEvent(&hip_event, flag);
C10_HIP_CHECK(hipEventRecord(hip_event, hip_stream));
// Makes the void* point to the (possibly just allocated) HIP event
*event = hip_event;
// Resets device
setDevice(orig_device);
}
void block(
void* event,
const Stream& stream) const override {
if (!event) return;
hipEvent_t hip_event = static_cast<hipEvent_t>(event);
HIPStreamMasqueradingAsCUDA hip_stream{stream};
const auto orig_device = getDevice();
setDevice(stream.device());
C10_HIP_CHECK(hipStreamWaitEvent(
hip_stream,
hip_event,
/*flags (must be zero)=*/ 0));
setDevice(orig_device);
}
bool queryEvent(void* event) const override {
if (!event) return true;
hipEvent_t hip_event = static_cast<hipEvent_t>(event);
const hipError_t err = hipEventQuery(hip_event);
if (err != hipErrorNotReady) C10_HIP_CHECK(err);
else {
// ignore and clear the error if not ready
(void)hipGetLastError();
}
return (err == hipSuccess);
}
// Stream-related functions
bool queryStream(const Stream& stream) const override {
HIPStreamMasqueradingAsCUDA hip_stream{stream};
return hip_stream.query();
}
void synchronizeStream(const Stream& stream) const override {
HIPStreamMasqueradingAsCUDA hip_stream{stream};
hip_stream.synchronize();
}
void recordDataPtrOnStream(
const c10::DataPtr& data_ptr,
const Stream& stream) const override {
HIPStreamMasqueradingAsCUDA hip_stream{stream};
HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA(data_ptr, hip_stream);
}
};
// All of the guards which have HIPGuardImpl burned in need to also have
// variants using HIPGuardImplMasqueradingAsCUDA.
// This code is all a direct copy from c10/cuda/CUDAGuard.h, but with
// the correct InlineDeviceGuard burned in. Sorry about the
// copy-pasting.
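//
// An illustrative usage sketch (the device index below is made up); the
// guard behaves like c10::cuda::CUDAGuard, but drives hipSetDevice underneath:
//
//   {
//     HIPGuardMasqueradingAsCUDA guard(/*device_index=*/1);
//     // ... all work here runs with HIP device 1 current ...
//   } // original device restored when the guard goes out of scope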
struct HIPGuardMasqueradingAsCUDA {
explicit HIPGuardMasqueradingAsCUDA() = delete;
explicit HIPGuardMasqueradingAsCUDA(DeviceIndex device_index) : guard_(device_index) {}
explicit HIPGuardMasqueradingAsCUDA(Device device) : guard_(device) {}
HIPGuardMasqueradingAsCUDA(const HIPGuardMasqueradingAsCUDA&) = delete;
HIPGuardMasqueradingAsCUDA& operator=(const HIPGuardMasqueradingAsCUDA&) = delete;
HIPGuardMasqueradingAsCUDA(HIPGuardMasqueradingAsCUDA&& other) = delete;
HIPGuardMasqueradingAsCUDA& operator=(HIPGuardMasqueradingAsCUDA&& other) = delete;
void set_device(Device device) { guard_.set_device(device); }
void reset_device(Device device) { guard_.reset_device(device); }
void set_index(DeviceIndex device_index) { guard_.set_index(device_index); }
Device original_device() const { return guard_.original_device(); }
Device current_device() const { return guard_.current_device(); }
private:
c10::impl::InlineDeviceGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
};
struct OptionalHIPGuardMasqueradingAsCUDA {
explicit OptionalHIPGuardMasqueradingAsCUDA() : guard_() {}
explicit OptionalHIPGuardMasqueradingAsCUDA(optional<Device> device_opt) : guard_(device_opt) {}
explicit OptionalHIPGuardMasqueradingAsCUDA(optional<DeviceIndex> device_index_opt) : guard_(device_index_opt) {}
OptionalHIPGuardMasqueradingAsCUDA(const OptionalHIPGuardMasqueradingAsCUDA&) = delete;
OptionalHIPGuardMasqueradingAsCUDA& operator=(const OptionalHIPGuardMasqueradingAsCUDA&) = delete;
OptionalHIPGuardMasqueradingAsCUDA(OptionalHIPGuardMasqueradingAsCUDA&& other) = delete;
OptionalHIPGuardMasqueradingAsCUDA& operator=(OptionalHIPGuardMasqueradingAsCUDA&& other) = delete;
void set_device(Device device) { guard_.set_device(device); }
void reset_device(Device device) { guard_.reset_device(device); }
void set_index(DeviceIndex device_index) { guard_.set_index(device_index); }
optional<Device> original_device() const { return guard_.original_device(); }
optional<Device> current_device() const { return guard_.current_device(); }
void reset() { guard_.reset(); }
private:
c10::impl::InlineOptionalDeviceGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
};
struct HIPStreamGuardMasqueradingAsCUDA {
explicit HIPStreamGuardMasqueradingAsCUDA() = delete;
explicit HIPStreamGuardMasqueradingAsCUDA(Stream stream) : guard_(stream) {}
HIPStreamGuardMasqueradingAsCUDA(const HIPStreamGuardMasqueradingAsCUDA&) = delete;
HIPStreamGuardMasqueradingAsCUDA& operator=(const HIPStreamGuardMasqueradingAsCUDA&) = delete;
HIPStreamGuardMasqueradingAsCUDA(HIPStreamGuardMasqueradingAsCUDA&& other) = delete;
HIPStreamGuardMasqueradingAsCUDA& operator=(HIPStreamGuardMasqueradingAsCUDA&& other) = delete;
void reset_stream(Stream stream) { guard_.reset_stream(stream); }
HIPStreamMasqueradingAsCUDA original_stream() const {
return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.original_stream());
}
HIPStreamMasqueradingAsCUDA current_stream() const {
return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.current_stream());
}
Device current_device() const { return guard_.current_device(); }
Device original_device() const { return guard_.original_device(); }
private:
c10::impl::InlineStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
};
struct OptionalHIPStreamGuardMasqueradingAsCUDA {
explicit OptionalHIPStreamGuardMasqueradingAsCUDA() : guard_() {}
explicit OptionalHIPStreamGuardMasqueradingAsCUDA(Stream stream) : guard_(stream) {}
explicit OptionalHIPStreamGuardMasqueradingAsCUDA(optional<Stream> stream_opt) : guard_(stream_opt) {}
OptionalHIPStreamGuardMasqueradingAsCUDA(const OptionalHIPStreamGuardMasqueradingAsCUDA&) = delete;
OptionalHIPStreamGuardMasqueradingAsCUDA& operator=(const OptionalHIPStreamGuardMasqueradingAsCUDA&) = delete;
OptionalHIPStreamGuardMasqueradingAsCUDA(OptionalHIPStreamGuardMasqueradingAsCUDA&& other) = delete;
OptionalHIPStreamGuardMasqueradingAsCUDA& operator=(OptionalHIPStreamGuardMasqueradingAsCUDA&& other) = delete;
void reset_stream(Stream stream) { guard_.reset_stream(stream); }
optional<HIPStreamMasqueradingAsCUDA> original_stream() const {
auto r = guard_.original_stream();
if (r.has_value()) {
return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value()));
} else {
return nullopt;
}
}
optional<HIPStreamMasqueradingAsCUDA> current_stream() const {
auto r = guard_.current_stream();
if (r.has_value()) {
return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value()));
} else {
return nullopt;
}
}
void reset() { guard_.reset(); }
private:
c10::impl::InlineOptionalStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
};
struct HIPMultiStreamGuardMasqueradingAsCUDA {
explicit HIPMultiStreamGuardMasqueradingAsCUDA(ArrayRef<HIPStreamMasqueradingAsCUDA> streams)
: guard_(unwrapStreams(streams)) {}
HIPMultiStreamGuardMasqueradingAsCUDA(const HIPMultiStreamGuardMasqueradingAsCUDA&) = delete;
HIPMultiStreamGuardMasqueradingAsCUDA& operator=(const HIPMultiStreamGuardMasqueradingAsCUDA&) = delete;
HIPMultiStreamGuardMasqueradingAsCUDA(HIPMultiStreamGuardMasqueradingAsCUDA&& other) = delete;
HIPMultiStreamGuardMasqueradingAsCUDA& operator=(HIPMultiStreamGuardMasqueradingAsCUDA&& other) = delete;
private:
c10::impl::InlineMultiStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
static std::vector<Stream> unwrapStreams(ArrayRef<HIPStreamMasqueradingAsCUDA> hipStreams) {
std::vector<Stream> streams;
streams.reserve(hipStreams.size());
for (const HIPStreamMasqueradingAsCUDA& hipStream : hipStreams) {
streams.push_back(hipStream);
}
return streams;
}
};
}} // namespace c10::hip
| 13,949
| 38.40678
| 115
|
h
|
null |
pytorch-main/aten/src/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h
|
#pragma once
#include <c10/hip/HIPStream.h>
// Use of c10::hip namespace here makes hipification easier, because
// I don't have to also fix namespaces. Sorry!
namespace c10 { namespace hip {
// See Note [Masquerading as CUDA] for motivation
class HIPStreamMasqueradingAsCUDA {
public:
enum Unchecked { UNCHECKED };
explicit HIPStreamMasqueradingAsCUDA(Stream stream)
: HIPStreamMasqueradingAsCUDA(UNCHECKED, stream) {
// We did the coercion unchecked; check that it was right.
TORCH_CHECK(stream.device().is_cuda() /* !!! */);
}
explicit HIPStreamMasqueradingAsCUDA(Unchecked, Stream stream)
// Unsafely coerce the "CUDA" stream into a HIP stream
: stream_(
HIPStream(
Stream(
Stream::UNSAFE,
Device(c10::DeviceType::HIP, stream.device_index()),
stream.id())
)
) {}
// New constructor, just for this. Does NOT coerce.
explicit HIPStreamMasqueradingAsCUDA(HIPStream stream) : stream_(stream) {}
bool operator==(const HIPStreamMasqueradingAsCUDA& other) const noexcept {
return stream_ == other.stream_;
}
bool operator!=(const HIPStreamMasqueradingAsCUDA& other) const noexcept {
return stream_ != other.stream_;
}
operator hipStream_t() const { return stream_.stream(); }
operator Stream() const {
// Unsafely coerce HIP stream into a "CUDA" stream
return Stream(Stream::UNSAFE, device(), id());
}
DeviceIndex device_index() const { return stream_.device_index(); }
// Unsafely coerce HIP device into CUDA device
c10::DeviceType device_type() const { return c10::DeviceType::CUDA; }
Device device() const {
// Unsafely coerce HIP device into CUDA device
return Device(c10::DeviceType::CUDA, stream_.device_index());
}
StreamId id() const { return stream_.id(); }
bool query() const { return stream_.query(); }
void synchronize() const { stream_.synchronize(); }
int priority() const { return stream_.priority(); }
hipStream_t stream() const { return stream_.stream(); }
Stream unwrap() const {
// Unsafely coerce HIP stream into "CUDA" stream
return Stream(Stream::UNSAFE, device(), id());
}
c10::StreamData3 pack3() const noexcept {
// Unsafely coerce HIP stream into "CUDA" stream before packing
return unwrap().pack3();
}
static HIPStreamMasqueradingAsCUDA unpack3(StreamId stream_id,
DeviceIndex device_index,
c10::DeviceType device_type) {
// NB: constructor manages CUDA->HIP translation for us
return HIPStreamMasqueradingAsCUDA(Stream::unpack3(
stream_id, device_index, device_type));
}
static std::tuple<int, int> priority_range() { return HIPStream::priority_range(); }
// New method, gets the underlying HIPStream
HIPStream hip_stream() const { return stream_; }
private:
HIPStream stream_;
};
inline HIPStreamMasqueradingAsCUDA
getStreamFromPoolMasqueradingAsCUDA(const bool isHighPriority = false, DeviceIndex device = -1) {
return HIPStreamMasqueradingAsCUDA(getStreamFromPool(isHighPriority, device));
}
inline HIPStreamMasqueradingAsCUDA
getStreamFromExternalMasqueradingAsCUDA(hipStream_t ext_stream, DeviceIndex device) {
return HIPStreamMasqueradingAsCUDA(getStreamFromExternal(ext_stream, device));
}
inline HIPStreamMasqueradingAsCUDA getDefaultHIPStreamMasqueradingAsCUDA(DeviceIndex device_index = -1) {
return HIPStreamMasqueradingAsCUDA(getDefaultHIPStream(device_index));
}
inline HIPStreamMasqueradingAsCUDA getCurrentHIPStreamMasqueradingAsCUDA(DeviceIndex device_index = -1) {
return HIPStreamMasqueradingAsCUDA(getCurrentHIPStream(device_index));
}
inline void setCurrentHIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA stream) {
setCurrentHIPStream(stream.hip_stream());
}
inline std::ostream& operator<<(std::ostream& stream, const HIPStreamMasqueradingAsCUDA& s) {
stream << s.hip_stream() << " (masquerading as CUDA)";
return stream;
}
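// An illustrative sketch of how these helpers compose (`ext` stands for a
// hipStream_t created by the caller):
//
//   auto s = getStreamFromExternalMasqueradingAsCUDA(ext, /*device=*/0);
//   setCurrentHIPStreamMasqueradingAsCUDA(s);
//   // The wrapper unwraps to a Stream that claims to be a CUDA stream:
//   TORCH_INTERNAL_ASSERT(s.unwrap().device().is_cuda());
//   s.synchronize();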
}} // namespace c10::hip
namespace std {
template <>
struct hash<c10::hip::HIPStreamMasqueradingAsCUDA> {
size_t operator()(c10::hip::HIPStreamMasqueradingAsCUDA s) const noexcept {
return std::hash<c10::Stream>{}(s.unwrap());
}
};
} // namespace std
| 4,318
| 31.969466
| 105
|
h
|
null |
pytorch-main/aten/src/ATen/miopen/Exceptions.h
|
#pragma once
#include <ATen/miopen/miopen-wrapper.h>
#include <string>
#include <stdexcept>
#include <sstream>
namespace at { namespace native {
class miopen_exception : public std::runtime_error {
public:
miopenStatus_t status;
miopen_exception(miopenStatus_t status, const char* msg)
: std::runtime_error(msg)
, status(status) {}
miopen_exception(miopenStatus_t status, const std::string& msg)
: std::runtime_error(msg)
, status(status) {}
};
inline void MIOPEN_CHECK(miopenStatus_t status)
{
if (status != miopenStatusSuccess) {
if (status == miopenStatusNotImplemented) {
throw miopen_exception(status, std::string(miopenGetErrorString(status)) +
". This error may appear if you passed in a non-contiguous input.");
}
throw miopen_exception(status, miopenGetErrorString(status));
}
}
inline void HIP_CHECK(hipError_t error)
{
if (error != hipSuccess) {
std::string msg("HIP error: ");
msg += hipGetErrorString(error);
throw std::runtime_error(msg);
}
}
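// Illustrative usage of the checks above (a sketch):
//
//   miopenHandle_t handle;
//   MIOPEN_CHECK(miopenCreate(&handle)); // throws miopen_exception on failure
//   HIP_CHECK(hipDeviceSynchronize());   // throws std::runtime_error on failure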
}} // namespace at::native
| 1,076
| 24.642857
| 84
|
h
|
null |
pytorch-main/aten/src/ATen/mkl/Descriptors.h
|
#pragma once
#include <ATen/mkl/Exceptions.h>
#include <mkl_dfti.h>
#include <ATen/Tensor.h>
namespace at { namespace native {
struct DftiDescriptorDeleter {
void operator()(DFTI_DESCRIPTOR* desc) {
if (desc != nullptr) {
MKL_DFTI_CHECK(DftiFreeDescriptor(&desc));
}
}
};
class DftiDescriptor {
public:
void init(DFTI_CONFIG_VALUE precision, DFTI_CONFIG_VALUE signal_type, MKL_LONG signal_ndim, MKL_LONG* sizes) {
if (desc_ != nullptr) {
throw std::runtime_error("DFTI DESCRIPTOR can only be initialized once");
}
DFTI_DESCRIPTOR *raw_desc;
if (signal_ndim == 1) {
MKL_DFTI_CHECK(DftiCreateDescriptor(&raw_desc, precision, signal_type, 1, sizes[0]));
} else {
MKL_DFTI_CHECK(DftiCreateDescriptor(&raw_desc, precision, signal_type, signal_ndim, sizes));
}
desc_.reset(raw_desc);
}
DFTI_DESCRIPTOR *get() const {
if (desc_ == nullptr) {
throw std::runtime_error("DFTI DESCRIPTOR has not been initialized");
}
return desc_.get();
}
private:
std::unique_ptr<DFTI_DESCRIPTOR, DftiDescriptorDeleter> desc_;
};
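// Illustrative usage (a sketch for a 1D single-precision complex transform;
// `in` and `out` stand for caller-owned buffers):
//
//   DftiDescriptor desc;
//   MKL_LONG signal_size = 64;
//   desc.init(DFTI_SINGLE, DFTI_COMPLEX, /*signal_ndim=*/1, &signal_size);
//   MKL_DFTI_CHECK(DftiSetValue(desc.get(), DFTI_PLACEMENT, DFTI_NOT_INPLACE));
//   MKL_DFTI_CHECK(DftiCommitDescriptor(desc.get()));
//   MKL_DFTI_CHECK(DftiComputeForward(desc.get(), in, out));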
}} // at::native
| 1,122
| 23.955556
| 112
|
h
|
null |
pytorch-main/aten/src/ATen/mkl/Exceptions.h
|
#pragma once
#include <string>
#include <stdexcept>
#include <sstream>
#include <mkl_dfti.h>
#include <mkl_spblas.h>
namespace at { namespace native {
static inline void MKL_DFTI_CHECK(MKL_INT status)
{
if (status && !DftiErrorClass(status, DFTI_NO_ERROR)) {
std::ostringstream ss;
ss << "MKL FFT error: " << DftiErrorMessage(status);
throw std::runtime_error(ss.str());
}
}
}} // namespace at::native
namespace at {
namespace mkl {
namespace sparse {
static inline const char* _mklGetErrorString(sparse_status_t status) {
if (status == SPARSE_STATUS_SUCCESS) {
return "SPARSE_STATUS_SUCCESS";
}
if (status == SPARSE_STATUS_NOT_INITIALIZED) {
return "SPARSE_STATUS_NOT_INITIALIZED";
}
if (status == SPARSE_STATUS_ALLOC_FAILED) {
return "SPARSE_STATUS_ALLOC_FAILED";
}
if (status == SPARSE_STATUS_INVALID_VALUE) {
return "SPARSE_STATUS_INVALID_VALUE";
}
if (status == SPARSE_STATUS_EXECUTION_FAILED) {
return "SPARSE_STATUS_EXECUTION_FAILED";
}
if (status == SPARSE_STATUS_INTERNAL_ERROR) {
return "SPARSE_STATUS_INTERNAL_ERROR";
}
if (status == SPARSE_STATUS_NOT_SUPPORTED) {
return "SPARSE_STATUS_NOT_SUPPORTED";
}
return "<unknown>";
}
} // namespace sparse
} // namespace mkl
} // namespace at
#define TORCH_MKLSPARSE_CHECK(EXPR) \
do { \
sparse_status_t __err = EXPR; \
TORCH_CHECK( \
__err == SPARSE_STATUS_SUCCESS, \
"MKL error: ", \
at::mkl::sparse::_mklGetErrorString(__err), \
" when calling `" #EXPR "`"); \
} while (0)
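// Illustrative usage (a sketch; `mat` stands for a previously created
// sparse_matrix_t handle):
//
//   TORCH_MKLSPARSE_CHECK(mkl_sparse_optimize(mat));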
| 1,724
| 26.822581
| 70
|
h
|
null |
pytorch-main/aten/src/ATen/mkl/SparseBlas.h
|
#pragma once
/*
Provides a subset of MKL Sparse BLAS functions as templates:
mv<scalar_t>(operation, alpha, A, descr, x, beta, y)
where scalar_t is double, float, c10::complex<double> or c10::complex<float>.
The functions are available in at::mkl::sparse namespace.
*/
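/*
  Illustrative usage (a sketch; `rows`, `cols` and the raw pointers stand
  for caller-owned CSR arrays and dense vectors; TORCH_MKLSPARSE_CHECK
  comes from ATen/mkl/Exceptions.h):

    sparse_matrix_t A;
    at::mkl::sparse::create_csr<float>(
        &A, SPARSE_INDEX_BASE_ZERO, rows, cols,
        rows_start, rows_end, col_indx, values);
    struct matrix_descr descr;
    descr.type = SPARSE_MATRIX_TYPE_GENERAL;
    // y = alpha * op(A) * x + beta * y
    at::mkl::sparse::mv<float>(
        SPARSE_OPERATION_NON_TRANSPOSE, 1.0f, A, descr, x, 0.0f, y);
    TORCH_MKLSPARSE_CHECK(mkl_sparse_destroy(A));
*/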
#include <c10/util/Exception.h>
#include <c10/util/complex.h>
#include <mkl_spblas.h>
namespace at {
namespace mkl {
namespace sparse {
#define MKL_SPARSE_CREATE_CSR_ARGTYPES(scalar_t) \
sparse_matrix_t *A, const sparse_index_base_t indexing, const MKL_INT rows, \
const MKL_INT cols, MKL_INT *rows_start, MKL_INT *rows_end, \
MKL_INT *col_indx, scalar_t *values
template <typename scalar_t>
inline void create_csr(MKL_SPARSE_CREATE_CSR_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::create_csr: not implemented for ",
typeid(scalar_t).name());
}
template <>
void create_csr<float>(MKL_SPARSE_CREATE_CSR_ARGTYPES(float));
template <>
void create_csr<double>(MKL_SPARSE_CREATE_CSR_ARGTYPES(double));
template <>
void create_csr<c10::complex<float>>(
MKL_SPARSE_CREATE_CSR_ARGTYPES(c10::complex<float>));
template <>
void create_csr<c10::complex<double>>(
MKL_SPARSE_CREATE_CSR_ARGTYPES(c10::complex<double>));
#define MKL_SPARSE_CREATE_BSR_ARGTYPES(scalar_t) \
sparse_matrix_t *A, const sparse_index_base_t indexing, \
const sparse_layout_t block_layout, const MKL_INT rows, \
const MKL_INT cols, MKL_INT block_size, MKL_INT *rows_start, \
MKL_INT *rows_end, MKL_INT *col_indx, scalar_t *values
template <typename scalar_t>
inline void create_bsr(MKL_SPARSE_CREATE_BSR_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::create_bsr: not implemented for ",
typeid(scalar_t).name());
}
template <>
void create_bsr<float>(MKL_SPARSE_CREATE_BSR_ARGTYPES(float));
template <>
void create_bsr<double>(MKL_SPARSE_CREATE_BSR_ARGTYPES(double));
template <>
void create_bsr<c10::complex<float>>(
MKL_SPARSE_CREATE_BSR_ARGTYPES(c10::complex<float>));
template <>
void create_bsr<c10::complex<double>>(
MKL_SPARSE_CREATE_BSR_ARGTYPES(c10::complex<double>));
#define MKL_SPARSE_MV_ARGTYPES(scalar_t) \
const sparse_operation_t operation, const scalar_t alpha, \
const sparse_matrix_t A, const struct matrix_descr descr, \
const scalar_t *x, const scalar_t beta, scalar_t *y
template <typename scalar_t>
inline void mv(MKL_SPARSE_MV_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::mv: not implemented for ",
typeid(scalar_t).name());
}
template <>
void mv<float>(MKL_SPARSE_MV_ARGTYPES(float));
template <>
void mv<double>(MKL_SPARSE_MV_ARGTYPES(double));
template <>
void mv<c10::complex<float>>(MKL_SPARSE_MV_ARGTYPES(c10::complex<float>));
template <>
void mv<c10::complex<double>>(MKL_SPARSE_MV_ARGTYPES(c10::complex<double>));
#define MKL_SPARSE_ADD_ARGTYPES(scalar_t) \
const sparse_operation_t operation, const sparse_matrix_t A, \
const scalar_t alpha, const sparse_matrix_t B, sparse_matrix_t *C
template <typename scalar_t>
inline void add(MKL_SPARSE_ADD_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::add: not implemented for ",
typeid(scalar_t).name());
}
template <>
void add<float>(MKL_SPARSE_ADD_ARGTYPES(float));
template <>
void add<double>(MKL_SPARSE_ADD_ARGTYPES(double));
template <>
void add<c10::complex<float>>(MKL_SPARSE_ADD_ARGTYPES(c10::complex<float>));
template <>
void add<c10::complex<double>>(MKL_SPARSE_ADD_ARGTYPES(c10::complex<double>));
#define MKL_SPARSE_EXPORT_CSR_ARGTYPES(scalar_t) \
const sparse_matrix_t source, sparse_index_base_t *indexing, MKL_INT *rows, \
MKL_INT *cols, MKL_INT **rows_start, MKL_INT **rows_end, \
MKL_INT **col_indx, scalar_t **values
template <typename scalar_t>
inline void export_csr(MKL_SPARSE_EXPORT_CSR_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::export_csr: not implemented for ",
typeid(scalar_t).name());
}
template <>
void export_csr<float>(MKL_SPARSE_EXPORT_CSR_ARGTYPES(float));
template <>
void export_csr<double>(MKL_SPARSE_EXPORT_CSR_ARGTYPES(double));
template <>
void export_csr<c10::complex<float>>(
MKL_SPARSE_EXPORT_CSR_ARGTYPES(c10::complex<float>));
template <>
void export_csr<c10::complex<double>>(
MKL_SPARSE_EXPORT_CSR_ARGTYPES(c10::complex<double>));
#define MKL_SPARSE_MM_ARGTYPES(scalar_t) \
const sparse_operation_t operation, const scalar_t alpha, \
const sparse_matrix_t A, const struct matrix_descr descr, \
const sparse_layout_t layout, const scalar_t *B, const MKL_INT columns, \
const MKL_INT ldb, const scalar_t beta, scalar_t *C, const MKL_INT ldc
template <typename scalar_t>
inline void mm(MKL_SPARSE_MM_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::mm: not implemented for ",
typeid(scalar_t).name());
}
template <>
void mm<float>(MKL_SPARSE_MM_ARGTYPES(float));
template <>
void mm<double>(MKL_SPARSE_MM_ARGTYPES(double));
template <>
void mm<c10::complex<float>>(MKL_SPARSE_MM_ARGTYPES(c10::complex<float>));
template <>
void mm<c10::complex<double>>(MKL_SPARSE_MM_ARGTYPES(c10::complex<double>));
#define MKL_SPARSE_SPMMD_ARGTYPES(scalar_t) \
const sparse_operation_t operation, const sparse_matrix_t A, \
const sparse_matrix_t B, const sparse_layout_t layout, scalar_t *C, \
const MKL_INT ldc
template <typename scalar_t>
inline void spmmd(MKL_SPARSE_SPMMD_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::spmmd: not implemented for ",
typeid(scalar_t).name());
}
template <>
void spmmd<float>(MKL_SPARSE_SPMMD_ARGTYPES(float));
template <>
void spmmd<double>(MKL_SPARSE_SPMMD_ARGTYPES(double));
template <>
void spmmd<c10::complex<float>>(MKL_SPARSE_SPMMD_ARGTYPES(c10::complex<float>));
template <>
void spmmd<c10::complex<double>>(
MKL_SPARSE_SPMMD_ARGTYPES(c10::complex<double>));
#define MKL_SPARSE_TRSV_ARGTYPES(scalar_t) \
const sparse_operation_t operation, const scalar_t alpha, \
const sparse_matrix_t A, const struct matrix_descr descr, \
const scalar_t *x, scalar_t *y
template <typename scalar_t>
inline void trsv(MKL_SPARSE_TRSV_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::trsv: not implemented for ",
typeid(scalar_t).name());
}
template <>
void trsv<float>(MKL_SPARSE_TRSV_ARGTYPES(float));
template <>
void trsv<double>(MKL_SPARSE_TRSV_ARGTYPES(double));
template <>
void trsv<c10::complex<float>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<float>));
template <>
void trsv<c10::complex<double>>(MKL_SPARSE_TRSV_ARGTYPES(c10::complex<double>));
#define MKL_SPARSE_TRSM_ARGTYPES(scalar_t) \
const sparse_operation_t operation, const scalar_t alpha, \
const sparse_matrix_t A, const struct matrix_descr descr, \
const sparse_layout_t layout, const scalar_t *x, const MKL_INT columns, \
const MKL_INT ldx, scalar_t *y, const MKL_INT ldy
template <typename scalar_t>
inline void trsm(MKL_SPARSE_TRSM_ARGTYPES(scalar_t)) {
TORCH_INTERNAL_ASSERT(
false,
"at::mkl::sparse::trsm: not implemented for ",
typeid(scalar_t).name());
}
template <>
void trsm<float>(MKL_SPARSE_TRSM_ARGTYPES(float));
template <>
void trsm<double>(MKL_SPARSE_TRSM_ARGTYPES(double));
template <>
void trsm<c10::complex<float>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<float>));
template <>
void trsm<c10::complex<double>>(MKL_SPARSE_TRSM_ARGTYPES(c10::complex<double>));
} // namespace sparse
} // namespace mkl
} // namespace at
| 8,007
| 33.666667
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/mkl/SparseDescriptors.h
|
#pragma once
/*
Provides templated descriptor wrappers of MKL Sparse BLAS sparse matrices:
MklSparseCsrDescriptor<scalar_t>(sparse_csr_tensor)
where scalar_t is double, float, c10::complex<double> or c10::complex<float>.
The descriptors are available in at::mkl::sparse namespace.
*/
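/*
  Illustrative usage (a sketch; `csr` stands for a 2D tensor with
  kSparseCsr layout and a non-zero nnz):

    at::mkl::sparse::MklSparseCsrDescriptor<float> desc(csr);
    sparse_matrix_t raw = desc.descriptor();
    // `raw` can now be passed to the wrappers declared in
    // ATen/mkl/SparseBlas.h, e.g. at::mkl::sparse::mv<float>(...).
*/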
#include <ATen/Tensor.h>
#include <ATen/mkl/Exceptions.h>
#include <ATen/mkl/Utils.h>
#include <c10/core/ScalarType.h>
#include <c10/util/MaybeOwned.h>
#include <mkl_spblas.h>
namespace at {
namespace mkl {
namespace sparse {
template <typename T, sparse_status_t (*destructor)(T*)>
struct MklSparseDescriptorDeleter {
void operator()(T* x) {
if (x != nullptr) {
TORCH_MKLSPARSE_CHECK(destructor(x));
}
}
};
template <typename T, sparse_status_t (*destructor)(T*)>
class MklSparseDescriptor {
public:
T* descriptor() const {
return descriptor_.get();
}
T* descriptor() {
return descriptor_.get();
}
protected:
std::unique_ptr<T, MklSparseDescriptorDeleter<T, destructor>> descriptor_;
};
namespace {
inline c10::MaybeOwned<Tensor> prepare_indices_for_mkl(const Tensor& indices) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
isIntegralType(indices.scalar_type(), /*includeBool=*/false));
#ifdef MKL_ILP64
// ILP64 is a 64-bit API version of MKL
// Indices tensor must have ScalarType::Long type
if (indices.scalar_type() == ScalarType::Long) {
return c10::MaybeOwned<Tensor>::borrowed(indices);
} else {
return c10::MaybeOwned<Tensor>::owned(indices.to(ScalarType::Long));
}
#else
// LP64 is a 32-bit API version of MKL
// Indices tensor must have ScalarType::Int type
if (indices.scalar_type() == ScalarType::Int) {
return c10::MaybeOwned<Tensor>::borrowed(indices);
} else {
return c10::MaybeOwned<Tensor>::owned(indices.to(ScalarType::Int));
}
#endif
}
} // anonymous namespace
template <typename scalar_t>
class MklSparseCsrDescriptor
: public MklSparseDescriptor<sparse_matrix, &mkl_sparse_destroy> {
public:
MklSparseCsrDescriptor(const Tensor& input) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY((input.layout() == kSparseCsr || input.layout() == kSparseBsr));
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input.dim() == 2);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
input._nnz() > 0, "MKL doesn't work with empty CSR matrices");
IntArrayRef input_sizes = input.sizes();
auto rows = mkl_int_cast(input_sizes[0], "rows");
auto cols = mkl_int_cast(input_sizes[1], "cols");
auto crow_indices = input.crow_indices();
auto col_indices = input.col_indices();
auto values = input.values();
crow_indices_ = prepare_indices_for_mkl(crow_indices);
col_indices_ = prepare_indices_for_mkl(col_indices);
values_ = values.expect_contiguous();
auto values_ptr = values_->data_ptr<scalar_t>();
auto crow_indices_ptr = crow_indices_->data_ptr<MKL_INT>();
auto col_indices_ptr = col_indices_->data_ptr<MKL_INT>();
sparse_matrix_t raw_descriptor;
if (input.layout() == kSparseBsr) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
values.dim() == 3 && crow_indices.dim() == 1 &&
col_indices.dim() == 1);
TORCH_CHECK(
values.size(-1) == values.size(-2),
"MKL Sparse doesn't support matrices with non-square blocks.");
auto block_size = mkl_int_cast(values.size(-1), "block_size");
create_bsr<scalar_t>(
&raw_descriptor,
SPARSE_INDEX_BASE_ZERO,
SPARSE_LAYOUT_ROW_MAJOR,
rows / block_size,
cols / block_size,
block_size,
crow_indices_ptr,
crow_indices_ptr + 1,
col_indices_ptr,
values_ptr);
} else {
create_csr<scalar_t>(
&raw_descriptor,
SPARSE_INDEX_BASE_ZERO,
rows,
cols,
crow_indices_ptr,
crow_indices_ptr + 1,
col_indices_ptr,
values_ptr);
}
descriptor_.reset(raw_descriptor);
}
MklSparseCsrDescriptor() {
sparse_matrix_t raw_descriptor = nullptr;
descriptor_.reset(raw_descriptor);
}
private:
c10::MaybeOwned<Tensor> crow_indices_;
c10::MaybeOwned<Tensor> col_indices_;
c10::MaybeOwned<Tensor> values_;
};
} // namespace sparse
} // namespace mkl
} // namespace at
| 4,268
| 27.271523
| 101
|
h
|
null |
pytorch-main/aten/src/ATen/mps/EmptyTensor.h
|
// Copyright © 2022 Apple Inc.
#pragma once
#include <ATen/core/TensorBase.h>
namespace at {
namespace detail {
C10_EXPORT TensorBase empty_mps(
IntArrayRef size,
c10::optional<ScalarType> dtype_opt,
c10::optional<Layout> layout_opt,
c10::optional<Device> device_opt,
c10::optional<bool> pin_memory_opt,
c10::optional<c10::MemoryFormat> memory_format_opt);
C10_EXPORT TensorBase empty_mps(
IntArrayRef size, const TensorOptions &options);
C10_EXPORT TensorBase empty_strided_mps(
IntArrayRef size,
IntArrayRef stride,
ScalarType dtype,
c10::optional<Device> device_opt);
C10_EXPORT TensorBase empty_strided_mps(
IntArrayRef size,
IntArrayRef stride,
const TensorOptions &options);
} // namespace detail
} // namespace at
| 784
| 23.53125
| 56
|
h
|
null |
pytorch-main/aten/src/ATen/mps/MPSAllocatorInterface.h
|
// Copyright © 2023 Apple Inc.
#pragma once
#include <c10/core/Allocator.h>
#include <c10/util/Registry.h>
#include <ATen/core/ATen_fwd.h>
namespace at { namespace mps {
// This is a public interface to access MPSAllocator.
// Do not declare methods that would depend on MPS or Metal frameworks.
class IMPSAllocator : public c10::Allocator {
public:
// see the comments in MPSAllocator.h for the description of these methods.
virtual void emptyCache() const = 0;
virtual ssize_t getUnalignedBufferSize(const void* ptr) const = 0;
virtual IntArrayRef getBufferShape(const void* ptr) const = 0;
virtual id_t getBufferId(const void* ptr) const = 0;
virtual void setBufferShape(const void* ptr, const IntArrayRef& shape) const = 0;
virtual bool isSharedBuffer(const void* ptr) const = 0;
virtual bool isSharedStorageSupported() const = 0;
virtual c10::DataPtr allocScalarBufferWithValue(void* value, size_t size) const = 0;
virtual std::string formatSize(size_t size) const = 0;
virtual void setLowWatermarkRatio(double ratio) const = 0;
virtual void setHighWatermarkRatio(double ratio) const = 0;
virtual ssize_t getLowWatermarkValue() const = 0;
virtual size_t getLowWatermarkLimit() const = 0;
virtual size_t getHighWatermarkLimit() const = 0;
virtual size_t getTotalAllocatedMemory() const = 0;
virtual size_t getCurrentAllocatedMemory() const = 0;
virtual size_t getDriverAllocatedMemory() const = 0;
};
class IMpsAllocatorCallback {
public:
enum class EventType {
ALLOCATED, // buffer got allocated to be used immediately
RECYCLED, // buffer pulled from free list to be reused
FREED, // buffer put to free list for future recycling
RELEASED, // buffer memory released
ALLOCATION_FAILED // buffer allocation failed
};
virtual ~IMpsAllocatorCallback() = default;
virtual void executeMPSAllocatorCallback(void* ptr, EventType event) = 0;
};
// MPS allocator will execute every registered callback when a block of memory is freed.
C10_DECLARE_REGISTRY(MPSAllocatorCallbacksRegistry, IMpsAllocatorCallback);
#define REGISTER_MPS_ALLOCATOR_CALLBACK(name, ...) \
C10_REGISTER_CLASS(MPSAllocatorCallbacksRegistry, name, __VA_ARGS__);
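// Illustrative usage (a sketch; `MyCallback` and `my_callback` are
// hypothetical names):
//
//   struct MyCallback : public at::mps::IMpsAllocatorCallback {
//     void executeMPSAllocatorCallback(void* ptr, EventType event) override {
//       // e.g., invalidate cached state when a buffer is FREED or RELEASED
//     }
//   };
//   REGISTER_MPS_ALLOCATOR_CALLBACK(my_callback, MyCallback);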
IMPSAllocator* getIMPSAllocator(bool sharedAllocator = false);
}} // namespace at::mps
| 2,297
| 40.035714
| 88
|
h
|
null |
pytorch-main/aten/src/ATen/mps/MPSDevice.h
|
// Copyright © 2022 Apple Inc.
#pragma once
#include <c10/core/Allocator.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#ifdef __OBJC__
#include <Foundation/Foundation.h>
#include <Metal/Metal.h>
#include <MetalPerformanceShaders/MetalPerformanceShaders.h>
typedef id<MTLDevice> MTLDevice_t;
typedef id<MTLLibrary> MTLLibrary_t;
typedef id<MTLComputePipelineState> MTLComputePipelineState_t;
#else
typedef void* MTLDevice;
typedef void* MTLDevice_t;
typedef void* MTLLibrary_t;
typedef void* MTLComputePipelineState_t;
#endif
namespace at {
namespace mps {
// Helper enum to check if an MPSGraph op is supported in a given macOS version
enum class MacOSVersion : uint32_t {
MACOS_VER_13_0_PLUS = 0,
MACOS_VER_13_1_PLUS,
MACOS_VER_13_2_PLUS,
MACOS_VER_13_3_PLUS,
};
//-----------------------------------------------------------------
// MPSDevice
//
// MPSDevice is a singleton class that returns the default device
//-----------------------------------------------------------------
class TORCH_API MPSDevice {
public:
/**
* MPSDevice should not be cloneable.
*/
MPSDevice(MPSDevice& other) = delete;
/**
* MPSDevice should not be assignable.
*/
void operator=(const MPSDevice&) = delete;
/**
* Gets single instance of the Device.
*/
static MPSDevice* getInstance();
/**
* Returns the single device.
*/
MTLDevice_t device() {
return _mtl_device;
}
/**
* Returns whether running on Ventura or newer
*/
bool isMacOS13Plus(MacOSVersion version) const;
MTLComputePipelineState_t metalIndexingPSO(const std::string &kernel);
MTLLibrary_t getMetalIndexingLibrary();
~MPSDevice();
private:
static MPSDevice* _device;
MTLDevice_t _mtl_device;
MTLLibrary_t _mtl_indexing_library;
MPSDevice();
};
TORCH_API bool is_available();
TORCH_API bool is_macos_13_or_newer(MacOSVersion version = MacOSVersion::MACOS_VER_13_0_PLUS);
TORCH_API at::Allocator* GetMPSAllocator(bool useSharedAllocator = false);
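// Illustrative usage (a sketch): gating an MPSGraph op on the installed
// macOS version.
//
//   if (is_macos_13_or_newer(MacOSVersion::MACOS_VER_13_2_PLUS)) {
//     // dispatch to the newer MPSGraph API
//   } else {
//     // fall back to an older decomposition
//   }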
} // namespace mps
} // namespace at
| 2,124
| 23.425287
| 94
|
h
|
null |
pytorch-main/aten/src/ATen/mps/MPSGeneratorImpl.h
|
// Copyright © 2022 Apple Inc.
#pragma once
#include <ATen/core/Generator.h>
#include <ATen/core/PhiloxRNGEngine.h>
#include <c10/core/GeneratorImpl.h>
#include <c10/util/Optional.h>
namespace at {
namespace mps {
namespace detail {
static const uint32_t PHILOX_STATE_N = 7;
struct rng_data_pod {
std::array<uint32_t, PHILOX_STATE_N> state{1};
uint64_t seed = default_rng_seed_val;
};
TORCH_API const Generator& getDefaultMPSGenerator();
TORCH_API Generator createMPSGenerator(uint64_t seed_val = default_rng_seed_val);
} // namespace detail
} // namespace mps
struct TORCH_API MPSGeneratorImpl : public c10::GeneratorImpl {
// Constructors
MPSGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
~MPSGeneratorImpl() override = default;
// MPSGeneratorImpl methods
std::shared_ptr<MPSGeneratorImpl> clone() const;
void set_current_seed(uint64_t seed) override;
void set_offset(uint64_t offset) override;
uint64_t get_offset() const override;
uint64_t current_seed() const override;
uint64_t seed() override;
void set_state(const c10::TensorImpl& new_state) override;
c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
void update_philox_counters();
  void set_engine(at::Philox4_32 engine) { engine_ = engine; }
  at::Philox4_32 engine() { return engine_; }
  uint32_t* state_data() { return data_.state.data(); }
  static DeviceType device_type() { return DeviceType::MPS; }
private:
mps::detail::rng_data_pod data_;
at::Philox4_32 engine_;
MPSGeneratorImpl* clone_impl() const override;
};
} // namespace at
| 1,578
| 27.709091
| 81
|
h
|
null |
pytorch-main/aten/src/ATen/mps/MPSGuardImpl.h
|
// Copyright © 2022 Apple Inc.
#pragma once
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <ATen/Context.h>
#include <ATen/mps/MPSStream.h>
#ifdef __OBJC__
#include <Foundation/Foundation.h>
#include <Metal/Metal.h>
#include <MetalPerformanceShaders/MetalPerformanceShaders.h>
#endif
#include <ATen/Tensor.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorImpl.h>
#include <sys/_types/_size_t.h>
#include <memory>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/intrusive_ptr.h>
namespace at {
namespace mps {
// TODO: Move the MPSGuardImpl to inherit from NoOpDeviceGuardImpl
// https://github.com/pytorch/pytorch/issues/77170
struct TORCH_API MPSGuardImpl final : public c10::impl::DeviceGuardImplInterface {
static constexpr c10::DeviceType static_type = c10::DeviceType::MPS;
// constructor
MPSGuardImpl() {}
explicit MPSGuardImpl(c10::DeviceType t) {
TORCH_INTERNAL_ASSERT(t == c10::DeviceType::MPS);
}
// returns the type
c10::DeviceType type() const override {
return c10::DeviceType::MPS;
}
Device exchangeDevice(Device d) const override {
return Device(c10::DeviceType::MPS, 0);
}
Device getDevice() const override {
return Device(c10::DeviceType::MPS, 0);
}
c10::optional<Device> uncheckedGetDevice() const noexcept {
return Device(c10::DeviceType::MPS, 0);
}
void setDevice(Device d) const override {
TORCH_INTERNAL_ASSERT(d.is_mps());
}
void uncheckedSetDevice(Device d) const noexcept override {
// TODO: Currently setting only device 0
}
Stream getStream(Device d) const noexcept override {
return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
}
Stream getDefaultStream(Device d) const override {
return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
}
// NB: These do NOT set the current device
Stream exchangeStream(Stream s) const noexcept override {
return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
}
DeviceIndex deviceCount() const noexcept override {
if (at::hasMPS()) {
//TODO: extend it for multi-device case
return 1;
} else {
return 0;
}
}
// Event-related functions
void createEvent(
mpsEvent_t* event,
const EventFlag flag) const;
void destroyEvent(
void* event,
const DeviceIndex device_index) const noexcept override;
void record(
void** event,
const Stream& stream,
const DeviceIndex device_index,
const EventFlag flag) const override;
void block(
void* event,
const Stream& stream) const override;
bool queryEvent(void* event) const override;
};
/// A variant of OptionalDeviceGuard that is specialized for MPS.
struct OptionalMPSGuard {
explicit OptionalMPSGuard() : guard_() {}
explicit OptionalMPSGuard(c10::optional<Device> device_opt)
: guard_(device_opt) {}
/// Set the current MPS device to the passed device index, if it is not
/// nullopt
explicit OptionalMPSGuard(c10::optional<DeviceIndex> device_index_opt)
: guard_(device_index_opt) {}
// Copy is not allowed
OptionalMPSGuard(const OptionalMPSGuard&) = delete;
OptionalMPSGuard& operator=(const OptionalMPSGuard&) = delete;
OptionalMPSGuard(OptionalMPSGuard&& other) = delete;
OptionalMPSGuard& operator=(OptionalMPSGuard&& other) = delete;
/// Sets the MPS device to the given device, initializing the guard if it
  /// is not already initialized. Errors if the given device is not an MPS
  /// device.
void set_device(Device device) {
guard_.set_device(device);
}
/// Sets the MPS device to the given device, initializing the guard if it is
  /// not already initialized. Errors if the given device is not an MPS device.
void reset_device(Device device) {
guard_.reset_device(device);
}
/// Sets the MPS device to the given device index, initializing the guard if
/// it is not already initialized.
void set_index(DeviceIndex device_index) {
guard_.set_index(device_index);
}
/// Returns the device that was set immediately prior to initialization of the
/// guard, or nullopt if the guard is uninitialized.
c10::optional<Device> original_device() const {
return guard_.original_device();
}
/// Returns the most recent device that was set using this device guard,
/// either from construction, or via set_device, if the guard is initialized,
/// or nullopt if the guard is uninitialized.
c10::optional<Device> current_device() const {
return guard_.current_device();
}
/// Restore the original MPS device, resetting this guard to uninitialized
/// state.
void reset() {
guard_.reset();
}
private:
c10::impl::InlineOptionalDeviceGuard<MPSGuardImpl> guard_;
};
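// Illustrative usage (a sketch; the optional device comes from the caller):
//
//   void maybe_run_on(c10::optional<Device> device_opt) {
//     OptionalMPSGuard guard(device_opt); // no-op if device_opt is nullopt
//     // ... launch MPS work here ...
//   } // the original device, if one was set, is restored on scope exit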
C10_REGISTER_GUARD_IMPL(MPS, MPSGuardImpl);
}} // namespace at::mps
| 4,912
| 27.398844
| 82
|
h
|
null |
pytorch-main/aten/src/ATen/mps/MPSHooks.h
|
// Copyright © 2022 Apple Inc.
#pragma once
#include <ATen/detail/MPSHooksInterface.h>
#include <ATen/Generator.h>
#include <c10/util/Optional.h>
namespace at { namespace mps {
// The real implementation of MPSHooksInterface
struct MPSHooks : public at::MPSHooksInterface {
MPSHooks(at::MPSHooksArgs) {}
void initMPS() const override;
// MPSDevice interface
bool hasMPS() const override;
bool isOnMacOS13orNewer(unsigned minor) const override;
// MPSGeneratorImpl interface
const Generator& getDefaultMPSGenerator() const override;
// MPSStream interface
void deviceSynchronize() const override;
void commitStream() const override;
void* getCommandBuffer() const override;
void* getDispatchQueue() const override;
// MPSAllocator interface
Allocator* getMPSDeviceAllocator() const override;
void emptyCache() const override;
size_t getCurrentAllocatedMemory() const override;
size_t getDriverAllocatedMemory() const override;
void setMemoryFraction(double ratio) const override;
void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const override;
void profilerStopTrace() const override;
};
}} // at::mps
| 1,177
| 28.45
| 91
|
h
|
null |
pytorch-main/aten/src/ATen/mps/MPSProfiler.h
|
// Copyright © 2022 Apple Inc.
#pragma once
#include <ATen/Tensor.h>
#include <ATen/mps/MPSStream.h>
#include <ATen/mps/MPSAllocatorInterface.h>
#include <os/signpost.h>
#include <os/log.h>
#include <sstream>
#include <string>
#include <atomic>
#include <unordered_map>
#include <utility>
#include <ctime>
namespace at::mps {
namespace Profiler {
struct BaseInfo {
// profiling info types
enum class Type {
GRAPH,
KERNEL,
COPY,
CPU_FALLBACK,
};
BaseInfo(Type infoType, uint64_t Id, const uintptr_t Handle) :
type(infoType), profileId(Id), handle(Handle) { }
virtual ~BaseInfo() = default;
// type of profiling info
Type type;
// unique profile ID for execution instances of operations or copies
uint64_t profileId;
// ID generated by os_signpost
// since it's possible to use event and interval-based signposts at the
// same time, we need separate IDs for each.
os_signpost_id_t eventSignpostId = 0, intervalSignpostId = 0;
// accumulated GPU time in ms (obtained from CompletionHandler's "GPUEndTime - GPUStartTime")
std::atomic<double> totalGpuTime{0.0};
// accumulated Scheduling time in ms (obtained from CompletionHandler's "KernelEndTime - KernelStartTime")
std::atomic<double> totalSchedulingTime{0.0};
// indicates if the operation or copy execution has completed
std::atomic_bool completed{false};
// handle used to identify the profile info's instance (usually the pointer)
const uintptr_t handle;
virtual const std::string toString(double gpuTime = 0, double schedulingTime = 0) const;
// builds a string for a tensor (format: Device:ScalarType[tensor.sizes()])
static std::string buildTensorString(const Tensor& tensor, bool includeBufferId = false) {
if (tensor.defined()) {
std::stringstream tensorStr;
auto deviceType = tensor.device().type();
tensorStr << c10::DeviceTypeName(deviceType);
// see comments for INCLUDE_BUFFER_ID
if (includeBufferId && deviceType == at::kMPS) {
id<MTLBuffer> buffer = __builtin_bit_cast(id<MTLBuffer>, tensor.storage().data());
tensorStr << "(buf#" << (getIMPSAllocator()->getBufferId(buffer))
<< ":" << buffer.retainCount << ")";
}
tensorStr << ":"
<< tensor.scalar_type() << tensor.sizes();
return tensorStr.str();
} else {
return "undefined";
}
}
static uint64_t getTime() {
return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
}
};
struct OperationInfo : BaseInfo {
OperationInfo(const void* Handle, bool IsGraph, uint64_t Id, const std::string& StrKey) :
BaseInfo(IsGraph ? Type::GRAPH : Type::KERNEL, Id, uintptr_t(Handle)), strKey(StrKey) { }
uint64_t runCount = 0;
std::string strKey;
const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
// builds a string for a kernel
static std::string buildKernelString(const std::string& kernelName,
const TensorList& tensors,
bool includeBufferId = false) {
std::stringstream kernelStr;
kernelStr << kernelName;
for (const Tensor& tensor: tensors) {
kernelStr << ":" << BaseInfo::buildTensorString(tensor, includeBufferId);
}
return kernelStr.str();
}
};
struct CpuFbInfo : BaseInfo {
CpuFbInfo(uint64_t Id, const std::string& OpName) :
BaseInfo(Type::CPU_FALLBACK, Id, 0), opName(OpName) { }
uint64_t runCount = 0;
// the current and total overhead of copies in bytes required to convert the Op's
// input tensors from MPS to CPU and then output from CPU back to MPS
size_t currentCopyOverhead = 0;
size_t totalCopyOverhead = 0;
std::string opName;
std::string strKey;
uint64_t startTime = 0;
const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
void updateCopyOverhead(const TensorList& tensors) {
currentCopyOverhead = 0;
for (const Tensor& tensor: tensors) {
if (tensor.defined()) {
currentCopyOverhead += tensor.nbytes();
}
}
totalCopyOverhead += currentCopyOverhead;
}
};
struct CopyInfo : BaseInfo {
enum class Kind {
MPS_TO_MPS,
MPS_TO_CPU,
CPU_TO_MPS,
};
CopyInfo(const void* Handle, size_t Length, uint64_t Id, bool IsNonBlocking, bool UsesBlitter) :
BaseInfo(Type::COPY, Id, uintptr_t(Handle)), kind(Kind::MPS_TO_MPS),
length(Length), isNonBlocking(IsNonBlocking), usesBlitter(UsesBlitter) { }
Kind kind;
size_t length;
bool isNonBlocking;
bool usesBlitter;
std::string srcStrKey;
std::string dstStrKey;
// for copies that don't use blitters, we measure CPU time
uint64_t startTime = 0;
const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
static std::string buildTensorString(const void* buffer, const OptionalTensorRef tensor, bool includeBufferId = false);
static bool isStorageOnMPS(const void* buffer, const OptionalTensorRef tensor) {
if (tensor.has_value()) {
return tensor->device().type() == at::kMPS;
}
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(buffer);
// getUnalignedBufferSize() returns -1 if input buffer is not on MPS device
return getIMPSAllocator()->getUnalignedBufferSize(buffer) >= 0;
}
static Kind getCopyKind(const void* srcBuffer, const void* dstBuffer,
const OptionalTensorRef srcTensor, const OptionalTensorRef dstTensor) {
const bool isSrcOnMPS = isStorageOnMPS(srcBuffer, srcTensor);
const bool isDstOnMPS = isStorageOnMPS(dstBuffer, dstTensor);
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isSrcOnMPS || isDstOnMPS);
if (isSrcOnMPS && !isDstOnMPS) {
return Kind::MPS_TO_CPU;
} else if (!isSrcOnMPS && isDstOnMPS) {
return Kind::CPU_TO_MPS;
}
return Kind::MPS_TO_MPS;
}
};
struct CopyStat : CopyInfo {
explicit CopyStat(std::string CopyKindStr) :
CopyInfo(nullptr, 0, 0, false, false), kindStr(std::move(CopyKindStr)) {}
// total number of copies
size_t totalCount = 0;
// number of Scalar copies (i.e., less than sizeof(int64))
size_t scalarsCount = 0;
// number of blocking copies (i.e., require syncing to GPU)
size_t blockingCount = 0;
// number of copies that used memcpy(), instead of Metal Blit Encoder
size_t memcpyCount = 0;
// accumulated GPU time in ms for the scalar copies
std::atomic<double> scalarsGpuTime{0.0};
// copy kind in string type
std::string kindStr;
};
class MPSProfiler {
public:
// lower 16 bits used for profiler options
enum ProfileOptions : uint32_t {
OPTIONS_NONE = 0,
    // ALL_* means all signpost types (RUN_OPERATION|BLIT_COPY|CPU_FALLBACK, etc.)
// (used for convenience to not compute bit flags by OR-ing manually)
// trace all signpost types using events
ALL_SIGNPOST_EVENTS = (1 << 0),
// trace all signpost types using intervals
ALL_SIGNPOST_INTERVALS = (1 << 1),
// always wait for command buffer to finish executing after each commit
WAIT_UNTIL_COMPLETED = (1 << 2),
// for interval-based signposts, include the scheduling portion of
// Graph/Kernel/Copy executions as well.
    // if this flag is disabled, only the "GPU run time" is included in the
    // interval, and not the scheduling time.
INCLUDE_SCHEDULE_INTERVAL = (1 << 3),
    // use these if you need to trace signpost types individually (rarely required)
// trace signpost using intervals
USE_INTERVALS = (1 << 4),
// trace signpost by emitting events
USE_EVENTS = (1 << 5),
    // used for sanity check (change this when a new option is added)
OPTIONS_COUNT = (USE_EVENTS << 1) - 1,
};
// when adding new types, #define the type string in MPSProfiler.mm as well.
// upper 16 bits used for event types
enum SignpostTypes : uint32_t {
SIGNPOST_NONE = 0,
// trace signposts for PyTorch operation executions
RUN_OPERATION = (1 << 16),
// trace signposts for blitter copies
BLIT_COPY = (1 << 17),
// trace signposts for ops that fall back on CPU
CPU_FALLBACK = (1 << 18),
    // used for sanity check (change this when a new type is added)
SIGNPOST_COUNT = (CPU_FALLBACK << 1) - 1,
};
enum LogOptions : uint32_t {
LOG_NONE = 0,
// Info logging options during execution
// -------------------------------------
// prints operation info (id/key/run_count) during execution
OPERATION_INFO = (1 << 0),
// prints copy info (src/dst tensors/buffers, size, etc.) during execution
COPY_INFO = (1 << 1),
// prints CPU Fallback info (id/runCount/opName/copyOverhead) during execution
CPU_FALLBACK_INFO = (1 << 2),
// Profiling Statistics logging options when process terminates
// ------------------------------------------------------------
// prints all stats (OPERATION_STATS, COPY_STATS, CPU_FALLBACK_STATS) before process terminates
// this is convenient to not combine following stats bit flags manually
ALL_STATS = (1 << 3),
// prints operation stats (GPU times, run count, etc.) before process terminates
OPERATION_STATS = (1 << 4),
// prints copies stats (GPU times, copy kinds, sizes, etc.) before process terminates
COPY_STATS = (1 << 5),
// prints CPU Fallback stats (CPU times, run times, size of MPS<->CPU copies
// for tensors, etc.) before process terminates
CPU_FALLBACK_STATS = (1 << 6),
// Metadata format options when logging the info
// ---------------------------------------------
// if enabled, includes GPU run time in metadata (i.e., GPUEndTime-GPUStartTime
// from Metal Command Buffers) (e.g., [GPU=0.324 ms])
INCLUDE_GPU_TIME = (1 << 7),
// if enabled, includes GPU scheduling time in metadata separately
// (i.e., KernelEndTime-KernelStartTime from Metal Command Buffers)
// e.g., [GPU=0.324 ms, KRNL=0.036 ms]
INCLUDE_KERNEL_TIME = (1 << 8),
// if enabled, includes the unique buffer ID in metadata for the storage
// of a tensor that was allocated on MPSAllocator. This is useful (along with
// the EV "PYTORCH_DEBUG_MPS_ALLOCATOR") to identify buffers that are involved
// with various operations.
INCLUDE_BUFFER_ID = (1 << 9),
    // used for sanity check (change this when a new option is added)
LOG_COUNT = (INCLUDE_BUFFER_ID << 1) - 1,
};
explicit MPSProfiler();
~MPSProfiler();
// the handle is either "MPSGraph*" or "id<MTLComputePipelineState>" for Metal Kernels
// the beginProfile*() functions return a profileId which is unique per graph/kernel/copy
uint64_t beginProfileKernel(const void* handle, const std::string& strKey, bool isGraph);
uint64_t beginProfileKernel(const void* handle, const std::string& kernelName, const TensorList& tensors);
uint64_t beginProfileCopy(const void* srcBuffer, const void* dstBuffer,
const OptionalTensorRef srcTensor,
const OptionalTensorRef dstTensor,
size_t length, bool isNonBlocking, bool usesBlitter = true);
uint64_t beginProfileCPUFallback(const std::string& opName, const TensorList& tensors);
void beginProfileGPUInterval(const void* handle);
void endProfileCopy(uint64_t profileId, SyncType syncType);
void endProfileKernel(const void* handle, SyncType syncType = SyncType::NONE);
void endProfileCPUFallback(const std::string& opName);
// these are used to hook into Python bindings for torch.mps.profiler module.
// this enables generating OS Signpost traces from MPSProfiler on-demand
// during runtime (instead of environment variables).
// The "mode" could be either "interval", "event", or both "interval,event"
// for interval-based and/or event-based signpost tracing.
  void StartTrace(const std::string& mode, bool waitUntilCompleted);
void StopTrace();
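  // Illustrative usage (a sketch mirroring the torch.mps.profiler bindings):
  //
  //   auto& profiler = at::mps::getMPSProfiler();
  //   profiler.StartTrace("interval,event", /*waitUntilCompleted=*/false);
  //   // ... run the workload to be traced ...
  //   profiler.StopTrace();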
// convenience functions to indicate whether signpost tracing or
// logging are enabled for the SignpostTypes
bool isOperationProfilingEnabled() const {
return (m_signpost_types & SignpostTypes::RUN_OPERATION) ||
(m_log_options & (LogOptions::OPERATION_INFO | LogOptions::OPERATION_STATS));
}
bool isCopyProfilingEnabled() const {
return (m_signpost_types & SignpostTypes::BLIT_COPY) ||
(m_log_options & (LogOptions::COPY_INFO | LogOptions::COPY_STATS));
}
bool isCPUFallbackProfilingEnabled() const {
return (m_signpost_types & SignpostTypes::CPU_FALLBACK) ||
(m_log_options & (LogOptions::CPU_FALLBACK_INFO | LogOptions::CPU_FALLBACK_STATS));
}
bool isSignpostTracingEnabled() const {
return (m_signpost_types != SignpostTypes::SIGNPOST_NONE);
}
private:
// indicates what type of signpost types are enabled and traced by MPS profiler.
uint32_t m_signpost_types = 0;
uint32_t m_profile_options = 0;
uint32_t m_log_options = 0;
uint64_t m_kernel_counter = 0;
uint64_t m_graph_counter = 0;
uint64_t m_cpu_fb_counter = 0;
uint64_t m_copy_counter = 0;
// technically, it's possible to trace both events and intervals at the same time
// so we use separate os_log categories for them
os_log_t m_os_log_events;
os_log_t m_os_log_intervals;
// stats logging could run either from destructor or signal handler
// so this is used to check if logging has already started.
std::atomic_bool hasLoggedStats{false};
// indicates there are pending completionHandler callbacks that haven't been called yet.
std::atomic_bool hasPendingCompletionHandlers{false};
// used to capture sigint signal to log profiling stats
static struct sigaction currentSigint, previousSigint;
// We use the following lists for two reasons:
  // 1- for interval-based signposts the "begin" point won't be in the same
  // function as the "end" point, where we need to be able to retrieve the signpost's info
  // 2- if operation info needs to be logged when the process ends using LogOptions::OPERATION_INFO.
// the pointer key for this map is either "MPSGraph*" or "id<MTLComputePipelineState>" for Metal Kernels
// this list is retained and could be logged along with aggregate profiling numbers when the process ends.
std::unordered_map<uintptr_t, std::unique_ptr<OperationInfo>> m_op_info_list{};
// the string key for this map is the op name that we fall back to execute on CPU
// this list is retained and could be logged along with aggregate profiling numbers when the process ends.
std::unordered_map<std::string, std::unique_ptr<CpuFbInfo>> m_cpu_fb_info_list{};
// this list contains the info for copies, and its key is the unique profileId
// which is generated from m_copy_counter
// The copyInfo list is not retained.
std::unordered_map<uint64_t, std::unique_ptr<CopyInfo>> m_copy_info_list{};
// a short list that contains copy stats
std::unordered_map<CopyInfo::Kind, std::unique_ptr<CopyStat>> m_copy_stat_list{};
void initialize();
void beginProfileExecution(BaseInfo& info, bool cpuExecution = false);
void endProfileExecution(BaseInfo& info, os_signpost_id_t event_signpost_id,
os_signpost_id_t interval_signpost_id,
double gpuTime, double schedulingTime);
void addProfilerScheduledHandler(BaseInfo& info);
void addProfilerCompletedHandler(BaseInfo& info, SyncType syncType);
void emitSignpostEvent(SignpostTypes signpost_type, os_signpost_id_t signpost_id,
const std::string& msg) const;
void beginSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id,
const std::string& msg) const;
void endSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id) const;
void updateCopyStats(const CopyInfo& copyInfo, double gpuTime, double schedulingTime);
// returns true if logging the profiling info "during the execution" is enabled
bool isProfileInfoLoggingEnabled(BaseInfo::Type infoType, bool isExecutionEnded);
// logs all the profiling stats that are enabled
void logProfilingStats();
// logs kernel profiling stats when the process ends.
void logOperationsProfilingStats(std::FILE* f) const;
// logs CPU Fallback profiling stats when the process ends.
void logCPUFallbackProfilingStats(std::FILE* f) const;
// logs copy profiling stats when the process ends.
void logCopyProfilingStats(std::FILE* f) const;
os_signpost_id_t generateSignpostId(os_signpost_type_t signpostType, const void* ptr = nullptr);
static SignpostTypes getSignpostType(BaseInfo::Type infoType);
static void handleIntSignal(int signal);
};
} // namespace Profiler
Profiler::MPSProfiler& getMPSProfiler();
} // namespace at::mps
| 16,688
| 41.357868
| 121
|
h
|
null |
pytorch-main/aten/src/ATen/mps/MPSStream.h
|
// Copyright © 2022 Apple Inc.
#pragma once
#include <cstdint>
#include <utility>
#include <c10/core/DeviceGuard.h>
#include <c10/util/Exception.h>
#include <c10/core/Stream.h>
#include <ATen/mps/MPSDevice.h>
#ifdef __OBJC__
#include <Foundation/Foundation.h>
#include <Metal/Metal.h>
#include <MetalPerformanceShaders/MetalPerformanceShaders.h>
#include <MetalPerformanceShadersGraph/MetalPerformanceShadersGraph.h>
typedef id<MTLCommandQueue> MTLCommandQueue_t;
typedef id<MTLCommandBuffer> MTLCommandBuffer_t;
typedef id<MTLComputeCommandEncoder> MTLComputeCommandEncoder_t;
typedef id<MTLSharedEvent> MTLSharedEvent_t;
typedef id<MTLDevice> MTLDevice_t;
#else
typedef void* MTLCommandQueue_t;
typedef void* MTLCommandQueue;
typedef void* MTLCommandBuffer_t;
typedef void* MTLCommandBuffer;
typedef void* MTLComputeCommandEncoder_t;
typedef void* MTLSharedEvent_t;
typedef void* dispatch_queue_t;
typedef void* MTLDevice_t;
#define nil NULL
#endif
namespace at {
namespace mps {
//-----------------------------------------------------------------
// MPSStream
//-----------------------------------------------------------------
enum class SyncType {
NONE, // no commit to command buffer
COMMIT, // commit and flush the command buffer
COMMIT_AND_WAIT, // flush and wait for command buffer execution to finish
COMMIT_AND_CONTINUE,// commit and continue with a new underlying command buffer
COMMIT_ADAPTIVE, // commit adaptively based on available memory
};
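// Illustrative usage sketch (an assumption-based example, not a canonical call site
// from the backend): pick a SyncType based on whether the caller must block until
// the queued GPU work has finished.
//
//   at::mps::MPSStream* stream = at::mps::getCurrentMPSStream();
//   // COMMIT flushes pending work without blocking the CPU;
//   // COMMIT_AND_WAIT additionally waits for the command buffer to finish executing.
//   stream->synchronize(at::mps::SyncType::COMMIT_AND_WAIT);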
class TORCH_API MPSStream
{
public:
enum Unchecked { UNCHECKED };
/// Construct a MPSStream from a Stream. This construction is checked,
/// and will raise an error if the Stream is not, in fact, a MPS stream.
explicit MPSStream(Stream stream);
~MPSStream();
MTLCommandQueue_t commandQueue() const { return _commandQueue; };
dispatch_queue_t queue() const { return _serialQueue; }
MPSCommandBuffer* commandBuffer();
MTLComputeCommandEncoder_t commandEncoder();
void endKernelCoalescing();
void synchronize(SyncType syncType);
void fill(id<MTLBuffer> buffer, uint8_t value, size_t length, size_t offset, SyncType syncType = SyncType::NONE);
void copy(id<MTLBuffer> srcBuffer, id<MTLBuffer> dstBuffer,
size_t length, size_t srcOffset, size_t dstOffset,
uint64_t profileId, SyncType syncType = SyncType::NONE);
void copy_and_sync(id<MTLBuffer> srcBuffer, id<MTLBuffer> dstBuffer,
size_t length, size_t srcOffset, size_t dstOffset,
bool non_blocking, uint64_t profileId);
void executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDictionary* results, SyncType syncType = SyncType::NONE);
void addCompletedHandler(MTLCommandBufferHandler block);
/// Get the MPS device index that this stream is associated with.
c10::DeviceIndex device_index() const { return _stream.device_index(); }
MTLCommandQueue_t stream() const { return _commandQueue; };
MTLDevice_t device() const { return [_commandQueue device];}
/// Explicit conversion to Stream.
Stream unwrap() const { return _stream; }
private:
Stream _stream;
MTLCommandQueue_t _commandQueue = nil;
MPSCommandBuffer* _commandBuffer = nil;
MPSCommandBuffer* _prevCommandBuffer = nil;
MTLComputeCommandEncoder_t _commandEncoder = nil;
MPSGraphExecutionDescriptor *_executionDescriptor = nil;
MPSGraphCompilationDescriptor *_compilationDescriptor = nil;
dispatch_queue_t _serialQueue = nullptr;
// CommitAndContinue is enabled by default
bool _enableCommitAndContinue = true;
// use synchronize() to access any of these commit functions outside MPSStream
void commit();
void commitAndWait();
void commitAndContinue();
void flush();
};
/**
* Get the current MPS stream
*/
TORCH_API MPSStream* getCurrentMPSStream();
/**
* Get the default MPS stream
*/
TORCH_API MPSStream* getDefaultMPSStream();
//-----------------------------------------------------------------
// MPSStreamImpl
//-----------------------------------------------------------------
class TORCH_API MPSStreamImpl
{
public:
/**
* Gets single instance of the MPSStream.
*/
static MPSStream* getInstance();
private:
static MPSStream* _stream;
MPSStreamImpl();
};
//-----------------------------------------------------------------
// MPSEvent
//-----------------------------------------------------------------
struct TORCH_API MPSEvent
{
// for a new instance of MPSEvent, sometimes we want an empty shell and don't
// necessarily want to create events or listeners. So we defer initialization
// until we actually use the event (e.g., record, notify, etc.)
MPSEvent(bool deferInitialization = true);
~MPSEvent();
MTLSharedEvent_t event() const {return _event; }
void recordEvent(bool syncEvent = false);
void waitForEvent(bool syncEvent = false); // waits on the cpu
void notifyEvent(MTLSharedEventNotificationBlock block);
bool queryEvent() const;
uint64_t getCurrentValue() const { return _signalCounter; }
void setCurrentValue(uint64_t currValue) { _signalCounter = currValue; }
private:
bool is_initialized;
uint64_t _signalCounter;
MPSStream* _stream;
MTLSharedEvent_t _event;
MTLSharedEventListener* _listener;
void initialize();
};
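// Deferred-initialization sketch (illustrative only, based on the comment above):
// the event is constructed as an empty shell and only creates its underlying
// MTLSharedEvent/listener the first time it is actually used.
//
//   at::mps::MPSEvent ev;                    // deferInitialization defaults to true
//   ev.recordEvent(/*syncEvent=*/true);      // first use initializes the event
//   ev.waitForEvent(/*syncEvent=*/true);     // CPU-side wait on the recorded value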
typedef MPSEvent* mpsEvent_t;
} // namespace mps
} // namespace at
| 5,349
| 30.656805
| 123
|
h
|
null |
pytorch-main/aten/src/ATen/native/Activation.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
#include <c10/util/Exception.h>
#include <c10/util/string_view.h>
namespace c10 {
class Scalar;
}
namespace at {
struct TensorIterator;
struct TensorIteratorBase;
class TensorBase;
}
namespace at::native {
// These constants control the approximation behavior of the gelu function.
enum class GeluType {
None, // Baseline Gelu
  Tanh,                // Tanh Gelu Approximation
END
};
static GeluType get_gelutype_enum(const c10::string_view approximate) {
if (approximate == "none") {
return GeluType::None;
} else if (approximate == "tanh") {
return GeluType::Tanh;
} else {
TORCH_CHECK(false, "approximate argument must be either none or tanh.");
}
}
static std::string gelutype_to_string(const GeluType type) {
switch(type) {
case GeluType::None: return "none";
case GeluType::Tanh: return "tanh";
default: TORCH_CHECK(false, "unknown GELU type: ", static_cast<int>(type));
}
}
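// Small usage sketch (illustrative): the two helpers above simply round-trip the
// string form of the approximation through the enum.
//
//   GeluType t = get_gelutype_enum("tanh");   // GeluType::Tanh
//   std::string s = gelutype_to_string(t);    // "tanh"
//   GeluType n = get_gelutype_enum("none");   // GeluType::None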
using structured_activation_fn = void (*)(TensorIteratorBase&);
using structured_activation_backward_fn = void (*)(TensorIteratorBase&);
using activation_fn = void (*)(TensorIterator&);
using activation_backward_fn = void (*)(TensorIterator&);
using softplus_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
using softplus_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
using threshold_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&);
using hardtanh_backward_fn = void (*)(TensorIterator&, const c10::Scalar&, const c10::Scalar&);
using hardsigmoid_fn = void(*)(TensorIteratorBase&);
using hardsigmoid_backward_fn = void(*)(TensorIteratorBase&);
using hardswish_fn = void(*)(TensorIterator&);
using hardswish_backward_fn = void(*)(TensorIterator&);
using shrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
using softshrink_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
using shrink_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
using elu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&);
using elu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&, const c10::Scalar&, const c10::Scalar&, bool);
using leaky_relu_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
using leaky_relu_backward_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
using log_sigmoid_cpu_fn = void (*)(TensorBase&, TensorBase&, const TensorBase&);
using gelu_fn = void (*)(TensorIteratorBase&, GeluType);
using gelu_backward_fn = void (*)(TensorIteratorBase&, GeluType);
using glu_jvp_fn = void (*)(TensorIteratorBase&);
DECLARE_DISPATCH(elu_fn, elu_stub);
DECLARE_DISPATCH(elu_backward_fn, elu_backward_stub);
DECLARE_DISPATCH(softplus_fn, softplus_stub);
DECLARE_DISPATCH(softplus_backward_fn, softplus_backward_stub);
DECLARE_DISPATCH(log_sigmoid_cpu_fn, log_sigmoid_cpu_stub);
DECLARE_DISPATCH(activation_backward_fn, log_sigmoid_backward_stub);
DECLARE_DISPATCH(threshold_fn, threshold_stub);
DECLARE_DISPATCH(gelu_fn, GeluKernel);
DECLARE_DISPATCH(gelu_backward_fn, GeluBackwardKernel);
DECLARE_DISPATCH(hardtanh_backward_fn, hardtanh_backward_stub);
DECLARE_DISPATCH(hardsigmoid_fn, hardsigmoid_stub);
DECLARE_DISPATCH(hardsigmoid_backward_fn, hardsigmoid_backward_stub);
DECLARE_DISPATCH(hardswish_fn, hardswish_stub);
DECLARE_DISPATCH(hardswish_backward_fn, hardswish_backward_stub);
DECLARE_DISPATCH(shrink_fn, hardshrink_stub);
DECLARE_DISPATCH(softshrink_fn, softshrink_stub);
DECLARE_DISPATCH(shrink_backward_fn, shrink_backward_stub);
DECLARE_DISPATCH(leaky_relu_fn, leaky_relu_stub);
DECLARE_DISPATCH(leaky_relu_backward_fn, leaky_relu_backward_stub);
DECLARE_DISPATCH(structured_activation_fn, glu_stub);
DECLARE_DISPATCH(activation_backward_fn, glu_backward_stub);
DECLARE_DISPATCH(glu_jvp_fn, glu_jvp_stub);
DECLARE_DISPATCH(structured_activation_fn, silu_stub);
DECLARE_DISPATCH(structured_activation_backward_fn, silu_backward_stub);
DECLARE_DISPATCH(structured_activation_fn, mish_stub);
DECLARE_DISPATCH(activation_backward_fn, mish_backward_stub);
DECLARE_DISPATCH(activation_fn, prelu_stub);
DECLARE_DISPATCH(activation_backward_fn, prelu_backward_stub);
} // namespace at::native
| 4,270
| 42.141414
| 120
|
h
|
null |
pytorch-main/aten/src/ATen/native/AdaptivePooling.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/native/DispatchStub.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/irange.h>
#include <cmath>
namespace at::native {
using adaptive_avg_pooling_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
using adaptive_avg_pooling_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
DECLARE_DISPATCH(adaptive_avg_pooling_fn, adaptive_avg_pool2d_kernel);
DECLARE_DISPATCH(adaptive_avg_pooling_backward_fn, adaptive_avg_pool2d_backward_kernel);
using adaptive_max_pooling_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
using adaptive_max_pooling_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
DECLARE_DISPATCH(adaptive_max_pooling_fn, adaptive_max_pool2d_kernel);
DECLARE_DISPATCH(adaptive_max_pooling_backward_fn, adaptive_max_pool2d_backward_kernel);
static inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
return (a / b) * c + ((a % b) * c) / b;
}
static inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
return 1 + ((a + 1) * c - 1) / b;
}
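// Worked example (illustrative numbers only): pooling an input of length c = 10
// down to b = 4 adaptive output elements, the helpers above produce these
// (possibly overlapping) half-open input windows per output index a:
//
//   a = 0 -> [0, 3)
//   a = 1 -> [2, 5)
//   a = 2 -> [5, 8)
//   a = 3 -> [7, 10)
//
//   for (int64_t a = 0; a < 4; ++a) {
//     int64_t s = start_index(a, /*b=*/4, /*c=*/10);
//     int64_t e = end_index(a, /*b=*/4, /*c=*/10);
//     // each output element reduces input[s:e]
//   }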
static inline void adaptive_pool_empty_output_check(const Tensor& gradOutput_, const char* arg_name) {
int64_t ndim = gradOutput_.ndimension();
for (const auto i : c10::irange(1, ndim)) {
TORCH_CHECK(gradOutput_.size(i) > 0,
arg_name, "(): Expected grad_output to have non-zero size for non-batch dimensions, "
"but grad_output has sizes ", gradOutput_.sizes(), " with dimension ", i,
" being empty");
}
}
} // namespace at::native
| 1,645
| 40.15
| 131
|
h
|
null |
pytorch-main/aten/src/ATen/native/BinaryOps.h
|
#pragma once
#include <ATen/core/TensorBase.h>
#include <ATen/native/DispatchStub.h>
#include <c10/core/Scalar.h>
#include <c10/util/TypeSafeSignMath.h>
#if defined(__CUDA_ARCH__)
#include <c10/cuda/CUDAMathCompat.h>
#define compat_copysign c10::cuda::compat::copysign
#elif defined(__HIPCC__)
#include <c10/hip/HIPMathCompat.h>
#define compat_copysign c10::hip::compat::copysign
#else
#include <c10/util/copysign.h>
#define compat_copysign c10::copysign
#endif
namespace at {
struct TensorIterator;
struct TensorIteratorBase;
}
namespace at::native {
inline void alpha_check(const ScalarType dtype, const Scalar& alpha) {
TORCH_CHECK(! alpha.isBoolean() || dtype == ScalarType::Bool,
"Boolean alpha only supported for Boolean results.");
TORCH_CHECK(isFloatingType(dtype) || isComplexType(dtype)
|| alpha.isIntegral(true),
"For integral input tensors, argument alpha must not be a floating point number.");
TORCH_CHECK(isComplexType(dtype) || !alpha.isComplex(),
"For non-complex input tensors, argument alpha must not be a complex number.")
}
// Basic checking for all sub functions.
inline void sub_check(const TensorBase& self, const TensorBase& other) {
TORCH_CHECK(self.scalar_type() != kBool || other.scalar_type() != kBool,
"Subtraction, the `-` operator, with two bool tensors is not supported. "
"Use the `^` or `logical_xor()` operator instead.")
TORCH_CHECK(self.scalar_type() != kBool && other.scalar_type() != kBool,
"Subtraction, the `-` operator, with a bool tensor is not supported. "
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
}
inline void sub_check(const TensorBase& self, const Scalar& scalar) {
TORCH_CHECK(self.scalar_type() != kBool || !scalar.isBoolean(),
"Subtraction, the `-` operator, with two bool tensors is not supported. "
"Use the `^` or `logical_xor()` operator instead.")
TORCH_CHECK(self.scalar_type() != kBool && !scalar.isBoolean(),
"Subtraction, the `-` operator, with a bool tensor is not supported. "
"If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.");
}
#if defined(__CUDACC__) || defined(__HIPCC__)
#define HOST_DEVICE __host__ __device__
#else
#define HOST_DEVICE
#endif
// NOTE: [Floor Division in Python]
// Python's __floordiv__ operator is more complicated than just floor(a / b).
// It aims to maintain the property: a == (a // b) * b + remainder(a, b)
// which can otherwise fail due to rounding errors in the remainder.
// So, instead it is calculated as: a // b = (a - remainder(a, b)) / b
// With some additional fix-ups added to the result.
//
// For reference, see CPython's implementation:
// https://github.com/python/cpython/blob/ace008c531dd685a30c1dd68f9b5ba35f20171cf/Objects/floatobject.c#L636
template <typename scalar_t>
inline HOST_DEVICE scalar_t div_floor_floating(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ {
if (C10_UNLIKELY(b == 0)) {
// Divide by zero: return standard IEEE result
return a / b;
}
auto mod = std::fmod(a, b);
auto div = (a - mod) / b;
if ((mod != 0) && (b < 0) != (mod < 0)) {
div -= scalar_t(1);
}
scalar_t floordiv;
if (div != 0) {
floordiv = std::floor(div);
if (div - floordiv > scalar_t(0.5)) {
floordiv += scalar_t(1.0);
}
} else {
floordiv = compat_copysign(scalar_t(0), a / b);
}
return floordiv;
}
template <typename scalar_t>
inline HOST_DEVICE scalar_t div_floor_integer(scalar_t a, scalar_t b) {
if (c10::signs_differ(a, b)) {
// Subtracts one from the results of truncation division if the
// divisor and dividend have different sign(bit)s and the remainder of
// the division is nonzero
const auto quot = a / b;
const auto rem = a % b;
return rem ? quot - 1 : quot;
}
return a / b;
}
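// Worked example (illustrative, matching the Python semantics described in the note
// above): both helpers round toward negative infinity rather than toward zero.
//
//   div_floor_integer<int64_t>( 7, 2)  == 3      // truncation and floor agree
//   div_floor_integer<int64_t>(-7, 2)  == -4     // truncation alone would give -3
//   div_floor_floating<double>(-7.0, 2.0) == -4.0
//   div_floor_floating<double>( 7.5, -2.0) == -4.0   // floor(-3.75)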
using structured_binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
using structured_binary_fn_double = void(*)(TensorIteratorBase&, double);
using structured_binary_fn = void(*)(TensorIteratorBase&);
using binary_fn_alpha = void(*)(TensorIteratorBase&, const Scalar& alpha);
using binary_fn_double = void(*)(TensorIterator&, double);
using binary_fn = void(*)(TensorIterator&);
using binary_clamp_fn_alpha =
void(*)(TensorIterator&, const Scalar& alpha, const Scalar& min_val, const Scalar& max_val);
// NB: codegenned
DECLARE_DISPATCH(structured_binary_fn_alpha, add_stub);
DECLARE_DISPATCH(binary_clamp_fn_alpha, add_clamp_stub);
DECLARE_DISPATCH(structured_binary_fn_alpha, sub_stub);
DECLARE_DISPATCH(structured_binary_fn, mul_stub);
DECLARE_DISPATCH(structured_binary_fn, div_true_stub);
DECLARE_DISPATCH(structured_binary_fn, div_floor_stub);
DECLARE_DISPATCH(structured_binary_fn, div_trunc_stub);
DECLARE_DISPATCH(structured_binary_fn, atan2_stub);
DECLARE_DISPATCH(structured_binary_fn, remainder_stub);
DECLARE_DISPATCH(structured_binary_fn, bitwise_and_stub);
DECLARE_DISPATCH(structured_binary_fn, bitwise_or_stub);
DECLARE_DISPATCH(structured_binary_fn, bitwise_xor_stub);
DECLARE_DISPATCH(structured_binary_fn, lshift_stub);
DECLARE_DISPATCH(structured_binary_fn, rshift_stub);
DECLARE_DISPATCH(binary_fn, logical_xor_stub);
DECLARE_DISPATCH(binary_fn, logical_and_stub);
DECLARE_DISPATCH(binary_fn, logical_or_stub);
DECLARE_DISPATCH(structured_binary_fn, lt_stub);
DECLARE_DISPATCH(structured_binary_fn, le_stub);
DECLARE_DISPATCH(structured_binary_fn, gt_stub);
DECLARE_DISPATCH(structured_binary_fn, ge_stub);
DECLARE_DISPATCH(structured_binary_fn, eq_stub);
DECLARE_DISPATCH(structured_binary_fn, ne_stub);
DECLARE_DISPATCH(binary_fn, max_elementwise_stub);
DECLARE_DISPATCH(binary_fn, min_elementwise_stub);
DECLARE_DISPATCH(structured_binary_fn, maximum_stub);
DECLARE_DISPATCH(structured_binary_fn, minimum_stub);
DECLARE_DISPATCH(structured_binary_fn, fmax_stub);
DECLARE_DISPATCH(structured_binary_fn, fmin_stub);
DECLARE_DISPATCH(structured_binary_fn_double, smooth_l1_stub);
DECLARE_DISPATCH(binary_fn_double, huber_stub);
DECLARE_DISPATCH(structured_binary_fn, sigmoid_backward_stub);
DECLARE_DISPATCH(binary_fn_alpha, logit_backward_stub);
DECLARE_DISPATCH(structured_binary_fn, tanh_backward_stub);
DECLARE_DISPATCH(structured_binary_fn, mse_stub);
DECLARE_DISPATCH(structured_binary_fn, fmod_stub);
DECLARE_DISPATCH(structured_binary_fn, logaddexp_stub);
DECLARE_DISPATCH(structured_binary_fn, logaddexp2_stub);
DECLARE_DISPATCH(structured_binary_fn, gcd_stub);
DECLARE_DISPATCH(structured_binary_fn, lcm_stub);
DECLARE_DISPATCH(structured_binary_fn, hypot_stub);
DECLARE_DISPATCH(structured_binary_fn, igamma_stub);
DECLARE_DISPATCH(structured_binary_fn, igammac_stub);
DECLARE_DISPATCH(structured_binary_fn, nextafter_stub);
DECLARE_DISPATCH(structured_binary_fn, heaviside_stub);
DECLARE_DISPATCH(structured_binary_fn, copysign_stub);
DECLARE_DISPATCH(structured_binary_fn, xlogy_stub);
DECLARE_DISPATCH(structured_binary_fn, xlog1py_stub);
DECLARE_DISPATCH(structured_binary_fn, zeta_stub);
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_t_stub);
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_u_stub);
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_v_stub);
DECLARE_DISPATCH(structured_binary_fn, chebyshev_polynomial_w_stub);
DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_h_stub);
DECLARE_DISPATCH(structured_binary_fn, hermite_polynomial_he_stub);
DECLARE_DISPATCH(structured_binary_fn, laguerre_polynomial_l_stub);
DECLARE_DISPATCH(structured_binary_fn, legendre_polynomial_p_stub);
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_t_stub);
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_u_stub);
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_v_stub);
DECLARE_DISPATCH(structured_binary_fn, shifted_chebyshev_polynomial_w_stub);
} // namespace at::native
| 8,002
| 42.494565
| 110
|
h
|
null |
pytorch-main/aten/src/ATen/native/BucketizationUtils.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/native/TypeProperties.h>
#include <ATen/ScalarOps.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/result_type.h>
#endif
namespace at::native {
// Original values are given by raw_*. If an original value is not contiguous, a contiguous copy is made into
// the corresponding trimmed_* value. Additionally, if the dtypes of the boundary and input tensors do not
// match, they are converted to a common super type so comparisons are done between the same types.
// For any trimmed_* tensor, if its outgoing value matches what it was incoming (typically null), then the
// corresponding raw_* version should be used since it was already contiguous and of the right type.
inline void searchsorted_maybe_trim_input_tensors(
Tensor& trimmed_input,
Tensor& trimmed_boundaries,
Tensor& trimmed_sorter,
const Tensor& raw_input,
const Tensor& raw_boundaries,
const Tensor& raw_sorter) {
bool in_is_contiguous = raw_input.is_contiguous();
bool bd_is_contiguous = raw_boundaries.is_contiguous();
bool sort_is_contiguous = raw_sorter.is_contiguous();
if (!in_is_contiguous) {
TORCH_WARN_ONCE("torch.searchsorted(): input value tensor is non-contiguous, this will lower the performance due "
"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous input value "
"tensor if possible. This message will only appear once per program.");
trimmed_input = raw_input.contiguous();
}
if (!bd_is_contiguous) {
TORCH_WARN_ONCE("torch.searchsorted(): boundary tensor is non-contiguous, this will lower the performance due "
"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous boundary "
"tensor if possible. This message will only appear once per program.");
trimmed_boundaries = raw_boundaries.contiguous();
}
if (!sort_is_contiguous) {
TORCH_WARN_ONCE("torch.searchsorted(): sorter tensor is non-contiguous, this will lower the performance due "
"to extra data copy when converting non-contiguous tensor to contiguous, please use contiguous sorter "
"tensor if possible. This message will only appear once per program.");
trimmed_sorter = raw_sorter.contiguous();
}
if (raw_input.dtype() != raw_boundaries.dtype()) {
at::native::ResultTypeState state = {};
state = at::native::update_result_type_state(raw_boundaries, state);
state = at::native::update_result_type_state(raw_input, state);
ScalarType common_stype = at::native::result_type(state);
TORCH_INTERNAL_ASSERT(common_stype != ScalarType::Undefined);
if (common_stype != raw_input.scalar_type()) {
trimmed_input = in_is_contiguous ? raw_input.to(common_stype) : trimmed_input.to(common_stype);
}
if (common_stype != raw_boundaries.scalar_type()) {
trimmed_boundaries = bd_is_contiguous ? raw_boundaries.to(common_stype) : trimmed_boundaries.to(common_stype);
}
}
}
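// Usage sketch (assumption-based, mirroring how a searchsorted kernel is expected to
// consume this helper): the trimmed_* tensors stay undefined unless a copy or dtype
// conversion was actually needed, so the caller falls back to the raw_* inputs.
//
//   Tensor trimmed_input, trimmed_boundaries, trimmed_sorter;
//   searchsorted_maybe_trim_input_tensors(
//       trimmed_input, trimmed_boundaries, trimmed_sorter,
//       raw_input, raw_boundaries, raw_sorter);
//   const Tensor& in = trimmed_input.defined() ? trimmed_input : raw_input;
//   const Tensor& bd = trimmed_boundaries.defined() ? trimmed_boundaries : raw_boundaries;
//   const Tensor& st = trimmed_sorter.defined() ? trimmed_sorter : raw_sorter;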
/* unused but needed for internal jagged tensor class */
inline void searchsorted_maybe_trim_input_tensors(
Tensor& trimmed_input,
Tensor& trimmed_boundaries,
const Tensor& raw_input,
const Tensor& raw_boundaries) {
Tensor trimmed_sorter;
Tensor raw_sorter;
return searchsorted_maybe_trim_input_tensors(
trimmed_input,
trimmed_boundaries,
trimmed_sorter,
raw_input,
raw_boundaries,
raw_sorter);
}
inline bool searchsorted_dims_matched_before_last_dim(const Tensor& boundaries, const Tensor& input) {
if (boundaries.dim() != input.dim()) {
return false;
}
const auto& dims_bd = boundaries.sizes();
const auto& dims_in = input.sizes();
for (int64_t dim = 0; dim + 1 < boundaries.dim(); ++dim) {
if (dims_bd[dim] != dims_in[dim]) {
return false;
}
}
return true;
}
inline Tensor searchsorted_scalar_tensor(const Scalar& scalar, const c10::Device& device) {
auto tensor = c10::scalar_to_tensor(scalar, device);
// This is to adopt the scalar promotion rules defined in native/TypeProperties.h
// So we have the same type promotion rules as binary operations.
tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
return tensor;
}
inline void searchsorted_pre_check(
const Tensor& boundaries,
const Tensor& input,
const Tensor& output,
const bool out_int32,
const bool right,
const c10::optional<c10::string_view> side_opt,
const Tensor& sorter) {
if (side_opt) {
const c10::string_view side = *side_opt;
TORCH_CHECK(side == "left" || side == "right", "torch.searchsorted(): side can only be 'left' or 'right' but ",
"got ", side);
// assume the user has not explicitly set (right=False, side="right")
TORCH_CHECK(!right || side == "right", "torch.searchsorted(): side and right can't be set to opposites, got side "
"of ", side, " while right was True");
}
TORCH_CHECK(boundaries.device() == input.device(), "torch.searchsorted(): boundaries and input value tensors ",
"should have same device type, but got boundaries tensor device type ", boundaries.device(), " and input value ",
"tensor device type ", input.device());
if (sorter.defined()) {
TORCH_CHECK(sorter.device() == boundaries.device(), "torch.searchsorted(): sorter and boundary tensors should ",
"have same device type, but got sorter tensor device type ", sorter.device(), " and input value tensor ",
"device type ", boundaries.device());
TORCH_CHECK(sorter.sizes() == boundaries.sizes(), "torch.searchsorted(): boundary and sorter must have the same "
"size, but got boundary tensor ", boundaries.sizes(), "and got sorter tensor ", sorter.sizes());
TORCH_CHECK(sorter.scalar_type() == ScalarType::Long, "torch.searchsorted(): sorter must be a tensor of long ",
"dtype but got dtype ", sorter.scalar_type());
if (sorter.numel() > 0) {
auto minmax = sorter.aminmax();
int64_t vmin = std::get<0>(minmax).item().toLong();
int64_t vmax = std::get<1>(minmax).item().toLong();
TORCH_CHECK(vmin >= 0 && vmax < sorter.sizes().back(), "torch.searchsorted(): sorter index out of range");
}
}
TORCH_CHECK(input.dim() > 0 || (input.dim() == 0 && input.numel() == 1 && boundaries.dim() == 1),
"torch.searchsorted(): input value can be a scalar only when boundaries tensor dimension is 1, but we got ",
"boundaries tensor dim(", boundaries.dim(), ") and input value's dim(", input.dim(), ") numel(",
input.numel(), ")");
TORCH_CHECK(boundaries.dim() != 0, "torch.searchsorted(): boundaries tensor should have positive dimension, but ",
"got 0 dimension");
TORCH_CHECK(boundaries.dim() == 1 || searchsorted_dims_matched_before_last_dim(boundaries, input),
"torch.searchsorted(): boundaries tensor should be 1 dimension or the first N-1 dimensions of boundaries tensor ",
"and input value tensor must match, but we got boundaries tensor ", boundaries.sizes(), " and input value tensor ",
input.sizes());
ScalarType output_dtype = output.scalar_type();
TORCH_CHECK(
(output_dtype == ScalarType::Long && !out_int32) ||
(output_dtype == ScalarType::Int && out_int32),
"torch.searchsorted(): output tensor's dtype is wrong, it can only be Int(int32) or Long(int64) depending on ",
"whether out_int32 flag is True, but we got output tensor's dtype ", output_dtype,
" and out_int32 flag is ", (out_int32 ? "True" : "False"));
if (out_int32) {
TORCH_CHECK(boundaries.sizes().back() < INT_MAX,
"torch.searchsorted(): the size of boundaries' last dimension should be less than ", INT_MAX, ", but we got ",
boundaries.sizes().back());
}
}
} // namespace at::native
| 7,789
| 43.770115
| 119
|
h
|
null |
pytorch-main/aten/src/ATen/native/CPUBlas.h
|
#pragma once
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TransposeType.h>
#include <c10/util/complex.h>
#include <c10/core/ScalarType.h>
#include <c10/core/Scalar.h>
namespace at::native::cpublas {
namespace internal {
void normalize_last_dims(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
int64_t *lda, int64_t *ldb, int64_t *ldc);
} // namespace internal
using gemm_fn = void(*)(
at::ScalarType type,
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
const Scalar& alpha,
const void *a, int64_t lda,
const void *b, int64_t ldb,
const Scalar& beta,
void *c, int64_t ldc);
DECLARE_DISPATCH(gemm_fn, gemm_stub);
template <typename scalar_t>
void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
at::opmath_type<scalar_t> alpha,
const scalar_t *a, int64_t lda,
const scalar_t *b, int64_t ldb,
at::opmath_type<scalar_t> beta,
scalar_t *c, int64_t ldc) {
internal::normalize_last_dims(transa, transb, m, n, k, &lda, &ldb, &ldc);
gemm_stub(
kCPU, c10::CppTypeToScalarType<scalar_t>::value,
transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
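// Minimal usage sketch (an assumption-laden example, not a reference call site):
// computes C = alpha * A * B + beta * C for small 2x2 matrices, assuming the usual
// column-major BLAS conventions for the leading dimensions lda/ldb/ldc.
//
//   float A[4] = {1, 2, 3, 4};   // column-major 2x2
//   float B[4] = {5, 6, 7, 8};
//   float C[4] = {0, 0, 0, 0};
//   at::native::cpublas::gemm(
//       TransposeType::NoTranspose, TransposeType::NoTranspose,
//       /*m=*/2, /*n=*/2, /*k=*/2,
//       /*alpha=*/1.f, A, /*lda=*/2, B, /*ldb=*/2,
//       /*beta=*/0.f, C, /*ldc=*/2);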
void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
double alpha,
const double *a, int64_t lda,
const double *b, int64_t ldb,
double beta,
double *c, int64_t ldc);
void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
float alpha,
const float *a, int64_t lda,
const float *b, int64_t ldb,
float beta,
float *c, int64_t ldc);
void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
float alpha,
const at::BFloat16 *a, int64_t lda,
const at::BFloat16 *b, int64_t ldb,
float beta,
at::BFloat16 *c, int64_t ldc);
void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
c10::complex<double> alpha,
const c10::complex<double> *a, int64_t lda,
const c10::complex<double> *b, int64_t ldb,
c10::complex<double> beta,
c10::complex<double> *c, int64_t ldc);
void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
c10::complex<float> alpha,
const c10::complex<float> *a, int64_t lda,
const c10::complex<float> *b, int64_t ldb,
c10::complex<float> beta,
c10::complex<float> *c, int64_t ldc);
void gemm(
TransposeType transa, TransposeType transb,
int64_t m, int64_t n, int64_t k,
int64_t alpha,
const int64_t *a, int64_t lda,
const int64_t *b, int64_t ldb,
int64_t beta,
int64_t *c, int64_t ldc);
template <typename scalar_t>
void gemm_batched(
TransposeType transa, TransposeType transb,
int64_t batch_size, int64_t m, int64_t n, int64_t k,
scalar_t alpha,
const scalar_t * const *a, int64_t lda,
const scalar_t * const *b, int64_t ldb,
const scalar_t beta,
scalar_t * const *c, int64_t ldc);
template <typename scalar_t>
void gemm_batched_with_stride(
TransposeType transa, TransposeType transb,
int64_t batch_size, int64_t m, int64_t n, int64_t k,
scalar_t alpha,
const scalar_t *a, int64_t lda, int64_t batch_stride_a,
const scalar_t *b, int64_t ldb, int64_t batch_stride_b,
scalar_t beta,
scalar_t *c, int64_t ldc, int64_t batch_stride_c);
using axpy_fn = void(*)(at::ScalarType type, int64_t n, const Scalar& a, const void *x, int64_t incx, void *y, int64_t incy);
DECLARE_DISPATCH(axpy_fn, axpy_stub);
template<typename scalar_t>
void axpy(int64_t n, scalar_t a, const scalar_t *x, int64_t incx, scalar_t *y, int64_t incy){
if(n == 1)
{
incx = 1;
incy = 1;
}
axpy_stub(
kCPU, c10::CppTypeToScalarType<scalar_t>::value,
n, a, x, incx, y, incy);
}
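// Usage sketch (illustrative): axpy computes y = a * x + y over n strided elements.
//
//   float x[3] = {1.f, 2.f, 3.f};
//   float y[3] = {10.f, 20.f, 30.f};
//   at::native::cpublas::axpy(/*n=*/3, /*a=*/2.f, x, /*incx=*/1, y, /*incy=*/1);
//   // y is now {12.f, 24.f, 36.f}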
void axpy(int64_t n, double a, const double *x, int64_t incx, double *y, int64_t incy);
void axpy(int64_t n, float a, const float *x, int64_t incx, float *y, int64_t incy);
void axpy(int64_t n, c10::complex<double> a, const c10::complex<double> *x, int64_t incx, c10::complex<double> *y, int64_t incy);
void axpy(int64_t n, c10::complex<float> a, const c10::complex<float> *x, int64_t incx, c10::complex<float> *y, int64_t incy);
using copy_fn = void(*)(at::ScalarType type, int64_t n, const void *x, int64_t incx, void *y, int64_t incy);
DECLARE_DISPATCH(copy_fn, copy_stub);
template<typename scalar_t>
void copy(int64_t n, const scalar_t *x, int64_t incx, scalar_t *y, int64_t incy) {
if(n == 1)
{
incx = 1;
incy = 1;
}
copy_stub(
kCPU, c10::CppTypeToScalarType<scalar_t>::value,
n, x, incx, y, incy);
}
void copy(int64_t n, const double *x, int64_t incx, double *y, int64_t incy);
void copy(int64_t n, const float *x, int64_t incx, float *y, int64_t incy);
void copy(int64_t n, const c10::complex<double> *x, int64_t incx, c10::complex<double> *y, int64_t incy);
void copy(int64_t n, const c10::complex<float> *x, int64_t incx, c10::complex<float> *y, int64_t incy);
} // namespace at::native::cpublas
| 5,184
| 30.809816
| 129
|
h
|
null |
pytorch-main/aten/src/ATen/native/CPUFallback.h
|
#pragma once
#include <ATen/core/ivalue.h>
#include <ATen/core/stack.h>
#include <ATen/core/boxing/KernelFunction.h>
#include <ATen/core/dispatch/Dispatcher.h>
#include <c10/util/Metaprogramming.h>
#include <torch/library.h>
namespace at::native {
// This function implements a boxed fallback to CPU.
// External backends can add their own custom logging on top of it to customize their own CPU fallbacks.
TORCH_API void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool error_on_views = false);
// This is a helper function that backends can use to directly call their boxed CPU fallback
// TODO: update and add a usage example after https://github.com/pytorch/pytorch/pull/58092 lands.
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op, bool symint, class ReturnType, class... ParameterTypes>
struct _call_fallback_fn final {};
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op, bool symint, class ReturnType, class... ParameterTypes>
struct _call_fallback_fn<fallback_fn, Op, symint, ReturnType(ParameterTypes...)> final {
static ReturnType call(typename c10::maybe_keep_symint<symint, ParameterTypes>::type... args) {
auto op = c10::Dispatcher::singleton()
// TODO: figure out how to make compiler happy without dynamic casts
.findSchemaOrThrow((const char*) Op::name, (const char*) Op::overload_name)
//.findSchemaOrThrow("a", "b")
.typed<ReturnType (typename c10::maybe_keep_symint<symint, ParameterTypes>::type...)>();
return c10::impl::BoxedKernelWrapper<ReturnType (typename c10::maybe_keep_symint<symint, ParameterTypes>::type...)>::call(
c10::BoxedKernel::makeFromFunction<fallback_fn>(),
op,
c10::DispatchKeySet(), // we know that the cpu_fallback doesn't use the dispatch keyset.
// TODO: get std::forward<> to work
args...
);
}
};
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
using call_fallback_fn_symint = _call_fallback_fn<fallback_fn, Op, true, typename Op::schema>;
template<c10::KernelFunction::BoxedKernelFunction* fallback_fn, class Op>
using call_fallback_fn = _call_fallback_fn<fallback_fn, Op, false, typename Op::schema>;
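// Registration sketch (hedged: the dispatch key below is a placeholder for an
// out-of-tree backend and is not defined by this header). A backend can route every
// op it lacks a kernel for through cpu_fallback by installing it as a boxed fallback:
//
//   TORCH_LIBRARY_IMPL(_, PrivateUse1, m) {
//     m.fallback(torch::CppFunction::makeFromBoxedFunction<&at::native::cpu_fallback>());
//   }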
} // namespace at::native
| 2,326
| 49.586957
| 130
|
h
|
null |
pytorch-main/aten/src/ATen/native/CompositeRandomAccessor.h
|
#pragma once
#include <ATen/native/CompositeRandomAccessorCommon.h>
namespace at::native {
struct TupleInfoCPU {
template <typename ...Types>
using tuple = std::tuple<Types...>;
template <typename ...Types>
static constexpr auto tie(Types&... args) noexcept {
return std::tie(args...);
}
};
template <typename KeyAccessor, typename ValueAccessor>
using CompositeRandomAccessorCPU =
CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfoCPU>;
template <typename Values, typename References>
void swap(
references_holder<Values, References> rh1,
references_holder<Values, References> rh2
) {
return std::swap(rh1.data(), rh2.data());
}
template <int N, typename Values, typename References>
auto get(references_holder<Values, References> rh) -> decltype(std::get<N>(rh.data())) {
return std::get<N>(rh.data());
}
} // namespace at::native
| 876
| 24.057143
| 88
|
h
|
null |
pytorch-main/aten/src/ATen/native/CompositeRandomAccessorCommon.h
|
#pragma once
#include <utility>
namespace at::native {
namespace {
// operator_brackets_proxy is used in
// CompositeRandomAccessor in place of operator[].
// For some iterators, references returned by operator[]
// could become invalid; operator_brackets_proxy tries to
// resolve that by making accessor[n] equivalent to
// *(accessor + n).
template <typename Accessor>
class operator_brackets_proxy {
using reference = typename std::iterator_traits<Accessor>::reference;
using value_type = typename std::iterator_traits<Accessor>::value_type;
public:
C10_HOST_DEVICE
operator_brackets_proxy(Accessor const& accessor)
: accessor(accessor)
{}
C10_HOST_DEVICE
operator reference() {
return *accessor;
}
C10_HOST_DEVICE
reference operator*() {
return *accessor;
}
C10_HOST_DEVICE
operator_brackets_proxy& operator=(value_type const& val) {
*accessor = val;
return *this;
}
private:
Accessor accessor;
};
}
// references_holder is used as a surrogate for the
// references type from std::iterator_traits in CompositeRandomAccessor.
// It is assumed in CompositeRandomAccessor that
// References = tuple<Types&...>,
// Values = tuple<Types...> by default,
// but they could be anything as long as References could be
// cast to Values.
// If you plan to use it with STL, for example, you will need to
// define `swap` and `get` (aka std::get) methods.
template <typename Values, typename References>
class references_holder {
public:
using values = Values;
using references = References;
C10_HOST_DEVICE
references_holder(references refs)
: refs{std::move(refs)}
{}
C10_HOST_DEVICE
operator references() {
return refs;
}
C10_HOST_DEVICE
operator values() {
return refs;
}
C10_HOST_DEVICE
references_holder& operator=(values vals) {
refs = vals;
return *this;
}
C10_HOST_DEVICE
references& data() {
return refs;
}
protected:
references refs;
};
// CompositeRandomAccessor is essentially a simplified version of
// a random access iterator over two random access iterators.
// TupleInfo should contain a variadic type `tuple`, and a method `tie`,
// which constructs a tuple of references from a variadic list of arguments.
template <typename KeyAccessor, typename ValueAccessor, typename TupleInfo>
class CompositeRandomAccessor {
using self_type = CompositeRandomAccessor<KeyAccessor, ValueAccessor, TupleInfo>;
using key_accessor_value_type =
typename std::iterator_traits<KeyAccessor>::value_type;
using value_accessor_value_type =
typename std::iterator_traits<ValueAccessor>::value_type;
using key_accessor_reference_type =
typename std::iterator_traits<KeyAccessor>::reference;
using value_accessor_reference_type =
typename std::iterator_traits<ValueAccessor>::reference;
using composite_value_type = typename TupleInfo::template tuple<
key_accessor_value_type,
value_accessor_value_type>;
using composite_reference = typename TupleInfo::template tuple<
key_accessor_reference_type,
value_accessor_reference_type>;
public:
using value_type = composite_value_type;
using reference = references_holder<composite_value_type, composite_reference>;
  // Note that CompositeRandomAccessor does not hold keys and values
  // in a specific data structure, which means that a pointer to a (key, value)
  // is not defined. Hence we just use the pointer type of the KeyAccessor.
using pointer = typename std::iterator_traits<KeyAccessor>::pointer;
using difference_type = typename std::iterator_traits<KeyAccessor>::difference_type;
using iterator_category = std::random_access_iterator_tag;
C10_HOST_DEVICE
CompositeRandomAccessor() = default;
C10_HOST_DEVICE
CompositeRandomAccessor(KeyAccessor keys, ValueAccessor values)
: keys(keys), values(values)
{}
// Pointer-like operations {
C10_HOST_DEVICE
reference operator*() const {
return TupleInfo::tie(*keys, *values);
}
// operator->() is supposed to return a pointer type.
// Since CompositeRandomAccessor does not hold pointers to pairs,
// we just return a pointer to a key.
C10_HOST_DEVICE
auto* operator->() const {
return keys.operator->();
}
C10_HOST_DEVICE
reference operator[](difference_type idx) {
return operator_brackets_proxy<self_type>(
CompositeRandomAccessor(keys + idx, values + idx)
);
}
// }
// Prefix/postfix increment/decrement {
C10_HOST_DEVICE
CompositeRandomAccessor& operator++() {
++keys;
++values;
return *this;
}
C10_HOST_DEVICE
CompositeRandomAccessor operator++(int) {
CompositeRandomAccessor copy(*this);
++*this;
return copy;
}
C10_HOST_DEVICE
CompositeRandomAccessor& operator--() {
--keys;
--values;
return *this;
}
C10_HOST_DEVICE
CompositeRandomAccessor operator--(int) {
CompositeRandomAccessor copy(*this);
--*this;
return copy;
}
// }
// Arithmetic operations {
C10_HOST_DEVICE
CompositeRandomAccessor& operator+=(difference_type offset) {
keys += offset;
values += offset;
return *this;
}
C10_HOST_DEVICE
CompositeRandomAccessor operator+(difference_type offset) const {
return CompositeRandomAccessor(keys + offset, values + offset);
}
C10_HOST_DEVICE
friend CompositeRandomAccessor operator+(
difference_type offset,
const CompositeRandomAccessor& accessor
) {
return accessor + offset;
}
C10_HOST_DEVICE
CompositeRandomAccessor& operator-=(difference_type offset) {
keys -= offset;
values -= offset;
return *this;
}
C10_HOST_DEVICE
CompositeRandomAccessor operator-(difference_type offset) const {
return CompositeRandomAccessor(keys - offset, values - offset);
}
C10_HOST_DEVICE
difference_type operator-(const CompositeRandomAccessor& other) const {
return keys - other.keys;
}
// }
// Comparison operators {
C10_HOST_DEVICE
bool operator==(const CompositeRandomAccessor& other) const {
return keys == other.keys;
}
C10_HOST_DEVICE
bool operator!=(const CompositeRandomAccessor& other) const {
return keys != other.keys;
}
C10_HOST_DEVICE
bool operator<(const CompositeRandomAccessor& other) const {
return keys < other.keys;
}
C10_HOST_DEVICE
bool operator<=(const CompositeRandomAccessor& other) const {
return keys <= other.keys;
}
C10_HOST_DEVICE
bool operator>(const CompositeRandomAccessor& other) const {
return keys > other.keys;
}
C10_HOST_DEVICE
bool operator>=(const CompositeRandomAccessor& other) const {
return keys >= other.keys;
}
// }
protected:
KeyAccessor keys;
ValueAccessor values;
};
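// Usage sketch (assumption-based; StdTupleInfo below is local to this example and is
// not the TupleInfo the CPU/CUDA backends provide). The typical motivation is sorting
// a key array and a value array in lockstep; here we only show how dereferencing and
// assignment write through to both underlying arrays. Using it with std::sort
// additionally requires the free `swap`/`get` helpers mentioned above.
//
//   struct StdTupleInfo {
//     template <typename ...Types>
//     using tuple = std::tuple<Types...>;
//     template <typename ...Types>
//     static constexpr auto tie(Types&... args) noexcept { return std::tie(args...); }
//   };
//
//   int64_t keys[3] = {3, 1, 2};
//   float vals[3] = {30.f, 10.f, 20.f};
//   CompositeRandomAccessor<int64_t*, float*, StdTupleInfo> acc(keys, vals);
//   std::tuple<int64_t, float> first = *acc;     // reads (3, 30.f)
//   *acc = std::make_tuple(int64_t{1}, 5.f);     // writes keys[0] = 1, vals[0] = 5.f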
} // namespace at::native
| 6,732
| 24.503788
| 86
|
h
|
null |
pytorch-main/aten/src/ATen/native/ConvUtils.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/TensorUtils.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/native/DispatchStub.h>
#include <c10/util/env.h>
#include <c10/util/irange.h>
namespace at::native {
using conv_depthwise2d_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, std::array<bool, 2>);
DECLARE_DISPATCH(conv_depthwise2d_backward_fn, conv_depthwise2d_backward_stub);
using conv_depthwise3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
DECLARE_DISPATCH(conv_depthwise3d_backward_fn, conv_depthwise3d_backward_stub);
using cudnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
DECLARE_DISPATCH(cudnn_convolution_backward_fn, cudnn_convolution_backward_stub);
using mps_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, int64_t, std::array<bool,3>);
DECLARE_DISPATCH(mps_convolution_backward_fn, mps_convolution_backward_stub);
using cudnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, bool, std::array<bool,2>);
DECLARE_DISPATCH(cudnn_convolution_transpose_backward_fn, cudnn_convolution_transpose_backward_stub);
using miopen_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
DECLARE_DISPATCH(miopen_convolution_backward_fn, miopen_convolution_backward_stub);
using miopen_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
DECLARE_DISPATCH(miopen_convolution_transpose_backward_fn, miopen_convolution_transpose_backward_stub);
using miopen_depthwise_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, int64_t, bool, bool, std::array<bool,3>);
DECLARE_DISPATCH(miopen_depthwise_convolution_backward_fn, miopen_depthwise_convolution_backward_stub);
using mkldnn_convolution_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, int64_t, std::array<bool,3>);
DECLARE_DISPATCH(mkldnn_convolution_backward_fn, mkldnn_convolution_backward_stub);
using mkldnn_convolution_transpose_fn = Tensor(*)(const Tensor&, const Tensor&, const c10::optional<Tensor>&,
IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t);
DECLARE_DISPATCH(mkldnn_convolution_transpose_fn, mkldnn_convolution_transpose_stub);
using mkldnn_convolution_transpose_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, int64_t, std::array<bool,3>);
DECLARE_DISPATCH(mkldnn_convolution_transpose_backward_fn, mkldnn_convolution_transpose_backward_stub);
using slow_conv_dilated2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
DECLARE_DISPATCH(slow_conv_dilated2d_backward_fn, slow_conv_dilated2d_backward_stub);
using slow_conv_dilated3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, std::array<bool, 3>);
DECLARE_DISPATCH(slow_conv_dilated3d_backward_fn, slow_conv_dilated3d_backward_stub);
using slow_conv_transpose2d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
DECLARE_DISPATCH(slow_conv_transpose2d_backward_fn, slow_conv_transpose2d_backward_stub);
using slow_conv_transpose3d_backward_fn = std::tuple<at::Tensor,at::Tensor,at::Tensor>(*)(
const at::Tensor&, const at::Tensor&, const at::Tensor&, at::IntArrayRef, at::IntArrayRef,
at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, std::array<bool,3>);
DECLARE_DISPATCH(slow_conv_transpose3d_backward_fn, slow_conv_transpose3d_backward_stub);
namespace {
static bool cudnnv8_heuristic_mode_b = c10::utils::check_env("TORCH_CUDNN_USE_HEURISTIC_MODE_B") == true;
}
static inline bool cudnnv8_enabled_check_debug() {
static bool cudnnv8_flag = c10::utils::check_env("TORCH_CUDNN_V8_API_DISABLED") != true;
static bool cudnnv8_debug = c10::utils::check_env("TORCH_CUDNN_V8_API_DEBUG") == true;
static uint8_t cudnnv8_debugcount = 0;
if (cudnnv8_debug == 1 && cudnnv8_debugcount < 10) {
TORCH_WARN("TORCH_CUDNN_V8_DEBUG ON, V8 ON: ", cudnnv8_flag, " TORCH_CUDNN_USE_HEURISTIC_MODE B: ", cudnnv8_heuristic_mode_b);
cudnnv8_debugcount++;
}
return cudnnv8_flag == 1;
}
static inline bool cudnnv8_use_heur_mode_b() {
return cudnnv8_heuristic_mode_b;
}
// Keep in sync with py::enum_ in Module.cpp
enum class ConvBackend {
CudaDepthwise2d,
CudaDepthwise3d,
Cudnn,
CudnnTranspose,
Empty,
Miopen,
MiopenDepthwise,
MiopenTranspose,
Mkldnn,
MkldnnTranspose,
MkldnnEmpty,
NnpackSpatial,
Overrideable,
Slow2d,
Slow3d,
SlowDilated2d,
SlowDilated3d,
SlowTranspose2d,
SlowTranspose3d,
Winograd3x3Depthwise,
Xnnpack2d,
Mps,
MpsTranspose,
};
// Overload for selecting the convolution backend from the full set of convolution inputs.
// This overload is exposed to python for testing, etc.
TORCH_API ConvBackend select_conv_backend(
const Tensor& input, const Tensor& weight, const c10::optional<Tensor>& bias_opt,
IntArrayRef stride, SymIntArrayRef padding, IntArrayRef dilation,
bool transposed, SymIntArrayRef output_padding, int64_t groups, const at::OptionalSymIntArrayRef bias_sizes_opt);
TORCH_API at::MemoryFormat _determine_backend_memory_format(const Tensor& input,
const Tensor& weight,
const ConvBackend backend);
// ---------------------------------------------------------------------
//
// Math
//
// ---------------------------------------------------------------------
constexpr int input_batch_size_dim = 0; // also grad_input
constexpr int input_channels_dim = 1;
constexpr int output_batch_size_dim = 0; // also grad_output
constexpr int output_channels_dim = 1;
constexpr int weight_output_channels_dim = 0;
constexpr int weight_input_channels_dim = 1;
// Often written as 2 + max_dim (extra dims for batch size and channels)
constexpr int max_dim = 3;
// ---------------------------------------------------------------------
//
// Checking
//
// ---------------------------------------------------------------------
// Used on pad, stride and dilation
static void check_args(CheckedFrom c, IntArrayRef args, size_t expected_size, const char* arg_name)
{
TORCH_CHECK(args.size() <= expected_size,
"Too many ", arg_name, " values (", args.size(), ") supplied, expecting ",
expected_size, " (while checking arguments for ", c, ")");
TORCH_CHECK(args.size() >= expected_size,
"Not enough ", arg_name, " values (", args.size(), ") supplied, expecting ",
expected_size, " (while checking arguments for ", c, ")");
auto num_negative_values = std::count_if(args.begin(), args.end(), [](int x){return x < 0;});
if (num_negative_values > 0){
std::stringstream ss;
    ss << arg_name << " should be nonnegative but got (";
std::copy(args.begin(), args.end() - 1, std::ostream_iterator<int>(ss,", "));
ss << args.back() << ")" << " (while checking arguments for " << c << ")";
AT_ERROR(ss.str());
}
}
// NOTE [ Convolution checks ]
//
// NB: For many call sites, it is not strictly necessary to check all of
// these relationships (for example, for forward convolution, we compute
// the size of output ourselves, so we don't actually need to check
// output. However, writing a single function that does everything
// means we get to reuse it for both forwards and all backwards
// variants, even when the set of "real" inputs varies. The magic of
// relational computing!
//
// (There is one downside, which is that it is slightly harder to write
// error messages which are able to distinguish between real inputs
// (which the user can change) and computed inputs (which the user can
// only indirectly affect). It would be an interesting exercise to
// come up with a general framework to handle such situations.)
static void convolution_shape_check(
CheckedFrom c,
const TensorGeometryArg& input, const TensorGeometryArg& weight, const TensorGeometryArg& output,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups)
{
check_args(c, padding, input->dim() - 2, "padding");
check_args(c, stride, padding.size(), "stride");
check_args(c, dilation, padding.size(), "dilation");
// Input
checkDimRange(c, input, 3, 6 /* exclusive */);
checkSize_symint(c, input, input_channels_dim, weight->size(1) * groups);
// Weight
checkSameDim(c, input, weight);
// TODO: check that output->size() matches output_sizes
// TODO: check that weight matches output->sizes()
checkSameDim(c, input, output);
}
// NB: conv_output_size and conv_input_size are not bijections,
// as conv_output_size loses information; this is why conv_input_size
// takes an extra output_padding argument to resolve the ambiguity.
template <typename T>
static inline std::vector<T> _conv_output_size(
ArrayRef<T> input_size, ArrayRef<T> weight_size,
ArrayRef<T> padding, IntArrayRef stride, IntArrayRef dilation = IntArrayRef()
) {
// ASSERT(input_size.size() > 2)
// ASSERT(input_size.size() == weight_size.size())
bool has_dilation = !dilation.empty();
auto dim = input_size.size();
std::vector<T> output_size(dim);
output_size[0] = input_size[input_batch_size_dim];
output_size[1] = weight_size[weight_output_channels_dim];
for (const auto d : c10::irange(2, dim)) {
auto dilation_ = has_dilation ? dilation[d - 2] : 1;
auto kernel = dilation_ * (weight_size[d] - 1) + 1;
output_size[d] = (input_size[d] + (2 * padding[d - 2]) - kernel) / stride[d - 2] + 1;
}
return output_size;
}
static inline std::vector<int64_t> conv_output_size(
IntArrayRef input_size, IntArrayRef weight_size,
IntArrayRef padding, IntArrayRef stride, IntArrayRef dilation = IntArrayRef()
) {
return _conv_output_size(input_size, weight_size, padding, stride, dilation);
}
static inline std::vector<c10::SymInt> conv_output_size(
SymIntArrayRef input_size, SymIntArrayRef weight_size,
SymIntArrayRef padding, IntArrayRef stride, IntArrayRef dilation = IntArrayRef()
) {
return _conv_output_size(input_size, weight_size, padding, stride, dilation);
}
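// Worked example (illustrative numbers only): a 1x3x32x32 input convolved with a
// 16x3x3x3 weight, padding 1, stride 2, dilation 1 gives, per spatial dim,
// (32 + 2*1 - 3) / 2 + 1 = 16, so the result is {1, 16, 16, 16}. Several input sizes
// map to the same output size, which is why conv_input_size below needs the extra
// output_padding argument to invert this computation.
//
//   auto out = conv_output_size(
//       /*input_size=*/{1, 3, 32, 32},
//       /*weight_size=*/{16, 3, 3, 3},
//       /*padding=*/{1, 1}, /*stride=*/{2, 2}, /*dilation=*/{1, 1});
//   // out == std::vector<int64_t>({1, 16, 16, 16})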
template <typename T>
std::vector<T> _conv_input_size(
ArrayRef<T> output_size, ArrayRef<T> weight_size,
ArrayRef<T> padding, ArrayRef<T> output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
// ASSERT(output_size.size() > 2)
// ASSERT(output_size.size() == weight_size.size())
auto dim = output_size.size();
std::vector<T> input_size(dim);
input_size[0] = output_size[output_batch_size_dim];
input_size[1] = weight_size[weight_input_channels_dim] * groups;
for (const auto d : c10::irange(2, dim)) {
auto kernel = (weight_size[d] - 1) * dilation[d - 2] + 1;
input_size[d] = (output_size[d] - 1) * stride[d - 2] - (padding[d - 2] * 2) +
kernel + output_padding[d - 2];
}
return input_size;
}
static inline std::vector<c10::SymInt> conv_input_size(
SymIntArrayRef output_size, SymIntArrayRef weight_size,
SymIntArrayRef padding, SymIntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
return _conv_input_size(output_size, weight_size, padding, output_padding, stride, dilation, groups);
}
static inline std::vector<int64_t> conv_input_size(
IntArrayRef output_size, IntArrayRef weight_size,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
return _conv_input_size(output_size, weight_size, padding, output_padding, stride, dilation, groups);
}
template <typename T>
std::vector<T> _conv_weight_size(
ArrayRef<T> input_size, ArrayRef<T> output_size,
ArrayRef<T> padding, ArrayRef<T> output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
auto dim = input_size.size();
std::vector<T> weight_size(dim);
weight_size[0] = output_size[1];
weight_size[1] = input_size[1] / groups;
for (const auto d : c10::irange(2, dim)) {
auto kernel = input_size[d] - (output_size[d] - 1) * stride[d - 2]
+ padding[d - 2] * 2 - output_padding[d - 2];
weight_size[d] = (kernel - 1) / dilation[d - 2] + 1;
}
return weight_size;
}
static inline std::vector<c10::SymInt> conv_weight_size(
SymIntArrayRef input_size, SymIntArrayRef output_size,
SymIntArrayRef padding, SymIntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
return _conv_weight_size(input_size, output_size, padding, output_padding, stride, dilation, groups);
}
static inline std::vector<int64_t> conv_weight_size(
IntArrayRef input_size, IntArrayRef output_size,
IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups
) {
return _conv_weight_size(input_size, output_size, padding, output_padding, stride, dilation, groups);
}
static inline Tensor reshape_bias(int64_t dim, const Tensor& bias) {
std::vector<int64_t> shape(dim, 1);
shape[1] = -1;
return bias.reshape(shape);
}
static inline at::MemoryFormat cudnn_conv_suggest_memory_format(const at::Tensor& input, const at::Tensor& weight) {
// disable NHWC for float64 input.
if (!at::detail::getCUDAHooks().compiledWithCuDNN() ||
input.scalar_type() == at::kDouble ||
weight.scalar_type() == at::kDouble) {
return at::MemoryFormat::Contiguous;
}
long cudnn_version = at::detail::getCUDAHooks().versionCuDNN();
auto input_memory_format = input.suggest_memory_format();
auto weight_memory_format = weight.suggest_memory_format();
auto weight_ndim = weight.ndimension();
bool can_use_cudnn_channels_last_2d = (cudnn_version >= 7603) && (weight_ndim == 4) && (
(input_memory_format == at::MemoryFormat::ChannelsLast) ||
(weight_memory_format == at::MemoryFormat::ChannelsLast)
);
if (can_use_cudnn_channels_last_2d) {
return at::MemoryFormat::ChannelsLast;
}
bool can_use_cudnn_channels_last_3d = (cudnn_version >= 8005) && (weight_ndim == 5) && (
(input_memory_format == at::MemoryFormat::ChannelsLast3d) ||
(weight_memory_format == at::MemoryFormat::ChannelsLast3d)
);
if (can_use_cudnn_channels_last_3d) {
return at::MemoryFormat::ChannelsLast3d;
}
return at::MemoryFormat::Contiguous;
}
// controls whether emptyCache will be called following cudnn conv benchmarking
TORCH_API void _cudnn_set_conv_benchmark_empty_cache(bool enable);
TORCH_API bool _cudnn_get_conv_benchmark_empty_cache();
static inline bool miopen_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
// disable NHWC for float64 input.
if (!at::detail::getCUDAHooks().compiledWithMIOpen() ||
input.scalar_type() == at::kDouble ||
weight.scalar_type() == at::kDouble) {
return false;
}
auto input_memory_format = input.suggest_memory_format();
auto weight_memory_format = weight.suggest_memory_format();
bool can_use_miopen_channels_last_2d = (
(input_memory_format == at::MemoryFormat::ChannelsLast) ||
(weight_memory_format == at::MemoryFormat::ChannelsLast)
);
bool can_use_miopen_channels_last_3d = false;
return can_use_miopen_channels_last_2d || can_use_miopen_channels_last_3d;
}
static inline bool mkldnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
// disable NHWC for float64 input.
if (input.scalar_type() == at::kDouble ||
weight.scalar_type() == at::kDouble) {
return false;
}
// disable NHWC for MkldnnCPU tensor.
if (input.is_mkldnn() || weight.is_mkldnn()) {
return false;
}
auto input_memory_format = input.suggest_memory_format();
auto weight_memory_format = weight.suggest_memory_format();
bool can_use_mkldnn_channels_last_2d =
(input_memory_format == at::MemoryFormat::ChannelsLast) ||
(weight_memory_format == at::MemoryFormat::ChannelsLast);
// TODO: add channels last 3d support
bool can_use_mkldnn_channels_last_3d = false;
return can_use_mkldnn_channels_last_2d || can_use_mkldnn_channels_last_3d;
}
static inline bool thnn_conv_use_channels_last(const at::Tensor& input, const at::Tensor& weight) {
auto input_memory_format = input.suggest_memory_format();
auto weight_memory_format = weight.suggest_memory_format();
bool can_use_thnn_channels_last_2d = input.device().is_cpu() && (
(input_memory_format == at::MemoryFormat::ChannelsLast) || (
weight_memory_format == at::MemoryFormat::ChannelsLast));
return can_use_thnn_channels_last_2d;
}
} // namespace at::native
| 18,194
| 43.270073
| 130
|
h
|
null |
pytorch-main/aten/src/ATen/native/DilatedConvolutionUtils.h
|
#pragma once
#include <algorithm>
#include <vector>
#include <ATen/div_rtn.h>
#include <ATen/core/Tensor.h>
#include <c10/util/irange.h>
#define TORCH_CHECK_DIM_SIZE(T, DIM, DIM_SIZE, SIZE) \
TORCH_CHECK( \
T.dim() == DIM && T.size(DIM_SIZE) == SIZE, \
"Need " #T " of dimension ", \
DIM, \
" and " #T ".size[", \
DIM_SIZE, \
"] == ", \
SIZE, \
" but got input to be of shape ", \
T.sizes())
namespace at::native::internal {
namespace {
inline bool all_positive(IntArrayRef& arr) {
return std::all_of(
arr.begin(), arr.end(), [](int64_t item) { return item > 0; });
}
inline bool all_nonnegative(std::vector<int64_t>& arr) {
return std::all_of(
arr.begin(), arr.end(), [](int64_t item) { return item >= 0; });
}
} // namespace
// calculate the trailing (spatial) part of the output tensor sizes
template <int64_t dim>
std::vector<int64_t> get_output_size(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
std::vector<int64_t> sizes;
for (const auto index : c10::irange(dim)) {
sizes.push_back(
div_rtn<int64_t>(
input.size(index + input.dim() - dim) + 2 * pad_size[index] -
(dilation_size[index] * (kernel_size[index] - 1) + 1),
stride_size[index]) +
1);
}
return sizes;
}
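// Worked example (exposition only, not upstream code): for one spatial
// dimension with input length 10, kernel 3, stride 2, pad 1 and dilation 2,
// the formula above evaluates to
//   div_rtn<int64_t>(10 + 2*1 - (2*(3 - 1) + 1), 2) + 1 = 3 + 1 = 4.
static inline int64_t example_dilated_output_length() {
  return div_rtn<int64_t>(10 + 2 * 1 - (2 * (3 - 1) + 1), 2) + 1;  // == 4
}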
// calculate the sizes of output tensor
template <int64_t dim>
std::vector<int64_t> get_output_size(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
auto output_size = get_output_size<dim>(
input, kernel_size, stride_size, pad_size, dilation_size);
output_size.insert(output_size.begin(), weight.size(0));
if (input.dim() == dim + 2) {
output_size.insert(output_size.begin(), input.size(0));
}
return output_size;
}
/*
slow_conv_dilated_shape_check - check user-input to dilated convolution
forward and backward functions.
*/
template <int64_t dim>
void slow_conv_dilated_shape_check(
const Tensor& input,
const Tensor& weight,
const Tensor& bias,
const Tensor& grad_output,
IntArrayRef kernel_size,
IntArrayRef stride_size,
IntArrayRef pad_size,
IntArrayRef dilation_size) {
/*
When the following tensors are defined:
bias, grad_weight, grad_output
then these are assumed to be contiguous without checking
  because these tensors are made contiguous by calling
.contiguous() method or by resizing of zero-sized tensors in
forward/backward functions.
When grad_weight is defined then it is assumed without
checking to have the same shape as weight, see backward
functions.
*/
// Check size arguments
TORCH_CHECK(
kernel_size.size() == dim,
"kernel sizes length should be ",
dim,
", but got ",
kernel_size.size());
TORCH_CHECK(
stride_size.size() == dim,
"strides length should be ",
dim,
", but got ",
stride_size.size());
TORCH_CHECK(
dilation_size.size() == dim,
"dilations length should be ",
dim,
", but got ",
dilation_size.size());
TORCH_CHECK(
pad_size.size() == dim,
"pads length should be ",
dim,
", but got ",
pad_size.size());
TORCH_CHECK(
all_positive(kernel_size),
"kernel size should be greater than zero, but got ",
kernel_size);
TORCH_CHECK(
all_positive(stride_size),
"stride should be greater than zero, but got ",
stride_size);
TORCH_CHECK(
all_positive(dilation_size),
"dilation should be greater than zero, but got ",
dilation_size);
// check input
TORCH_CHECK(input.defined(), "input must be defined");
bool is_batch = input.dim() == dim + 2;
int64_t n = (is_batch ? 2 : 1);
int64_t ndim = n + dim;
if (!is_batch) {
// input dim has to be dim + 1 if not batched
TORCH_CHECK(
input.dim() == dim + 1,
"input must be 4D or 5D tensor but got ",
input.dim(),
"D tensor");
}
// check output sizes
auto output_size = get_output_size<dim>(
input, kernel_size, stride_size, pad_size, dilation_size);
TORCH_CHECK(
all_nonnegative(output_size),
"calculated output size ",
output_size,
" is too small (all sizes must be non-negative)");
// check weight
TORCH_CHECK(weight.defined(), "weight must be defined");
TORCH_CHECK(
weight.dim() == dim + 2,
"weight must be ",
dim + 2,
"D tensor but got ",
weight.dim(),
"D tensor dim=",
dim);
TORCH_CHECK(
weight.sizes().slice(2) == kernel_size,
"weight[2:] shape ",
weight.sizes().slice(2),
" must be equal to kernel_size ",
kernel_size);
TORCH_CHECK_DIM_SIZE(input, input.dim(), (is_batch ? 1 : 0), weight.size(1));
// check bias when present
if (bias.defined()) {
TORCH_CHECK(
bias.dim() == 1,
"bias must be 1D tensor but got ",
bias.dim(),
"D tensor");
TORCH_CHECK_DIM_SIZE(bias, 1, 0, weight.size(0));
}
// check grad_output when present
if (grad_output.defined()) {
TORCH_CHECK(
grad_output.dim() == ndim,
"grad_output must be ",
ndim,
"D tensor but got ",
grad_output.dim(),
"D tensor");
if (is_batch) {
TORCH_CHECK(
grad_output.size(0) == input.size(0),
"grad_output.size(0)=",
grad_output.size(0),
" must be input.size(0)=",
input.size(0));
}
TORCH_CHECK(
grad_output.size(n - 1) == weight.size(0),
"grad_output.size(",
n - 1,
")=",
grad_output.size(n - 1),
" must be weight.size(0)=",
weight.size(0));
TORCH_CHECK(
grad_output.sizes().slice(n) == output_size,
"grad_output[",
n,
":] shape",
grad_output.sizes().slice(n),
" must be equal to output size ",
output_size);
}
}
} // namespace at::native::internal
| 6,402
| 26.83913
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/DispatchStub.h
|
#pragma once
#include <c10/core/DeviceType.h>
#include <c10/macros/Macros.h>
#include <atomic>
#include <utility>
// Implements instruction set specific function dispatch.
//
// Kernels that may make use of specialized instruction sets (e.g. AVX2) are
// compiled multiple times with different compiler flags (e.g. -mavx2). A
// DispatchStub contains a table of function pointers for a kernel. At runtime,
// the fastest available kernel is chosen based on the features reported by
// cpuinfo.
//
// Example:
//
// In native/MyKernel.h:
// using fn_type = void(*)(const Tensor& x);
// DECLARE_DISPATCH(fn_type, stub);
//
// In native/MyKernel.cpp
// DEFINE_DISPATCH(stub);
//
// In native/cpu/MyKernel.cpp:
// namespace {
// // use anonymous namespace so that different cpu versions won't conflict
// void kernel(const Tensor& x) { ... }
// }
// REGISTER_DISPATCH(stub, &kernel);
//
// To call:
// stub(kCPU, tensor);
//
// TODO: CPU instruction set selection should be folded into whatever
// the main dispatch mechanism is.
// ignore warnings about DispatchStub::DEFAULT, AVX, AVX2 defined elsewhere
C10_CLANG_DIAGNOSTIC_PUSH()
C10_CLANG_DIAGNOSTIC_IGNORE("-Wundefined-var-template")
namespace at::native {
enum class CPUCapability {
DEFAULT = 0,
#if defined(HAVE_VSX_CPU_DEFINITION)
VSX = 1,
#elif defined(HAVE_ZVECTOR_CPU_DEFINITION)
ZVECTOR = 1,
#else
AVX2 = 1,
AVX512 = 2,
#endif
NUM_OPTIONS
};
CPUCapability get_cpu_capability();
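// Illustrative sketch (not part of upstream ATen): code outside the dispatch
// machinery can query the capability chosen at runtime, e.g. to log which
// kernel flavor will be picked.
static inline bool example_running_without_vector_extensions() {
  return get_cpu_capability() == CPUCapability::DEFAULT;
}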
template <typename FnPtr, typename T>
struct DispatchStub;
/**
* The sole purpose of this class is to outline methods that don't need to be
* specialized or otherwise inlined and duplicated (by the compiler due to
* template expansion), since it causes size bloat if there are a significant
 * number of specializations of the DispatchStub<> class.
*/
struct TORCH_API DispatchStubImpl {
void* get_call_ptr(
c10::DeviceType device_type
, void *DEFAULT
#ifdef HAVE_AVX512_CPU_DEFINITION
, void *AVX512
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
, void *AVX2
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
, void *VSX
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
, void *ZVECTOR
#endif
);
  /**
   * The actual CPU dispatch method is chosen by
   * DispatchStubImpl::choose_cpu_impl(), in decreasing order of preference,
   * when no cached pointer is found by DispatchStubImpl::get_call_ptr() in
   * cpu_dispatch_ptr.
   */
void* choose_cpu_impl(
void *DEFAULT
#ifdef HAVE_AVX512_CPU_DEFINITION
, void *AVX512
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
, void *AVX2
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
, void *VSX
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
, void *ZVECTOR
#endif
);
// Fixing dispatch error in Windows debug builds.
// See https://github.com/pytorch/pytorch/issues/22681 for more details.
#if defined(_MSC_VER) && defined(_DEBUG)
std::atomic<void*> cpu_dispatch_ptr;
void* cuda_dispatch_ptr;
void* hip_dispatch_ptr;
void* mps_dispatch_ptr;
void* privateuse1_dispatch_ptr;
#else
std::atomic<void*> cpu_dispatch_ptr{nullptr};
void* cuda_dispatch_ptr = nullptr;
void* hip_dispatch_ptr = nullptr;
void* mps_dispatch_ptr = nullptr;
void* privateuse1_dispatch_ptr = nullptr;
#endif
};
template <typename rT, typename T, typename... Args>
struct DispatchStub<rT (*)(Args...), T> {
using FnPtr = rT (*) (Args...);
DispatchStub() = default;
DispatchStub(const DispatchStub&) = delete;
DispatchStub& operator=(const DispatchStub&) = delete;
private:
FnPtr get_call_ptr(c10::DeviceType device_type) {
return reinterpret_cast<FnPtr>(
impl.get_call_ptr(device_type
, reinterpret_cast<void*>(DEFAULT)
#ifdef HAVE_AVX512_CPU_DEFINITION
, reinterpret_cast<void*>(AVX512)
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
, reinterpret_cast<void*>(AVX2)
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
, reinterpret_cast<void*>(VSX)
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
, reinterpret_cast<void*>(ZVECTOR)
#endif
)
);
}
public:
template <typename... ArgTypes>
rT operator()(c10::DeviceType device_type, ArgTypes&&... args) {
FnPtr call_ptr = get_call_ptr(device_type);
return (*call_ptr)(std::forward<ArgTypes>(args)...);
}
void set_cuda_dispatch_ptr(FnPtr fn_ptr) {
impl.cuda_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
}
void set_hip_dispatch_ptr(FnPtr fn_ptr) {
impl.hip_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
}
void set_mps_dispatch_ptr(FnPtr fn_ptr) {
impl.mps_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
}
void set_privateuse1_dispatch_ptr(FnPtr fn_ptr) {
impl.privateuse1_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
}
static TORCH_API FnPtr DEFAULT;
#ifdef HAVE_AVX512_CPU_DEFINITION
static TORCH_API FnPtr AVX512;
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
static TORCH_API FnPtr AVX2;
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
static TORCH_API FnPtr VSX;
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
static TORCH_API FnPtr ZVECTOR;
#endif
private:
DispatchStubImpl impl;
};
namespace {
template <typename DispatchStub>
struct RegisterCUDADispatch {
RegisterCUDADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
stub.set_cuda_dispatch_ptr(value);
}
};
template <typename DispatchStub>
struct RegisterMPSDispatch {
RegisterMPSDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
stub.set_mps_dispatch_ptr(value);
}
};
template <typename DispatchStub>
struct RegisterHIPDispatch {
RegisterHIPDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
// TODO: make this point at hip_dispatch_ptr
stub.set_cuda_dispatch_ptr(value);
}
};
template <typename DispatchStub>
struct RegisterPRIVATEUSE1Dispatch {
RegisterPRIVATEUSE1Dispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
stub.set_privateuse1_dispatch_ptr(value);
}
};
} // anonymous namespace
// Compiler will complain if you put things like std::tuple<Tensor, Tensor> in
// the `fn` argument of DECLARE_DISPATCH. Some possible workarounds, e.g.,
// adding parentheses and using helper struct to get rid of the parentheses, do
// not work with MSVC. So do a `using`-declaration if you need to pass in such
// `fn`, e.g., grid_sampler_2d_backward_cpu_kernel in GridSampleKernel.h.
#define DECLARE_DISPATCH(fn, name) \
struct name : DispatchStub<fn, name> { \
name() = default; \
name(const name&) = delete; \
name& operator=(const name&) = delete; \
}; \
extern TORCH_API struct name name
#define DEFINE_DISPATCH(name) struct name name
#define REGISTER_ARCH_DISPATCH(name, arch, fn) \
template <> name::FnPtr TORCH_API DispatchStub<name::FnPtr, struct name>::arch = fn;
#ifdef HAVE_AVX512_CPU_DEFINITION
#define REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX512, fn)
#else
#define REGISTER_AVX512_DISPATCH(name, fn)
#endif
#ifdef HAVE_AVX2_CPU_DEFINITION
#define REGISTER_AVX2_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX2, fn)
#else
#define REGISTER_AVX2_DISPATCH(name, fn)
#endif
#ifdef HAVE_VSX_CPU_DEFINITION
#define REGISTER_VSX_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, VSX, fn)
#else
#define REGISTER_VSX_DISPATCH(name, fn)
#endif
#ifdef HAVE_ZVECTOR_CPU_DEFINITION
#define REGISTER_ZVECTOR_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, ZVECTOR, fn)
#else
#define REGISTER_ZVECTOR_DISPATCH(name, fn)
#endif
// Macro to register the same kernel for all CPU arch types. This is useful
// if a kernel does not benefit from being recompiled across different arch types.
#define REGISTER_ALL_CPU_DISPATCH(name, fn) \
REGISTER_ARCH_DISPATCH(name, DEFAULT, fn) \
REGISTER_AVX512_DISPATCH(name, fn) \
REGISTER_AVX2_DISPATCH(name, fn) \
REGISTER_VSX_DISPATCH(name, fn) \
REGISTER_ZVECTOR_DISPATCH(name, fn)
#define REGISTER_NO_CPU_DISPATCH(name) \
REGISTER_ALL_CPU_DISPATCH(name, nullptr)
#define REGISTER_CUDA_DISPATCH(name, fn) \
static RegisterCUDADispatch<struct name> name ## __register(name, fn);
#define REGISTER_HIP_DISPATCH(name, fn) \
static RegisterHIPDispatch<struct name> name ## __register(name, fn);
#define REGISTER_MPS_DISPATCH(name, fn) \
static RegisterMPSDispatch<struct name> name ## __register(name, fn);
#define REGISTER_PRIVATEUSE1_DISPATCH(name, fn) \
static RegisterPRIVATEUSE1Dispatch<struct name> name ## __register(name, fn);
// NB: This macro must be used in an actual 'cu' file; if you try using
// it from a 'cpp' file it will not work!
#if defined(__CUDACC__)
#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
#elif defined(__HIPCC__)
// TODO: cut this over to HIP dispatch once we stop pretending that CUDA
// is HIP in the PyTorch HIPify build.
#define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
// #define REGISTER_DISPATCH(name, fn) REGISTER_HIP_DISPATCH(name, fn)
#elif defined(__OBJC__) && defined(USE_MPS)
// NB: this macro must be used from a 'mm' file in order to dispatch a MPS kernel
#define REGISTER_DISPATCH(name, fn) REGISTER_MPS_DISPATCH(name, fn)
#elif defined(CPU_CAPABILITY)
#define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
#define REGISTER_NO_AVX512_DISPATCH(name) \
REGISTER_AVX512_DISPATCH(name, nullptr)
#endif
} // namespace at::native
C10_CLANG_DIAGNOSTIC_POP()
| 9,648
| 29.827476
| 87
|
h
|
null |
pytorch-main/aten/src/ATen/native/Distance.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
namespace at {
class Tensor;
namespace native {
using pdist_forward_fn = void(*)(Tensor&, const Tensor&, const double p);
using pdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&);
using cdist_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const double p);
using cdist_backward_fn = void(*)(Tensor&, const Tensor&, const Tensor&, const Tensor&, const double p, const Tensor&);
DECLARE_DISPATCH(pdist_forward_fn, pdist_forward_stub);
DECLARE_DISPATCH(pdist_backward_fn, pdist_backward_stub);
DECLARE_DISPATCH(cdist_fn, cdist_stub);
DECLARE_DISPATCH(cdist_backward_fn, cdist_backward_stub);
}} // namespace at::native
| 724
| 33.52381
| 119
|
h
|
null |
pytorch-main/aten/src/ATen/native/EmbeddingBag.h
|
#include <ATen/core/Tensor.h>
#include <ATen/Config.h>
#include <cstdint>
#ifdef USE_FBGEMM
#include <fbgemm/FbgemmEmbedding.h>
#endif
namespace at::native {
void check_arguments(
const Tensor& weight,
const Tensor& indices,
const Tensor& offsets,
const int64_t mode,
const c10::optional<Tensor>& per_sample_weights,
bool include_last_offset);
void make_bag_size_out(
Tensor& bag_size_out,
const Tensor& offsets,
const Tensor& indices,
const int64_t mode,
const bool include_last_offset,
const bool requires_grad);
void make_max_indices_out(
Tensor& max_indices_out,
const Tensor& weight,
const Tensor& indices,
const Tensor& offsets,
const Tensor& bag_size,
const int64_t mode,
bool include_last_offset);
void make_offset2bag_out(
Tensor& offset2bag,
Tensor& output,
const Tensor& weight,
const Tensor& indices,
const Tensor& offsets,
const int64_t mode,
const c10::optional<Tensor>& per_sample_weights,
const int64_t padding_idx = -1);
#ifdef USE_FBGEMM
template<bool has_weight, typename TIndex, typename TData>
struct _CallbackAndBlockSize {
using TCallback = typename fbgemm::EmbeddingSpMDMKernelSignature<TData, TIndex, TIndex, TData>::Type;
int64_t blockSize = -1;
TCallback callback = nullptr;
static TCallback generateCallback(int64_t block_size) {
return fbgemm::GenerateEmbeddingSpMDM<TData, TIndex, TIndex, TData>(
block_size,
has_weight,
/* normalize_by_lengths */false,
/* prefetch */16,
/* is_weight_positional */false,
/* use_offsets */true);
}
_CallbackAndBlockSize() = default;
explicit _CallbackAndBlockSize(c10::optional<int64_t> maybe_block_size)
: blockSize(maybe_block_size.value_or(-1))
, callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr)
{}
};
template<typename... StorageMixins>
struct _EmbeddingBagKernelCacheImpl : private StorageMixins... {
_EmbeddingBagKernelCacheImpl() = default;
// use each of the mixins to store corresponding kernel and block size
explicit _EmbeddingBagKernelCacheImpl(c10::optional<int64_t> maybe_block_size)
: StorageMixins(maybe_block_size)...
{}
// this method is thread safe (call sites may call from different threads)
template<bool has_weight, typename TIndex, typename TData>
typename _CallbackAndBlockSize<has_weight, TIndex, TData>::TCallback
getCallback(int64_t block_size) const {
// if the cache doesn't store the kernel for the incoming block size
// (so it is different from the one stored in corresponding mixin)
// regenerate the kernel (not writing it into the cache so we avoid locks)
if (block_size != _CallbackAndBlockSize<has_weight, TIndex, TData>::blockSize) {
return _CallbackAndBlockSize<has_weight, TIndex, TData>::generateCallback(block_size);
}
// else retrieve the cached kernel from the corresponding mixin
return _CallbackAndBlockSize<has_weight, TIndex, TData>::callback;
}
};
// instantiate the cache with the list of storage mixins
// for each of the 8 _EmbeddingBagKernelCache* usages in the EmbeddingBag.cpp impl file
using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl<
_CallbackAndBlockSize<true, int32_t, float>,
_CallbackAndBlockSize<false, int32_t, float>,
_CallbackAndBlockSize<true, int64_t, float>,
_CallbackAndBlockSize<false, int64_t, float>,
_CallbackAndBlockSize<true, int32_t, unsigned short>,
_CallbackAndBlockSize<false, int32_t, unsigned short>,
_CallbackAndBlockSize<true, int64_t, unsigned short>,
_CallbackAndBlockSize<false, int64_t, unsigned short>>;
#else
struct _EmbeddingBagKernelCache {
explicit _EmbeddingBagKernelCache(c10::optional<int64_t> /* maybe_block_size */) {}
};
#endif
void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag,
Tensor& bag_size, Tensor* max_indices,
const Tensor &weight, const Tensor &indices,
const Tensor &offsets, const int64_t mode = 0,
const c10::optional<Tensor>& per_sample_weights = c10::nullopt,
bool include_last_offset = false,
int64_t padding_idx = -1,
_EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
void _embedding_bag_cpu_out(
at::Tensor& output,
at::Tensor& offset2bag,
at::Tensor& bag_size,
at::Tensor* p_max_indices,
const at::Tensor& weight,
const at::Tensor& indices,
const at::Tensor& offsets,
const bool scale_grad_by_freq,
const int64_t mode,
const bool sparse,
const c10::optional<at::Tensor>& per_sample_weights,
const bool include_last_offset,
const c10::optional<int64_t>& padding_idx,
_EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
} // namespace at::native
| 4,920
| 34.15
| 105
|
h
|
null |
pytorch-main/aten/src/ATen/native/FractionalMaxPooling.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/TensorUtils.h>
#include <c10/util/irange.h>
namespace at::native {
template<typename scalar_t>
static inline std::vector<int> generate_intervals(
scalar_t sample,
int64_t inputSize,
int64_t outputSize,
int64_t poolSize) {
std::vector<int> sequence(outputSize);
if (outputSize > 1) {
scalar_t alpha = static_cast<scalar_t>(inputSize - poolSize) /
static_cast<scalar_t>(outputSize - 1);
for (const auto i : c10::irange(outputSize - 1)) {
sequence[i] =
static_cast<int>((i + sample) * alpha) - static_cast<int>(sample * alpha);
}
}
if (outputSize > 0) {
sequence[outputSize - 1] = inputSize - poolSize;
}
return sequence;
}
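// Worked example (exposition only): for input length 10, output length 4 and
// pool size 3 with a hypothetical random sample of 0.5, alpha = 7/3 and the
// generated interval starts are {0, 2, 4, 7}; the last interval is pinned to
// inputSize - poolSize.
static inline std::vector<int> example_fractional_intervals() {
  return generate_intervals<float>(
      /*sample=*/0.5f, /*inputSize=*/10, /*outputSize=*/4, /*poolSize=*/3);
}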
template <int64_t ndim>
static inline void fractional_max_pool_check_shape(
const Tensor& input,
const Tensor& randomSamples) {
TORCH_CHECK(
input.scalar_type() == randomSamples.scalar_type(),
"Expect _random_samples to have the same dtype as input");
int64_t ndimension = randomSamples.ndimension();
TORCH_CHECK(
ndimension == 3,
"Expect _random_samples to have 3 dimensions, got ", ndimension);
int64_t N = randomSamples.size(0);
int64_t C = randomSamples.size(1);
int64_t D = randomSamples.size(2);
int64_t input_batch, input_channel;
if (ndim == 2) {
// fractional_max_pool2d
if (input.ndimension() == 3) {
input_batch = 1;
input_channel = input.size(0);
} else {
input_batch = input.size(0);
input_channel = input.size(1);
}
} else {
    // fractional_max_pool3d
if (input.ndimension() == 4) {
input_batch = 1;
input_channel = input.size(0);
} else {
input_batch = input.size(0);
input_channel = input.size(1);
}
}
  TORCH_CHECK(
      N >= input_batch,
      "Expect _random_samples.size(0) no less than input batch size.");
  TORCH_CHECK(
      C == input_channel,
      "Expect _random_samples.size(1) to equal input channel size.");
  TORCH_CHECK(
      D == ndim,
      "Expect _random_samples.size(2) to equal ", ndim, "; got ", D, ".");
}
} // namespace at::native
| 2,159
| 25.666667
| 82
|
h
|
null |
pytorch-main/aten/src/ATen/native/GridSampler.h
|
#pragma once
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include <ATen/native/GridSamplerUtils.h>
namespace at::native {
using detail::GridSamplerInterpolation;
using detail::GridSamplerPadding;
// Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value,
// where we view each pixel as an area between (idx - 0.5) and (idx + 0.5).
// if align_corners: -1 and +1 get sent to the centers of the corner pixels
// -1 --> 0
// +1 --> (size - 1)
// scale_factor = (size - 1) / 2
// if not align_corners: -1 and +1 get sent to the image edges
// -1 --> -0.5
// +1 --> (size - 1) + 0.5 == size - 0.5
// scale_factor = size / 2
template <typename scalar_t>
static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size,
bool align_corners) {
if (align_corners) {
// unnormalize coord from [-1, 1] to [0, size - 1]
return ((coord + 1) / 2) * (size - 1);
} else {
// unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
return ((coord + 1) * size - 1) / 2;
}
}
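// Worked example (exposition only): for size = 5,
//   align_corners=true  maps -1 -> 0,    0 -> 2, +1 -> 4   (corner pixel centers),
//   align_corners=false maps -1 -> -0.5, 0 -> 2, +1 -> 4.5 (image edges).
static inline double example_grid_sampler_unnormalize() {
  return grid_sampler_unnormalize<double>(1.0, 5, /*align_corners=*/false);  // == 4.5
}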
// grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize
// except that it also returns the `d output / d input` via pointer argument
// `grad_in`.
// This is useful in the backward pass of grid_sampler.
template <typename scalar_t>
static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size,
bool align_corners, scalar_t *grad_in) {
if (align_corners) {
// unnormalize coord from [-1, 1] to [0, size - 1]
*grad_in = static_cast<scalar_t>(size - 1) / 2;
return ((coord + 1) / 2) * (size - 1);
} else {
// unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
*grad_in = static_cast<scalar_t>(size) / 2;
return ((coord + 1) * size - 1) / 2;
}
}
// Clips coordinates to between 0 and clip_limit - 1
template<typename scalar_t>
static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) {
return std::min(static_cast<scalar_t>(clip_limit - 1), std::max(in, static_cast<scalar_t>(0)));
}
// clip_coordinates_set_grad works similarly to clip_coordinates except that
// it also returns the `d output / d input` via pointer argument `grad_in`.
// This is useful in the backward pass of grid_sampler.
template<typename scalar_t>
static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit,
scalar_t *grad_in) {
// Note that it is important for the gradient calculation that borders
// are considered out of bounds.
if (in <= static_cast<scalar_t>(0)) {
*grad_in = static_cast<scalar_t>(0);
return static_cast<scalar_t>(0);
} else {
scalar_t max = static_cast<scalar_t>(clip_limit - 1);
if (in >= max) {
*grad_in = static_cast<scalar_t>(0);
return max;
} else {
*grad_in = static_cast<scalar_t>(1);
return in;
}
}
}
// Reflects coordinates until they fall between low and high (inclusive).
// The bounds are passed as twice their value so that half-integer values
// can be represented as ints.
template<typename scalar_t>
static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low,
int64_t twice_high) {
if (twice_low == twice_high) {
return static_cast<scalar_t>(0);
}
scalar_t min = static_cast<scalar_t>(twice_low) / 2;
scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
in = std::fabs(in - min);
// `fmod` returns same sign as `in`, which is positive after the `fabs` above.
scalar_t extra = std::fmod(in, span);
int flips = static_cast<int>(std::floor(in / span));
if (flips % 2 == 0) {
return extra + min;
} else {
return span - extra + min;
}
}
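// Worked example (exposition only): with align_corners=true and size 5 the
// valid range is [0, 4], i.e. twice_low = 0 and twice_high = 8; the
// out-of-bounds coordinate 5.5 reflects off the right border to 4 - 1.5 = 2.5.
static inline double example_reflect_coordinates() {
  return reflect_coordinates<double>(5.5, /*twice_low=*/0, /*twice_high=*/8);  // == 2.5
}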
// reflect_coordinates_set_grad works similarly to reflect_coordinates except
// that it also returns the `d output / d input` via pointer argument
// `grad_in`.
// This is useful in the backward pass of grid_sampler.
template<typename scalar_t>
static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low,
int64_t twice_high, scalar_t *grad_in) {
if (twice_low == twice_high) {
*grad_in = static_cast<scalar_t>(0);
return static_cast<scalar_t>(0);
}
int grad_in_mult_;
scalar_t min = static_cast<scalar_t>(twice_low) / 2;
scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
in = in - min;
if (in < static_cast<scalar_t>(0)) {
grad_in_mult_ = -1;
in = -in;
} else {
grad_in_mult_ = 1;
}
// `fmod` returns same sign as `in`, which is positive after the `if` above.
scalar_t extra = std::fmod(in, span);
int flips = static_cast<int>(std::floor(in / span));
if (flips % 2 == 0) {
*grad_in = static_cast<scalar_t>(grad_in_mult_);
return extra + min;
} else {
*grad_in = static_cast<scalar_t>(-grad_in_mult_);
return span - extra + min;
}
}
// Mapping the out-of-boundary points back into boundary
// This would only affect padding_mode=border or reflection
template<typename scalar_t>
static inline scalar_t compute_coordinates(scalar_t coord, int64_t size,
GridSamplerPadding padding_mode,
bool align_corners) {
if (padding_mode == GridSamplerPadding::Border) {
// clip coordinates to image borders
coord = clip_coordinates(coord, size);
} else if (padding_mode == GridSamplerPadding::Reflection) {
// reflect coordinates by image borders
if (align_corners) {
coord = reflect_coordinates(coord, 0, 2*(size - 1));
} else {
coord = reflect_coordinates(coord, -1, 2*size - 1);
}
// clip coordinates to image borders
coord = clip_coordinates(coord, size);
}
return coord;
}
// Computes the pixel source index value for a grid coordinate
template <typename scalar_t>
static inline scalar_t grid_sampler_compute_source_index(
scalar_t coord,
int64_t size,
GridSamplerPadding padding_mode,
bool align_corners) {
coord = grid_sampler_unnormalize(coord, size, align_corners);
coord = compute_coordinates(coord, size, padding_mode, align_corners);
return coord;
}
// grid_sampler_compute_source_index_set_grad works similarly to
// grid_sampler_compute_source_index except that it also returns the
// `d output / d input` via pointer argument `grad_in`.
// This is useful in the backward pass of grid_sampler.
template <typename scalar_t>
static inline scalar_t grid_sampler_compute_source_index_set_grad(
scalar_t coord,
int64_t size,
GridSamplerPadding padding_mode,
bool align_corners,
scalar_t *grad_in) {
scalar_t grad_clip, grad_refl;
coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in);
if (padding_mode == GridSamplerPadding::Border) {
// clip coordinates to image borders
coord = clip_coordinates_set_grad(coord, size, &grad_clip);
*grad_in = (*grad_in) * grad_clip;
} else if (padding_mode == GridSamplerPadding::Reflection) {
// reflect coordinates by image borders
if (align_corners) {
coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl);
} else {
coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl);
}
// clip coordinates to image borders
coord = clip_coordinates_set_grad(coord, size, &grad_clip);
*grad_in = (*grad_in) * grad_refl * grad_clip;
}
return coord;
}
static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) {
return h >= 0 && h < H && w >= 0 && w < W;
}
static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) {
return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W;
}
template<typename scalar_t>
static inline scalar_t get_value_bounded(
scalar_t* data,
scalar_t x,
scalar_t y,
int64_t W,
int64_t H,
int64_t sW,
int64_t sH,
GridSamplerPadding padding_mode,
bool align_corners) {
x = compute_coordinates(x, W, padding_mode, align_corners);
y = compute_coordinates(y, H, padding_mode, align_corners);
int64_t ix = static_cast<int64_t>(x);
int64_t iy = static_cast<int64_t>(y);
if (within_bounds_2d(iy, ix, H, W)) {
return data[iy * sH + ix * sW];
}
return static_cast<scalar_t>(0);
}
template<typename scalar_t>
static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w,
int64_t sH, int64_t sW, int64_t H, int64_t W,
scalar_t delta) {
if (within_bounds_2d(h, w, H, W)) {
data[h * sH + w * sW] += delta;
}
}
template<typename scalar_t>
static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w,
int64_t sD, int64_t sH, int64_t sW,
int64_t D, int64_t H, int64_t W,
scalar_t delta) {
if (within_bounds_3d(d, h, w, D, H, W)) {
data[d * sD + h * sH + w * sW] += delta;
}
}
template<typename scalar_t>
static inline void add_value_bounded(
scalar_t* data,
scalar_t x,
scalar_t y,
int64_t W,
int64_t H,
int64_t sW,
int64_t sH,
scalar_t delta,
GridSamplerPadding padding_mode,
bool align_corners) {
x = compute_coordinates(x, W, padding_mode, align_corners);
y = compute_coordinates(y, H, padding_mode, align_corners);
int64_t ix = static_cast<int64_t>(x);
int64_t iy = static_cast<int64_t>(y);
safe_add_2d(data, iy, ix, sH, sW, H, W, delta);
}
// Calculate the differential of the cubic convolution, i.e. `d coeff / d x`
template<typename scalar_t>
static inline void get_cubic_coefficients_grad(
scalar_t coeffs[4],
scalar_t t) {
// Must be the same as forward calculation in
// aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients
scalar_t A = -0.75;
scalar_t x;
  x = -1 - t; // 1 < |x| = |1 + t| < 2
  coeffs[0] = (-3 * A * x - 10 * A) * x - 8 * A;
  x = -t;     // |x| = |t| <= 1
  coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x;
  x = 1 - t;  // |x| = |1 - t| <= 1
  coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x;
  x = 2 - t;  // 1 < |x| = |2 - t| < 2
  coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A;
}
} // namespace at::native
| 10,401
| 33.789298
| 103
|
h
|
null |
pytorch-main/aten/src/ATen/native/GridSamplerUtils.h
|
#pragma once
// See NOTE: [Tensor vs. TensorBase]
// https://github.com/pytorch/pytorch/pull/66979
#include <ATen/core/TensorBase.h>
#include <ATen/native/TensorProperties.h>
#include <ATen/native/CanUse32BitIndexMath.h>
namespace at::native {
namespace detail {
enum class GridSamplerInterpolation {Bilinear, Nearest, Bicubic};
enum class GridSamplerPadding {Zeros, Border, Reflection};
} // namespace detail
using detail::GridSamplerInterpolation;
using detail::GridSamplerPadding;
namespace {
// See NOTE [ grid_sampler Native Functions ].
void check_grid_sampler_common(
const TensorBase& input,
const TensorBase& grid
) {
auto input_opt = input.options();
auto grid_opt = grid.options();
TORCH_CHECK(
input.defined(),
"grid_sampler(): expected input to not be undefined");
TORCH_CHECK(
grid.defined(),
"grid_sampler(): expected grid to not be undefined");
TORCH_CHECK(
input_opt.device() == grid_opt.device(),
"grid_sampler(): expected input and grid to be on same device, but input "
"is on ", input_opt.device(), " and grid is on ", grid_opt.device());
TORCH_CHECK(
input_opt.layout() == kStrided && grid_opt.layout() == kStrided,
"grid_sampler(): expected input and grid to have torch.strided layout, but "
"input has ", input_opt.layout(), " and grid has ", grid_opt.layout());
TORCH_CHECK(
input.size(0) == grid.size(0),
"grid_sampler(): expected grid and input to have same batch size, but got "
"input with sizes ", input.sizes(), " and grid with sizes ", grid.sizes());
TORCH_CHECK(
grid.size(-1) == input.dim() - 2,
"grid_sampler(): expected grid to have size ", input.dim() - 2, " in last "
"dimension, but got grid with sizes ", grid.sizes());
for (const auto i : c10::irange(2, input.dim())) {
TORCH_CHECK(input.size(i) > 0,
"grid_sampler(): expected input to have non-empty spatial dimensions, "
"but input has sizes ", input.sizes(), " with dimension ", i, " being "
"empty");
}
}
// See NOTE [ grid_sampler Native Functions ].
void check_grid_sampler_2d(
const TensorBase& input,
const TensorBase& grid
) {
TORCH_CHECK(
input.dim() == 4 && input.dim() == grid.dim(),
"grid_sampler(): expected 4D input and grid with same number of "
"dimensions, but got input with sizes ", input.sizes(),
" and grid with sizes ", grid.sizes());
}
// See NOTE [ grid_sampler Native Functions ].
void check_grid_sampler_3d(
const TensorBase& input,
const TensorBase& grid,
int64_t interpolation_mode
) {
TORCH_CHECK(
input.dim() == 5 && input.dim() == grid.dim(),
"grid_sampler(): expected 5D input and grid with same number of "
"dimensions, but got input with sizes ", input.sizes(),
" and grid with sizes ", grid.sizes());
TORCH_CHECK(
!(input.dim() == 5 &&
static_cast<GridSamplerInterpolation>(interpolation_mode) ==
GridSamplerInterpolation::Bicubic),
"grid_sampler(): bicubic interpolation only supports 4D input");
}
// See NOTE [ grid_sampler Native Functions ].
// cudnn does not support input channel sizes larger than 1024.
bool cond_cudnn_grid_sampler(
const TensorBase& input,
const TensorBase& grid
) {
return (
at::native::cudnn_is_acceptable(input) &&
at::native::cudnn_is_acceptable(grid) &&
at::native::canUse32BitIndexMath(input) &&
at::native::canUse32BitIndexMath(grid) &&
input.dim() == 4 &&
input.sym_size(1) <= 1024);
}
} // anonymous namespace
} // namespace at::native
| 3,510
| 30.918182
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/IndexingUtils.h
|
#pragma once
#include <ATen/ExpandUtils.h>
#include <ATen/native/CanUse32BitIndexMath.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/core/IListRef.h>
#include <c10/util/irange.h>
namespace at::native {
[[noreturn]]
static void invalid_mask(const Tensor & self, int64_t idx, const Tensor & mask, int64_t maskIdx) {
TORCH_CHECK_INDEX(false, "The shape of the mask ", mask.sizes(), " at index ", maskIdx,
" does not match the shape of the indexed tensor ", self.sizes(), " at index ", idx);
}
static C10_UNUSED std::vector<Tensor> expandTensors(const Tensor & self, IOptTensorListRef indices) {
// If indices come in as ByteTensor or BoolTensor (masks), expand them into the equivalent indexing by LongTensors
std::vector<Tensor> result;
for (const auto& index_opt : indices) {
if (!index_opt.has_value()) {
result.emplace_back();
} else {
const auto& index = *index_opt;
if (index.scalar_type() == kByte || index.scalar_type() == kBool) {
if (index.scalar_type() == kByte) {
TORCH_WARN("indexing with dtype torch.uint8 is now deprecated," \
" please use a dtype torch.bool instead.");
}
// The sizes of the ByteTensor mask or bool tensor must match the sizes of the
// corresponding dimensions in self
for (const auto j : c10::irange(index.dim())) {
int64_t srcIdx = result.size() + j;
if (index.size(j) != self.size(srcIdx)) {
invalid_mask(self, srcIdx, index, j);
}
}
// Replace with nonzeros
auto nonzero = index.nonzero();
for (const auto j : c10::irange(index.dim())) {
result.emplace_back(nonzero.select(1, j));
}
} else {
result.emplace_back(std::move(index));
}
}
}
return result;
}
static C10_UNUSED void checkIndexTensorTypes(IOptTensorListRef indices, bool allow_int=false) {
for (const auto& tensor : indices) {
if (tensor.has_value() && tensor->defined()) {
auto scalarType = tensor->scalar_type();
if (allow_int) {
if (scalarType != kLong && scalarType != kByte && scalarType != kBool && scalarType != kInt) {
TORCH_CHECK_INDEX(false, "tensors used as indices must be long, int, byte or bool tensors");
}
} else {
if (scalarType != kLong && scalarType != kByte && scalarType != kBool) {
TORCH_CHECK_INDEX(false, "tensors used as indices must be long, byte or bool tensors");
}
}
}
}
}
inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<Tensor> list) {
torch::List<c10::optional<Tensor>> result;
result.reserve(list.size());
for (const Tensor& a : list) {
result.push_back(a);
}
return result;
}
inline torch::List<c10::optional<Tensor>> toListOfOptionalTensors(ArrayRef<IValue> list) {
torch::List<c10::optional<Tensor>> result;
result.reserve(list.size());
for (const IValue& a : list) {
result.push_back(a.isTensor() ? c10::optional<Tensor>(a.toTensor()) : c10::optional<Tensor>());
}
return result;
}
static C10_UNUSED bool hasContiguousSubspace(TensorList tl) {
// true if all the non-null tensors are adjacent
auto isDefined = [](const Tensor & tensor){ return tensor.defined(); };
auto isNull = [](const Tensor & tensor){ return !tensor.defined(); };
auto start = std::find_if(tl.begin(), tl.end(), isDefined);
auto stop = std::find_if(tl.rbegin(), tl.rend(), isDefined);
auto it = std::find_if(start, stop.base(), isNull);
return it == stop.base();
}
// Transposes the tensor and indices together so that all the non-null indices
// index the first k dimensions of the tensor. Returns the transposed tensor
// and the reordered indices. For example:
// transposeToFront(tensor, {nullptr, a, nullptr, b})
// returns
// tensor.permute([1, 3, 0, 2]), {a, b, nullptr, nullptr}
static C10_UNUSED std::tuple<Tensor, std::vector<Tensor>>
transposeToFront(Tensor self, TensorList indices) {
std::vector<int64_t> dims;
std::vector<Tensor> transposedIndices;
dims.reserve(self.dim());
for (const auto i : c10::irange(self.dim())) {
if (indices[i].defined()) {
dims.push_back(i);
transposedIndices.emplace_back(indices[i]);
}
}
for (const auto i : c10::irange(self.dim())) {
if (!indices[i].defined()) {
dims.push_back(i);
transposedIndices.emplace_back();
}
}
return std::make_tuple(self.permute(dims), std::move(transposedIndices));
}
inline std::tuple<Tensor, std::vector<Tensor>, std::vector<int64_t>>
transposeToFrontAndInvPerm(Tensor self, TensorList indices) {
std::vector<int64_t> dims;
std::vector<int64_t> invPerm;
std::vector<Tensor> transposedIndices;
dims.reserve(self.dim());
invPerm.resize(self.dim());
for (const auto i : c10::irange(self.dim())) {
if (indices[i].defined()) {
dims.push_back(i);
transposedIndices.emplace_back(indices[i]);
}
}
for (const auto i : c10::irange(self.dim())) {
if (!indices[i].defined()) {
dims.push_back(i);
transposedIndices.emplace_back();
}
}
for (const auto i : c10::irange(self.dim())) {
invPerm[dims[i]] = i;
}
return std::make_tuple(self.permute(dims), std::move(transposedIndices), std::move(invPerm));
}
struct AdvancedIndex {
AdvancedIndex(const Tensor& src, TensorList indices);
Tensor src;
std::vector<Tensor> indices;
DimVector indexed_sizes;
DimVector indexed_strides;
int64_t dims_before;
int64_t dims_after;
};
} //namespace at::native
| 5,545
| 33.447205
| 116
|
h
|
null |
pytorch-main/aten/src/ATen/native/Lerp.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
#include <ATen/OpMathType.h>
#include <ATen/TensorIterator.h>
#include <c10/core/Scalar.h>
namespace at::native {
template <typename scalar_t>
C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(scalar_t weight) {
return std::abs(weight) < scalar_t(0.5);
}
template <typename scalar_t>
C10_HOST_DEVICE C10_ALWAYS_INLINE bool is_lerp_weight_small(c10::complex<scalar_t> weight) {
// Avoid the sqrt in abs(weight)
return (weight.real() * weight.real() + weight.imag() * weight.imag()) < scalar_t(0.25);
}
template <typename scalar_t, typename weight_t>
C10_HOST_DEVICE C10_ALWAYS_INLINE scalar_t lerp(scalar_t self_, scalar_t end_, weight_t weight_) {
using opmath_t = at::opmath_type<scalar_t>;
using opmath_weight_t = at::opmath_type<weight_t>;
opmath_t self = self_;
opmath_t end = end_;
opmath_weight_t weight = weight_;
  // Conditional for better numerical accuracy. This has been discussed in
// https://github.com/pytorch/pytorch/pull/18871
return is_lerp_weight_small(weight)
? self + weight * (end - self)
: end - (end - self) * (opmath_t(1) - weight);
}
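// Worked example (exposition only): both branches above compute the same
// affine interpolation, e.g. lerp(2, 10, 0.25) = 2 + 0.25 * (10 - 2) = 4.
static inline float example_lerp() {
  return lerp<float, float>(2.0f, 10.0f, 0.25f);  // == 4.0f
}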
using lerp_fn_scalar = void (*)(
at::TensorIteratorBase& iter,
const Scalar& weight);
using lerp_fn_tensor = void (*)(
at::TensorIteratorBase& iter);
DECLARE_DISPATCH(lerp_fn_scalar, lerp_kernel_scalar_weight);
DECLARE_DISPATCH(lerp_fn_tensor, lerp_kernel_tensor_weight);
} // namespace at::native
| 1,463
| 30.148936
| 98
|
h
|
null |
pytorch-main/aten/src/ATen/native/LossMulti.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
namespace at::native {
namespace {
static C10_UNUSED void multilabel_margin_loss_shape_check(
int64_t& nframe,
int64_t& dim,
const int64_t& ndims,
TensorArg& target_arg,
const Tensor& input,
const Tensor& target) {
bool valid_inputs = (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0;
TORCH_CHECK(
valid_inputs,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
input.sizes());
if (ndims <= 1) {
nframe = 1;
dim = ndims == 0 ? 1 : input.size(0);
TORCH_CHECK(
valid_inputs && target.dim() <= 1 && target.numel() == dim,
"inconsistent size ",
target.sizes(),
" for ",
target_arg);
} else {
nframe = input.size(0);
dim = input.size(1);
TORCH_CHECK(
valid_inputs && target.dim() == 2 && target.size(0) == nframe &&
target.size(1) == dim,
"inconsistent size ",
target.sizes(),
" for ",
target_arg);
}
}
static C10_UNUSED void multi_margin_loss_shape_check(
int64_t& nframe,
int64_t& dim,
const int64_t& ndims,
const Tensor& input,
const Tensor& target,
const c10::optional<Tensor>& weight) {
TORCH_CHECK(
(ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || ndims == 0,
"Expected non-empty vector or matrix with optional 0-dim batch size, but got: ",
input.sizes());
if (ndims <= 1) {
nframe = 1;
dim = ndims == 0 ? 1 : input.size(0);
} else {
nframe = input.size(0);
dim = input.size(1);
}
TORCH_CHECK(
target.dim() <= 1 && target.numel() == nframe,
"inconsistent target size, expected ", nframe, " but got ",
target.sizes());
if (weight && weight->defined()) {
TORCH_CHECK(
weight->dim() <= 1 && weight->numel() == dim,
"inconsistent weight size, expected ", dim, " but got ",
weight->sizes());
}
}
} // anonymous namespace
} // namespace at::native
| 2,372
| 29.037975
| 111
|
h
|
null |
pytorch-main/aten/src/ATen/native/MathBitFallThroughLists.h
|
#pragma once
namespace at {
// views and their in-place version ops
#define TORCH_VIEW_FNS(m) \
m.impl("as_strided_", torch::CppFunction::makeFallthrough()); \
m.impl("detach", torch::CppFunction::makeFallthrough()); \
m.impl("detach_", torch::CppFunction::makeFallthrough()); \
m.impl("diagonal", torch::CppFunction::makeFallthrough()); \
m.impl("expand", torch::CppFunction::makeFallthrough()); \
m.impl("expand_as", torch::CppFunction::makeFallthrough()); \
m.impl("movedim.int", torch::CppFunction::makeFallthrough()); \
m.impl("movedim.intlist", torch::CppFunction::makeFallthrough()); \
m.impl("narrow", torch::CppFunction::makeFallthrough()); \
m.impl("permute", torch::CppFunction::makeFallthrough()); \
m.impl("select.Dimname", torch::CppFunction::makeFallthrough()); \
m.impl("select.int", torch::CppFunction::makeFallthrough()); \
m.impl("squeeze", torch::CppFunction::makeFallthrough()); \
m.impl("squeeze_", torch::CppFunction::makeFallthrough()); \
m.impl("transpose.int", torch::CppFunction::makeFallthrough()); \
m.impl("transpose.Dimname", torch::CppFunction::makeFallthrough()); \
m.impl("transpose_", torch::CppFunction::makeFallthrough()); \
m.impl("t", torch::CppFunction::makeFallthrough()); \
m.impl("t_", torch::CppFunction::makeFallthrough()); \
m.impl("real", torch::CppFunction::makeFallthrough()); \
m.impl("imag", torch::CppFunction::makeFallthrough()); \
m.impl("view_as_real", torch::CppFunction::makeFallthrough()); \
m.impl("unflatten.int", torch::CppFunction::makeFallthrough()); \
m.impl("unflatten.Dimname", torch::CppFunction::makeFallthrough()); \
m.impl("unfold", torch::CppFunction::makeFallthrough()); \
m.impl("unsqueeze", torch::CppFunction::makeFallthrough()); \
m.impl("unsqueeze_", torch::CppFunction::makeFallthrough()); \
m.impl("view_as", torch::CppFunction::makeFallthrough()); \
m.impl("unbind.int", torch::CppFunction::makeFallthrough()); \
m.impl("unbind.Dimname", torch::CppFunction::makeFallthrough()); \
m.impl("split.Tensor", torch::CppFunction::makeFallthrough()); \
m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \
m.impl("swapaxes", torch::CppFunction::makeFallthrough()); \
m.impl("swapdims", torch::CppFunction::makeFallthrough()); \
m.impl("chunk", torch::CppFunction::makeFallthrough()); \
m.impl("reshape", torch::CppFunction::makeFallthrough()); \
m.impl("alias", torch::CppFunction::makeFallthrough()); \
m.impl("hsplit.int", torch::CppFunction::makeFallthrough()); \
m.impl("hsplit.array", torch::CppFunction::makeFallthrough()); \
m.impl("dsplit.int", torch::CppFunction::makeFallthrough()); \
m.impl("dsplit.array", torch::CppFunction::makeFallthrough()); \
m.impl("vsplit.int", torch::CppFunction::makeFallthrough()); \
m.impl("vsplit.array", torch::CppFunction::makeFallthrough()); \
m.impl("conj", torch::CppFunction::makeFallthrough()); \
m.impl("_conj", torch::CppFunction::makeFallthrough()); \
m.impl("_unsafe_view", torch::CppFunction::makeFallthrough()); \
m.impl("resize_", torch::CppFunction::makeFallthrough());
#define TENSOR_UTILITIES_AND_CONSTRUCTORS(m) \
m.impl("empty_like", torch::CppFunction::makeFallthrough()); \
m.impl("empty.memory_format", torch::CppFunction::makeFallthrough()); \
m.impl("empty.out", torch::CppFunction::makeFallthrough()); \
m.impl("empty_strided", torch::CppFunction::makeFallthrough()); \
m.impl("full_like", torch::CppFunction::makeFallthrough()); \
m.impl("stride.int", torch::CppFunction::makeFallthrough()); \
m.impl("stride.Dimname", torch::CppFunction::makeFallthrough()); \
m.impl("size.int", torch::CppFunction::makeFallthrough()); \
m.impl("size.Dimname", torch::CppFunction::makeFallthrough()); \
m.impl("is_complex", torch::CppFunction::makeFallthrough()); \
m.impl("is_floating_point", torch::CppFunction::makeFallthrough()); \
m.impl("requires_grad_", torch::CppFunction::makeFallthrough());
} // namespace at
#define TORCH_VIEW_FNS_NATIVE_FN_REGISTRATION(m) \
m.impl("as_strided", torch::CppFunction::makeFallthrough()); \
m.impl("view", torch::CppFunction::makeFallthrough());
| 4,136
| 56.458333
| 73
|
h
|
null |
pytorch-main/aten/src/ATen/native/MaxPooling.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/Parallel.h>
#include <ATen/native/DispatchStub.h>
namespace at::native {
// TODO(Heitor) Template by dimension
struct PoolingParams1D {
int64_t NB; // Number of batches
int64_t NC; // Number of channels
int64_t IW; // Input width
int64_t OW; // Output width
int64_t KW; // Kernel width
int64_t SJ; // Column stride
int64_t PJ; // Column padding
int64_t DJ; // Column dilation
// Return index of input element for the given kernel and output index
inline int64_t index(int64_t kj, int64_t oj) const {
return oj * SJ + kj * DJ - PJ;
}
// Return index of first output within bounds for this kernel index
inline int64_t valid_output_start(int64_t kj) const {
    int64_t ij = index(kj, 0);
return ij < 0 ? at::divup(-ij, SJ) : 0;
}
// Return index one past last output within bounds for this kernel index
inline int64_t valid_output_end(int64_t kj) const {
int64_t ij = index(kj, OW - 1);
return ij >= IW ? OW - at::divup(ij - (IW - 1), SJ) : OW;
}
};
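// Worked example (exposition only): kernel 3, stride 2, padding 1, dilation 1
// over 10 inputs and 5 outputs. For kernel tap kj = 0, index(0, 0) = -1 falls
// left of the input, so valid outputs for that tap start at divup(1, 2) = 1.
static inline int64_t example_valid_output_start() {
  const PoolingParams1D p{/*NB=*/1, /*NC=*/1, /*IW=*/10, /*OW=*/5,
                          /*KW=*/3, /*SJ=*/2, /*PJ=*/1, /*DJ=*/1};
  return p.valid_output_start(/*kj=*/0);  // == 1
}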
using pooling_fn = void (*)(Tensor&, const Tensor&, const PoolingParams1D&);
DECLARE_DISPATCH(pooling_fn, max_pool1d_stub);
} // namespace at::native
| 1,216
| 27.302326
| 76
|
h
|
null |
pytorch-main/aten/src/ATen/native/NonEmptyUtils.h
|
#include <ATen/core/TensorBase.h>
#include <algorithm>
#include <vector>
namespace at::native {
inline int64_t ensure_nonempty_dim(int64_t dim) {
return std::max<int64_t>(dim, 1);
}
inline int64_t ensure_nonempty_size(const TensorBase &t, int64_t dim) {
return t.dim() == 0 ? 1 : t.size(dim);
}
inline int64_t ensure_nonempty_stride(const TensorBase &t, int64_t dim) {
return t.dim() == 0 ? 1 : t.stride(dim);
}
using IdxVec = std::vector<int64_t>;
inline IdxVec ensure_nonempty_vec(IdxVec vec) {
if (vec.empty()) {
vec.push_back(1);
}
return vec;
}
} // namespace at::native
| 599
| 20.428571
| 73
|
h
|
null |
pytorch-main/aten/src/ATen/native/Padding.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/native/DispatchStub.h>
namespace at::native {
using padding_fn = void (*)(const Tensor&, const Tensor&, IntArrayRef);
// reflection padding
DECLARE_DISPATCH(padding_fn, reflection_pad1d_kernel);
DECLARE_DISPATCH(padding_fn, reflection_pad1d_backward_kernel);
DECLARE_DISPATCH(padding_fn, reflection_pad2d_kernel);
DECLARE_DISPATCH(padding_fn, reflection_pad2d_backward_kernel);
DECLARE_DISPATCH(padding_fn, reflection_pad3d_kernel);
DECLARE_DISPATCH(padding_fn, reflection_pad3d_backward_kernel);
// replication padding
DECLARE_DISPATCH(padding_fn, replication_pad1d_kernel);
DECLARE_DISPATCH(padding_fn, replication_pad1d_backward_kernel);
DECLARE_DISPATCH(padding_fn, replication_pad2d_kernel);
DECLARE_DISPATCH(padding_fn, replication_pad2d_backward_kernel);
DECLARE_DISPATCH(padding_fn, replication_pad3d_kernel);
DECLARE_DISPATCH(padding_fn, replication_pad3d_backward_kernel);
namespace padding {
template <int dim>
static inline void check_valid_input(const Tensor& input, IntArrayRef padding) {
TORCH_CHECK(padding.size() == 2 * dim,
"padding size is expected to be ", 2 * dim,
", but got: ", padding.size());
int input_dim = input.dim();
bool is_batch_mode = input_dim == (dim + 2);
bool valid_batch_mode = is_batch_mode;
bool valid_non_batch_mode = !is_batch_mode;
if (is_batch_mode) {
    // the batch dim may be 0, but all other dims must be non-zero.
for (const auto d : c10::irange(1, input_dim)) {
valid_batch_mode = valid_batch_mode && input.size(d) != 0;
}
} else {
for (const auto d : c10::irange(0, input_dim)) {
valid_non_batch_mode = valid_non_batch_mode && input.size(d) != 0;
}
}
// allow empty batch size but not other dimensions.
TORCH_CHECK(valid_batch_mode || valid_non_batch_mode,
"Expected ", dim + 1, "D or ", dim + 2,
"D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
input.sizes());
}
} // namespace padding
} // at::native
| 2,026
| 31.174603
| 108
|
h
|
null |
pytorch-main/aten/src/ATen/native/PointwiseOps.h
|
// Ternary and higher-order pointwise operations
#pragma once
#include <ATen/native/DispatchStub.h>
namespace c10 {
class Scalar;
}
namespace at {
struct TensorIterator;
struct TensorIteratorBase;
namespace native {
using pointwise_fn = void (*)(TensorIterator&, const Scalar& scalar);
using structured_pointwise_fn = void (*)(TensorIteratorBase&, const Scalar& scalar);
using pointwise_fn_double = void (*)(TensorIterator&, const Scalar&, double);
DECLARE_DISPATCH(structured_pointwise_fn, addcmul_stub);
DECLARE_DISPATCH(structured_pointwise_fn, addcdiv_stub);
DECLARE_DISPATCH(pointwise_fn_double, smooth_l1_backward_stub);
DECLARE_DISPATCH(pointwise_fn_double, huber_backward_stub);
DECLARE_DISPATCH(pointwise_fn, mse_backward_stub);
} // namespace native
} // namespace at
| 786
| 26.137931
| 84
|
h
|
null |
pytorch-main/aten/src/ATen/native/Pool.h
|
#include <ATen/core/Tensor.h>
#include <ATen/div_rtn.h>
#include <ATen/TensorUtils.h>
#include <ATen/native/DispatchStub.h>
#include <c10/util/irange.h>
#include <utility>
#pragma once
namespace at::native {
using max_pool2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input,
int kW, int kH, int dW, int dH, int padW, int padH, int dilationW, int dilationH);
using max_pool2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
DECLARE_DISPATCH(max_pool2d_fn, max_pool2d_kernel);
DECLARE_DISPATCH(max_pool2d_backward_fn, max_pool2d_backward_kernel);
// average pooling uses the same signature for forward and backward
using avg_pool2d_fn = void(*)(const Tensor& output, const Tensor& input, int64_t kW, int64_t kH,
int64_t dW, int64_t dH, int64_t padW, int64_t padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
using avg_pool2d_backward_fn = void(*)(const Tensor& output, const Tensor& input, int kW, int kH,
int dW, int dH, int padW, int padH, bool count_include_pad, c10::optional<int64_t> divisor_override);
DECLARE_DISPATCH(avg_pool2d_fn, avg_pool2d_kernel);
DECLARE_DISPATCH(avg_pool2d_backward_fn, avg_pool2d_backward_kernel);
namespace {
template <typename dest_t, typename src_t>
static inline dest_t
safe_downcast(src_t v)
{
TORCH_CHECK(std::numeric_limits<dest_t>::min() <= v && v <= std::numeric_limits<dest_t>::max(),
"integer out of range");
return static_cast<dest_t>(v);
}
template<typename T>
static inline T pooling_output_shape_pad_lr(
T inputSize, T kernelSize, T pad_l, T pad_r, T stride, T dilation,
bool ceil_mode) {
T outputSize = div_rtn<T>(
inputSize + pad_l + pad_r - dilation * (kernelSize - 1) - 1 +
(ceil_mode ? stride - 1 : 0), stride) + 1;
if (ceil_mode) {
// ensure that the last pooling starts inside the image
// needed to avoid problems in ceil mode
if ((outputSize - 1) * stride >= inputSize + pad_l) {
--outputSize;
}
}
return outputSize;
}
template<typename T>
static inline T pooling_output_shape(
T inputSize, T kernelSize, T pad, T stride, T dilation, bool ceil_mode) {
TORCH_CHECK(stride != 0, "stride should not be zero");
TORCH_CHECK(pad >= 0,
"pad must be non-negative, but got pad: ", pad);
TORCH_CHECK(pad <= kernelSize / 2,
"pad should be at most half of kernel size, but got pad=",
pad, " and kernel_size=", kernelSize)
return pooling_output_shape_pad_lr(
inputSize, kernelSize, pad, pad, stride, dilation, ceil_mode);
}
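// Worked example (exposition only): 10 inputs, kernel 3, pad 1, stride 2,
// dilation 1, floor mode: div_rtn(10 + 1 + 1 - 2 - 1, 2) + 1 = 4 + 1 = 5.
static inline int64_t example_pooling_output_length() {
  return pooling_output_shape<int64_t>(
      /*inputSize=*/10, /*kernelSize=*/3, /*pad=*/1,
      /*stride=*/2, /*dilation=*/1, /*ceil_mode=*/false);  // == 5
}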
template <typename T>
std::pair<T, T> _pooling_same_mode_padding_lr(
T inputSize, T kernelSize, int64_t stride, int64_t dilation) {
// NOTE: with strides, the output shape is ceil(inputSize/stride)
auto total_padding = T(dilation) * (kernelSize - 1);
// Prefer symmetric padding if possible
if (stride > 2 && (total_padding % 2 == 1)) {
// The floor in the output size calculation gives us a little wiggle room
auto wiggle_room = inputSize % stride - 1;
if (wiggle_room > 0) {
total_padding = total_padding - 1;
}
}
auto left = total_padding / 2;
return {left, total_padding - left};
}
inline std::pair<int64_t, int64_t> pooling_same_mode_padding_lr(
int64_t inputSize, int64_t kernelSize, int64_t stride, int64_t dilation) {
return _pooling_same_mode_padding_lr(inputSize, kernelSize, stride, dilation);
}
inline std::pair<c10::SymInt, c10::SymInt> pooling_same_mode_padding_lr(
c10::SymInt inputSize, c10::SymInt kernelSize, int64_t stride, int64_t dilation) {
return _pooling_same_mode_padding_lr(std::move(inputSize), std::move(kernelSize), stride, dilation);
}
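// Usage sketch (illustrative only): "same" padding for inputSize=10,
// kernelSize=3, stride=2, dilation=1 distributes the total padding of
// dilation*(kernelSize-1) == 2 symmetrically,
//   pooling_same_mode_padding_lr(10, 3, 2, 1) == {1, 1}
// which makes the output size ceil(10 / 2) == 5.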
// AveragePool2d/DilatedMaxPool2d (forward)
static inline void
pool2d_shape_check(
const Tensor& input,
int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
int64_t nInputPlane,
int64_t inputHeight, int64_t inputWidth,
int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
{
const int64_t ndim = input.ndimension();
const int64_t nOutputPlane = nInputPlane;
TORCH_CHECK(kW > 0 && kH > 0,
"kernel size should be greater than zero, but got ",
"kH: ", kH, " kW: ", kW);
TORCH_CHECK(dW > 0 && dH > 0,
"stride should be greater than zero, but got "
"dH: ", dH, " dW: ", dW);
TORCH_CHECK(dilationH > 0 && dilationW > 0,
"dilation should be greater than zero, but got ",
"dilationH: ", dilationH, " dilationW: ", dilationW);
bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
if (memory_format == at::MemoryFormat::ChannelsLast){
// Expect tensor in NHWC format and allow 0-dim only for N.
TORCH_CHECK((ndim == 4 && valid_dims && input.size(3) != 0),
"Expected 4D (batch mode) tensor expected for input with channels_last layout"
" with optional 0 dim batch size for input, but got: ", input.sizes());
} else {
TORCH_CHECK((ndim == 3 && input.size(0) != 0 && valid_dims) ||
(ndim == 4 && valid_dims && input.size(3) != 0),
"Expected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got:",
input.sizes());
}
TORCH_CHECK(kW/2 >= padW && kH/2 >= padH,
"pad should be smaller than or equal to half of kernel size, but got ",
"padW = ", padW, ", padH = ", padH, ", kW = ", kW, ", kH = ", kH);
TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1,
"Given input size: (",
nInputPlane, "x", inputHeight, "x", inputWidth, "). ",
"Calculated output size: (",
nOutputPlane, "x", outputHeight, "x", outputWidth, "). ",
"Output size is too small");
}
// DilatedMaxPool2d (backward)
static inline void
max_pool2d_backward_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
int kH, int kW, int dH, int dW, int padH, int padW, int dilationH, int dilationW,
int64_t nInputPlane,
int64_t inputHeight, int64_t inputWidth,
int64_t outputHeight, int64_t outputWidth, MemoryFormat memory_format)
{
pool2d_shape_check(
input,
kH, kW, dH, dW, padH, padW, dilationH, dilationW,
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, memory_format);
const int64_t ndim = input.ndimension();
const int64_t nOutputPlane = nInputPlane;
check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);
check_dim_size(gradOutput, ndim, ndim-2, outputHeight);
check_dim_size(gradOutput, ndim, ndim-1, outputWidth);
check_dim_size(indices, ndim, ndim-3, nOutputPlane);
check_dim_size(indices, ndim, ndim-2, outputHeight);
check_dim_size(indices, ndim, ndim-1, outputWidth);
}
// AveragePool2d (backward)
static inline void
avg_pool2d_backward_shape_check(
const Tensor& input,
const Tensor& gradOutput,
int64_t /*nbatch*/,
int kH, int kW, int dH, int dW, int padH, int padW,
int64_t nInputPlane,
int64_t inputHeight, int64_t inputWidth,
int64_t outputHeight, int64_t outputWidth,
MemoryFormat memory_format)
{
pool2d_shape_check(
input,
kH, kW, dH, dW, padH, padW, 1, 1,
nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth,
memory_format);
const int64_t ndim = input.ndimension();
const int64_t nOutputPlane = nInputPlane;
check_dim_size(gradOutput, ndim, ndim-3, nOutputPlane);
check_dim_size(gradOutput, ndim, ndim-2, outputHeight);
check_dim_size(gradOutput, ndim, ndim-1, outputWidth);
}
// AveragePool3d/DilatedMaxPool3d (forward)
static inline void
pool3d_shape_check(
const Tensor& input,
int64_t nslices,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int64_t itime, int64_t iheight, int64_t iwidth,
int64_t otime, int64_t oheight, int64_t owidth,
const char *fn_name,
bool check_input_size=false)
{
const int64_t ndim = input.ndimension();
TORCH_CHECK(kT > 0 && kW > 0 && kH > 0,
"kernel size should be greater than zero, but got ",
"kT: ", kT, " kH: ", kH, " kW: ", kW);
TORCH_CHECK(dT > 0 && dW > 0 && dH > 0,
"stride should be greater than zero, but got ",
"dT: ", dT, " dH: ", dH, " dW: ", dW);
TORCH_CHECK(dilationT > 0 && dilationW > 0 && dilationH > 0,
"dilation should be greater than zero, but got ",
"dilationT: ", dilationT, " dilationH: ", dilationH, " dilationW: ", dilationW);
TORCH_CHECK(ndim == 4 || ndim == 5,
fn_name, ": Expected 4D or 5D tensor for input, but got: ", input.sizes());
for (const auto i : c10::irange(ndim)) {
if (ndim == 5 && i == 0) {
// size of batch-dim can be 0.
continue;
}
TORCH_CHECK(
input.size(i) > 0,
fn_name,
": Expected input's non-batch dimensions to have positive length,"
" but input has a shape of ",
input.sizes(),
" and non-batch dimension ",
input.size(i),
" has length zero!")
}
if (check_input_size) { // AveragePool3d
TORCH_CHECK(itime >= kT && iheight >= kH && iwidth >= kW,
"input image ", "(T: ", itime, " H: ", iheight, " W: ", iwidth, ") smaller than ",
"kernel size ", "(kT: ", kT, " kH: ", kH, " kW: ", kW, ")");
}
TORCH_CHECK(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH,
"pad should be smaller than or equal to half of kernel size, but got "
"kT: ", kT, " kW: ", kW, " kH: ", kH, " padT: ", pT, " padW: ", pW, " padH: ", pH);
TORCH_CHECK(otime >= 1 && owidth >= 1 && oheight >= 1,
"Given input size: (",
nslices,"x", itime, "x", iheight, "x", iwidth, "). ",
"Calculated output size: (",
nslices, "x", otime, "x", oheight, "x", owidth, "). ",
"Output size is too small");
}
static inline void
max_pool3d_backward_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
int64_t nslices,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int64_t itime, int64_t iheight, int64_t iwidth,
int64_t otime, int64_t oheight, int64_t owidth,
const char* fn_name)
{
const int64_t ndim = input.ndimension();
pool3d_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth, fn_name);
check_dim_size(gradOutput, ndim, ndim-4, nslices);
check_dim_size(gradOutput, ndim, ndim-3, otime);
check_dim_size(gradOutput, ndim, ndim-2, oheight);
check_dim_size(gradOutput, ndim, ndim-1, owidth);
check_dim_size(indices, ndim, ndim-4, nslices);
check_dim_size(indices, ndim, ndim-3, otime);
check_dim_size(indices, ndim, ndim-2, oheight);
check_dim_size(indices, ndim, ndim-1, owidth);
}
static inline void
avg_pool3d_backward_shape_check(
const Tensor& input,
const Tensor& gradOutput,
int64_t nslices,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int64_t itime, int64_t iheight, int64_t iwidth,
int64_t otime, int64_t oheight, int64_t owidth,
const char *fn_name)
{
const int64_t ndim = input.ndimension();
pool3d_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
1, 1, 1,
itime, iheight, iwidth,
otime, oheight, owidth,
fn_name, true);
check_dim_size(gradOutput, ndim, ndim-4, nslices);
check_dim_size(gradOutput, ndim, ndim-3, otime);
check_dim_size(gradOutput, ndim, ndim-2, oheight);
check_dim_size(gradOutput, ndim, ndim-1, owidth);
}
} // anonymous namespace
} // namespace at::native
| 11,881
| 34.468657
| 121
|
h
|
null |
pytorch-main/aten/src/ATen/native/Pow.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
namespace c10 {
class Scalar;
}
namespace at {
struct TensorIterator;
struct TensorIteratorBase;
namespace native {
#if defined(__CUDACC__) || defined(__HIPCC__)
#define HOST_DEVICE __host__ __device__
#else
#define HOST_DEVICE
#endif
// integral power in pytorch allows for negative exponents, giving truncated integral results.
// e.g. since 2**-1==0.5, the truncated integral result is zero. bases 1 and -1 are the only
// ones that can yield a non-zero result for a negative exponent.
template <class T,
typename std::enable_if<std::is_integral<T>::value, T>::type* = nullptr>
static inline HOST_DEVICE __ubsan_ignore_signed_int_overflow__ T powi_impl(T a, T b) {
T result = 1;
while (b) {
if (b & 1) {
result *= a;
}
b /= 2;
a *= a;
}
return result;
}
template <class T,
typename std::enable_if<std::is_integral<T>::value && !std::is_signed<T>::value, T>::type* = nullptr>
static inline HOST_DEVICE T powi(T a, T b) {
return powi_impl(a, b);
}
template <class T,
typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value, T>::type* = nullptr>
static inline HOST_DEVICE T powi(T a, T b) {
if ( b < 0 ) {
if ( a == 1 ) {
return 1;
} else if ( a == -1 ) {
auto negative = (-b) % static_cast<T>(2);
return negative ? -1 : 1;
} else {
return 0;
}
}
return powi_impl(a, b);
}
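// Usage sketch (illustrative only): truncated integral power as implemented above.
//   powi<int64_t>(2, 10) == 1024
//   powi<int64_t>(2, -1) == 0    // 2**-1 == 0.5 truncates to 0
//   powi<int64_t>(-1, -3) == -1  // odd negative exponent of base -1
//   powi<int64_t>(1, -5) == 1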
using pow_tensor_tensor_fn = void (*)(TensorIteratorBase&);
using pow_tensor_scalar_fn = void (*)(TensorIteratorBase&, const c10::Scalar&);
DECLARE_DISPATCH(pow_tensor_tensor_fn, pow_tensor_tensor_stub);
DECLARE_DISPATCH(pow_tensor_scalar_fn, pow_tensor_scalar_stub);
} // namespace native
} // namespace at
| 1,736
| 23.814286
| 103
|
h
|
null |
pytorch-main/aten/src/ATen/native/ReduceOps.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
#include <c10/util/Optional.h>
namespace c10 {
class Scalar;
}
namespace at {
struct TensorIterator;
class Tensor;
}
namespace at::native {
using reduce_fn = void(*)(TensorIterator &);
DECLARE_DISPATCH(reduce_fn, sum_stub);
DECLARE_DISPATCH(reduce_fn, nansum_stub);
DECLARE_DISPATCH(reduce_fn, prod_stub);
DECLARE_DISPATCH(reduce_fn, mean_stub);
DECLARE_DISPATCH(reduce_fn, and_stub);
DECLARE_DISPATCH(reduce_fn, or_stub);
DECLARE_DISPATCH(reduce_fn, min_values_stub);
DECLARE_DISPATCH(reduce_fn, max_values_stub);
DECLARE_DISPATCH(reduce_fn, argmax_stub);
DECLARE_DISPATCH(reduce_fn, argmin_stub);
using reduce_std_var_function =
void (*)(TensorIterator&, double correction, bool take_sqrt);
DECLARE_DISPATCH(reduce_std_var_function, std_var_stub);
using reduce_norm_fn =
void (*)(Tensor&, const Tensor&, const c10::Scalar&, c10::optional<int64_t>);
DECLARE_DISPATCH(reduce_norm_fn, norm_kernel);
using reduce_fn_flag = void(*)(TensorIterator &, const c10::Scalar&);
DECLARE_DISPATCH(reduce_fn_flag, norm_stub);
using structured_cum_fn = void (*)(const Tensor&, const Tensor&, int64_t);
using cum_fn = void (*)(Tensor&, const Tensor&, int64_t);
DECLARE_DISPATCH(structured_cum_fn, cumsum_stub);
DECLARE_DISPATCH(structured_cum_fn, cumprod_stub);
DECLARE_DISPATCH(cum_fn, logcumsumexp_stub);
DECLARE_DISPATCH(void (*)(const Tensor&, int64_t, bool, Tensor&, Tensor&), aminmax_stub);
DECLARE_DISPATCH(void (*)(const Tensor&, Tensor&, Tensor&), aminmax_allreduce_stub);
// Used in cuda/Normalization.cu
TORCH_API std::tuple<Tensor&,Tensor&> var_mean_out(
Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim,
int64_t correction, bool keepdim);
} // namespace at::native
| 1,764
| 30.517857
| 89
|
h
|
null |
pytorch-main/aten/src/ATen/native/ReductionType.h
|
#pragma once
#include <c10/core/Scalar.h>
namespace at::native {
enum class ReductionType {MAX, MEAN, MIN, SUM, PROD};
static inline ReductionType get_reduction_enum(const c10::string_view& reduce) {
if (reduce == "max" || reduce == "amax") {
return ReductionType::MAX;
} else if (reduce == "mean") {
return ReductionType::MEAN;
} else if (reduce == "min" || reduce == "amin") {
return ReductionType::MIN;
} else if (reduce == "sum") {
return ReductionType::SUM;
} else if (reduce == "prod") {
return ReductionType::PROD;
} else {
TORCH_CHECK(false, "reduce argument must be either sum, prod, mean, amax or amin, got ", reduce);
}
}
// used for `scatter_reduce`, old options for BC.
static inline ReductionType get_operator_enum(const c10::string_view reduce, bool use_new_options) {
if (use_new_options) {
return get_reduction_enum(reduce);
} else {
if (reduce == "add") {
return ReductionType::SUM;
} else if (reduce == "multiply") {
return ReductionType::PROD;
} else {
TORCH_CHECK(false, "reduce argument must be either add or multiply.")
}
}
}
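// Usage sketch (illustrative only): mapping the string arguments accepted by
// reduction ops onto the enum above.
//   get_reduction_enum("amax") == ReductionType::MAX
//   get_reduction_enum("mean") == ReductionType::MEAN
//   get_operator_enum("add", /*use_new_options=*/false) == ReductionType::SUM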
} // namespace at::native
| 1,153
| 27.146341
| 101
|
h
|
null |
pytorch-main/aten/src/ATen/native/Repeat.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/TensorOperators.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#endif
namespace at::native {
template <
typename index_t,
void compute(index_t*, int64_t*, index_t*, int64_t, int64_t)>
static inline Tensor repeat_interleave_common(
const Tensor& repeats,
c10::optional<int64_t> output_size) {
TORCH_CHECK(
      repeats.dim() == 1, "repeat_interleave only accepts a 1D vector as repeat");
TORCH_CHECK(
repeats.scalar_type() == at::kLong || repeats.scalar_type() == at::kInt,
"repeats has to be Long or Int tensor");
if (repeats.size(0) == 0) {
return at::empty_like(repeats, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
}
Tensor repeats_ = repeats.contiguous();
Tensor cumsum = repeats.cumsum(0);
int64_t total;
if (output_size.has_value()) {
total = output_size.value();
} else {
total = cumsum[-1].item<int64_t>();
TORCH_CHECK(
(repeats >= 0).all().item<uint8_t>(), "repeats can not be negative");
}
Tensor result = at::empty({total}, repeats.options());
index_t* repeat_ptr = repeats_.data_ptr<index_t>();
int64_t* cumsum_ptr = cumsum.data_ptr<int64_t>();
index_t* result_ptr = result.data_ptr<index_t>();
compute(repeat_ptr, cumsum_ptr, result_ptr, repeats.size(0), total);
return result;
}
} // namespace at::native
| 1,437
| 28.346939
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/Resize.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/native/ResizeCommon.h>
#include <ATen/EmptyTensor.h>
#include <ATen/TensorUtils.h>
#include <c10/core/CPUAllocator.h>
#include <utility>
namespace at::native {
// TODO: make all operations that resize given outputs use this function
// for consistency and maintainability.
// Some operations like `cat` might not be able to make the use of
// resize_output directly. For more details to understand how it works in `cat`,
// see https://github.com/pytorch/pytorch/pull/62560#discussion_r687363362
// Resizes outputs
// Functions accepting output tensors, like with the "out" kwarg, should
// call this function to handle resizing their output tensor.
// Issues a warning if the output tensor has one or more elements and
// needs resizing
// NOTE: In the future the warning will become an error
// Returns a bool saying whether or not the resize actually happened
TORCH_API bool resize_output(const Tensor& output, IntArrayRef shape);
TORCH_API bool resize_output_symint(const Tensor& output, SymIntArrayRef shape);
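// Illustrative pattern (a sketch only; `my_op_out` is a hypothetical kernel,
// not part of this header): an `out=` implementation would typically call
// resize_output before writing into `result`, e.g.
//   Tensor& my_op_out(const Tensor& self, Tensor& result) {
//     at::native::resize_output(result, self.sizes()); // warns if a non-empty
//                                                      // result had to be resized
//     result.copy_(self);                              // then fill the output
//     return result;
//   }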
// Utility for resize_output
// Returns a bool saying whether the resize should happen, and
// raises a warning if a tensor with one or more elements needs resizing
TORCH_API bool resize_output_check(const Tensor& output, IntArrayRef shape);
TORCH_API bool resize_output_check_symint(const Tensor& output, SymIntArrayRef shape);
TORCH_API void resize_bytes_cpu(StorageImpl* storage, size_t size_bytes);
TORCH_API void resize_bytes_meta(StorageImpl* storage, c10::SymInt size_bytes);
static inline void maybe_resize_storage_cpu(TensorImpl* self, size_t new_size_bytes) {
// It does not make sense to try to resize a storage
// to hold 0 elements, and this can break
// if storage_offset is positive but
// new_size is 0, so just bail in that case
// (same comment is in cuda/Resize.h)
if (self->numel() == 0) {
return;
}
const Storage& storage = self->unsafe_storage();
if (!storage) {
auto new_storage = c10::make_intrusive<StorageImpl>(
StorageImpl::use_byte_size_t(),
new_size_bytes,
c10::GetCPUAllocator(),
true);
self->set_storage_keep_dtype(std::move(new_storage));
} else if (new_size_bytes > storage.nbytes()) {
resize_bytes_cpu(storage.unsafeGetStorageImpl(), new_size_bytes);
}
}
TORCH_API TensorImpl* resize_impl_cpu_(
TensorImpl* self,
IntArrayRef size,
at::OptionalIntArrayRef stride,
bool resize_storage = true);
template <typename T>
T maybe_convert_symint(c10::SymInt) = delete;
template <>
inline c10::SymInt maybe_convert_symint(c10::SymInt x) { return x; }
template <>
inline int64_t maybe_convert_symint(c10::SymInt x) { return x.expect_int(); }
template <typename T>
static inline void checkInBoundsForStorage(
ArrayRef<T> size,
ArrayRef<T> stride,
T storage_offset,
const caffe2::TypeMeta& data_type,
const Storage& new_storage) {
T storage_size_bytes =
at::detail::computeStorageNbytes(size, stride, data_type.itemsize());
T storage_offset_bytes = storage_offset * data_type.itemsize();
if (storage_size_bytes == 0) {
    // NB: the storage of a tensor with any zero-sized dim can have any numel.
return;
}
T new_storage_size_bytes = maybe_convert_symint<T>(new_storage.sym_nbytes());
TORCH_CHECK(
storage_size_bytes + storage_offset_bytes <= new_storage_size_bytes,
"setStorage: sizes ",
size,
", strides ",
stride,
","
" storage offset ",
storage_offset,
", and itemsize ",
data_type.itemsize(),
" requiring a storage size of ",
storage_size_bytes + storage_offset_bytes,
" are out of bounds for storage of size ",
new_storage_size_bytes);
}
template <typename T>
static inline void checkSetStorage(Tensor& result, Storage storage, T storage_offset,
ArrayRef<T> size, ArrayRef<T> stride) {
// FIXME: stride should be optional
if (stride.data()) {
TORCH_CHECK(size.size() == stride.size(), "unequal size length (", size.size(),
") and stride length (", stride.size(), ")");
}
#ifdef DEBUG
TORCH_CHECK(size.size() <= INT_MAX, "size length (", size.size(), ") greater than INT_MAX");
#endif
// storage: note this can't be replaced with result.set_(storage) as the semantics of that
// function is to set the tensor size to be equal to the size of the storage.
if (!result.storage().is_alias_of(storage)) {
// Caffe2 might have tensors whose storages are null, but we
// don't allow it in PyTorch.
TORCH_INTERNAL_ASSERT(storage);
TORCH_INTERNAL_ASSERT(result.storage());
// We used to allow this, but this breaks device caching.
// Let's put an actual error message for this one.
TORCH_CHECK(result.storage().device() == storage.device(),
"Attempted to set the storage of a tensor on device \"", result.storage().device(),
"\" to a storage on different device \"", storage.device(),
"\". This is no longer allowed; the devices must match.");
result.unsafeGetTensorImpl()->set_storage_keep_dtype(std::move(storage));
}
// storageOffset
TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset);
}
/**
* Set self's sizes, strides, and storage_offset.
* (size, stride, storage_offset) must be in bounds for self's storage.
*/
template <typename T>
inline void setStrided(
const Tensor& self,
ArrayRef<T> size,
ArrayRef<T> stride,
T storage_offset) {
TORCH_CHECK(size.size() == stride.size(), "mismatch in length of strides and shape");
for (const auto& val : stride) {
TORCH_CHECK(val >= 0,
"as_strided: Negative strides are not supported at the moment, "
"got strides: ", stride);
}
auto* self_ = self.unsafeGetTensorImpl();
checkInBoundsForStorage(
size, stride, storage_offset, self_->dtype(), self_->storage());
/* storage offset */
TORCH_CHECK(storage_offset >= 0, "Tensor: invalid storage offset ", storage_offset);
self_->set_sizes_and_strides(size, stride, c10::make_optional(storage_offset));
}
} // namespace at::native
| 6,239
| 35.705882
| 99
|
h
|
null |
pytorch-main/aten/src/ATen/native/ResizeCommon.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/NamedTensorUtils.h>
#include <c10/util/irange.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#endif
namespace at::native {
template <typename T>
inline T storage_size_for(ArrayRef<T> size, ArrayRef<T> stride) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(size.size() == stride.size(),
"storage_size_for(size, stride) requires that size and stride ",
"have the same size as a precondition.");
T storage_size = 1;
for (const auto dim : c10::irange(size.size())) {
if (size[dim] == 0) {
storage_size = 0;
break;
}
storage_size += (size[dim] - 1) * stride[dim];
}
return storage_size;
}
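// Usage sketch (illustrative only): the minimal storage needed to back a
// 2x3 row-major tensor,
//   storage_size_for<int64_t>({2, 3}, {3, 1})
//     == 1 + (2 - 1) * 3 + (3 - 1) * 1 == 6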
inline const Tensor& resize_named_tensor_(
const Tensor& self,
IntArrayRef size,
c10::optional<MemoryFormat> optional_memory_format) {
TORCH_INTERNAL_ASSERT(self.has_names());
TORCH_CHECK(
self.sizes() == size,
"Cannot resize named tensor with resize_ or resize_as_ (tried to resize "
"Tensor",
self.names(),
" with size ",
self.sizes(),
" to ",
size,
"). This may be caused by passing a named tensor ",
"as an `out=` argument; please ensure that the sizes are the same. ");
TORCH_CHECK(
!optional_memory_format.has_value(),
"Unsupported memory format for named tensor resize ",
optional_memory_format.value());
return self;
}
// For deterministic output, fill new elements that were added after a storage
// resize with NaN or MAX_INT. `old_storage_nbytes` is the size of the storage
// before the resize happened.
inline const Tensor& fill_resize_deterministic_(const Tensor& tensor, int64_t old_storage_nbytes) {
const at::Storage& storage = tensor.unsafeGetTensorImpl()->unsafe_storage();
int64_t new_storage_nbytes = storage.nbytes();
int64_t old_storage_numel = old_storage_nbytes / tensor.itemsize();
int64_t new_storage_numel = new_storage_nbytes / tensor.itemsize();
if (new_storage_numel > old_storage_numel) {
at::Tensor tensor_view = at::empty({}, at::TensorOptions().dtype(tensor.scalar_type()).device(tensor.device()));
tensor_view.set_(
storage,
/*storage_offset=*/old_storage_numel,
/*size=*/{new_storage_numel - old_storage_numel},
/*stride=*/{1});
at::native::fill_empty_deterministic_(tensor_view);
}
return tensor;
}
} // namespace at::native
| 2,491
| 31.789474
| 116
|
h
|
null |
pytorch-main/aten/src/ATen/native/ScatterGatherChecks.h
|
#pragma once
#include <vector>
#include <ATen/core/Tensor.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <c10/util/irange.h>
namespace at::native {
namespace {
// checks whether index.dtype == int64
// and self.dtype == src.dtype if src is a Tensor
static void scatter_gather_dtype_check(
const std::string& method_name,
const Tensor& self,
const Tensor& index,
const c10::optional<Tensor>& src_opt = c10::nullopt
) {
if (index.numel() != 0) {
TORCH_CHECK(
index.scalar_type() == at::ScalarType::Long,
method_name, "(): Expected dtype int64 for index"
);
}
if (src_opt.has_value()) {
const auto& src = src_opt.value();
TORCH_CHECK(
self.scalar_type() == src.scalar_type(),
method_name, "(): Expected self.dtype to be equal to src.dtype"
);
}
}
// Used for `gather`-like methods
// Note: self means the input tensor here
// Test:
// 1. index.size(d) <= self.size(d) for all d != dim
// 2. index.dim() == self.dim()
static C10_UNUSED void gather_shape_check(const Tensor& self, int64_t dim,
const Tensor& index
) {
auto self_dims = ensure_nonempty_dim(self.dim());
TORCH_CHECK(self_dims == ensure_nonempty_dim(index.dim()),
"Index tensor must have the same number of dimensions as input tensor"
);
for (const auto i : c10::irange(self_dims)) {
if (i != dim) {
TORCH_CHECK(
ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
"Size does not match at dimension ", i,
" expected index ", index.sizes(),
" to be smaller than self ", self.sizes(),
" apart from dimension ", dim
);
}
}
}
// Used for `scatter` and `scatter_add`
// Tests:
// 1. index.size(d) <= self.size(d) for all d != dim
// 2. index.size(d) <= src.size(d) for all d if src is a Tensor
// 3. index.dim() == self.dim() == src.dim()
static C10_UNUSED void scatter_shape_check(
const Tensor& self, int64_t dim, const Tensor& index,
const c10::optional<Tensor>& src_opt = c10::nullopt
) {
if (index.numel() == 0) return;
TORCH_CHECK(
ensure_nonempty_dim(self.dim()) == ensure_nonempty_dim(index.dim()),
"Index tensor must have the same number of dimensions as self tensor"
);
bool is_wrong_shape = false;
int64_t self_dims = ensure_nonempty_dim(self.dim());
// Check: index.size(d) <= self.size(d) for all d != dim
for (const auto d : c10::irange(self_dims)) {
int64_t index_d_size = ensure_nonempty_size(index, d);
if (d == dim) continue;
if (index_d_size > ensure_nonempty_size(self, d)) {
is_wrong_shape = true;
break;
}
}
// Check: index.size(d) <= src.size(d) for all d if src is Tensor
if (!is_wrong_shape && src_opt.has_value()) {
const auto& src = src_opt.value();
for (const auto d : c10::irange(self_dims)) {
int64_t index_d_size = ensure_nonempty_size(index, d);
if (index_d_size > ensure_nonempty_size(src, d)) {
is_wrong_shape = true;
break;
}
}
}
if (src_opt.has_value()) {
const auto& src = src_opt.value();
TORCH_CHECK(
ensure_nonempty_dim(src.dim()) == ensure_nonempty_dim(index.dim()),
"Index tensor must have the same number of dimensions as src tensor"
);
TORCH_CHECK(!is_wrong_shape,
"Expected index ", index.sizes(),
" to be smaller than self ", self.sizes(),
" apart from dimension ", dim,
" and to be smaller size than src ", src.sizes()
);
}
else {
TORCH_CHECK(!is_wrong_shape,
"Expected index ", index.sizes(),
" to be smaller than self ", self.sizes(),
" apart from dimension ", dim
);
}
}
} // anonymous namespace
} // namespace at::native
| 3,698
| 27.674419
| 74
|
h
|
null |
pytorch-main/aten/src/ATen/native/SegmentReduce.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
#include <ATen/native/ReductionType.h>
#include <c10/core/Scalar.h>
#include <c10/util/Optional.h>
namespace at {
class Tensor;
namespace native {
using segment_reduce_lengths_fn = Tensor (*)(
ReductionType,
const Tensor&,
const Tensor&,
int64_t,
const c10::optional<Scalar>&);
DECLARE_DISPATCH(segment_reduce_lengths_fn, _segment_reduce_lengths_stub);
using segment_reduce_offsets_fn = Tensor (*)(
ReductionType,
const Tensor&,
const Tensor&,
int64_t,
const c10::optional<Scalar>&);
DECLARE_DISPATCH(segment_reduce_offsets_fn, _segment_reduce_offsets_stub);
using segment_reduce_lengths_backward_fn = Tensor (*)(
const Tensor&,
const Tensor&,
const Tensor&,
ReductionType,
const Tensor&,
int64_t,
const c10::optional<Scalar>&);
DECLARE_DISPATCH(segment_reduce_lengths_backward_fn, _segment_reduce_lengths_backward_stub);
using segment_reduce_offsets_backward_fn = Tensor (*)(
const Tensor&,
const Tensor&,
const Tensor&,
ReductionType,
const Tensor&,
int64_t,
const c10::optional<Scalar>&);
DECLARE_DISPATCH(segment_reduce_offsets_backward_fn, _segment_reduce_offsets_backward_stub);
} // namespace native
} // namespace at
| 1,280
| 24.117647
| 92
|
h
|
null |
pytorch-main/aten/src/ATen/native/SharedReduceOps.h
|
#pragma once
// Please note that this file is
// used across both CPU and GPU.
#include <type_traits>
#include <complex>
#include <c10/macros/Macros.h>
#include <ATen/detail/FunctionTraits.h>
#include <ATen/NumericUtils.h>
#if defined(__CUDACC__)
#include <ATen/cuda/DeviceUtils.cuh>
#include <ATen/native/cuda/DeviceSqrt.cuh>
#elif defined(__HIPCC__)
#include <ATen/hip/DeviceUtils.cuh>
#include <ATen/native/hip/DeviceSqrt.cuh>
#endif
#if defined(__CUDACC__) || defined(__HIPCC__)
#include <thrust/pair.h>
#else
#include <cmath>
#define device_sqrt std::sqrt
#endif
#if defined(__CUDACC__) || defined(__HIPCC__)
template <typename scalar_t>
inline C10_DEVICE scalar_t max_propagate_nan(scalar_t a, scalar_t b) {
#if defined(__HIPCC__)
// TODO: remove this special case for HIP when issue is fixed:
// https://github.com/ROCm-Developer-Tools/HIP/issues/2209
scalar_t max = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::max(a, b));
#else
scalar_t max = at::_isnan(b) ? b : std::max(a, b);
#endif
return max;
}
template <typename scalar_t>
inline C10_DEVICE scalar_t min_propagate_nan(scalar_t a, scalar_t b) {
#if defined(__HIPCC__)
// TODO: remove this special case for HIP when issue is fixed:
// https://github.com/ROCm-Developer-Tools/HIP/issues/2209
scalar_t min = at::_isnan(a) ? a : (at::_isnan(b) ? b : std::min(a, b));
#else
scalar_t min = at::_isnan(b) ? b : std::min(a, b);
#endif
return min;
}
#define MAX(X, Y) max_propagate_nan(X,Y)
#define MIN(X, Y) min_propagate_nan(X,Y)
#else
#include <ATen/native/cpu/zmath.h>
#define MAX(X, Y) max_impl(X,Y)
#define MIN(X, Y) min_impl(X,Y)
#endif
// ROCM hcc doesn't work well with using std:: in kernel functions
#if defined(__CUDA_ARCH__)
#include <c10/cuda/CUDAMathCompat.h>
#define compat_pow c10::cuda::compat::pow
#elif defined(__HIPCC__)
#include <c10/hip/HIPMathCompat.h>
#define compat_pow c10::hip::compat::pow
#else
#define compat_pow std::pow
#endif
namespace at { namespace native {
namespace detail {
#if defined(__CUDACC__) || defined(__HIPCC__)
template <typename T1, typename T2> using pair = thrust::pair<T1, T2>;
#else
template <typename T1, typename T2> using pair = std::pair<T1, T2>;
#endif
} // namespace detail
template <typename scalar_t, typename index_t>
struct WelfordData {
scalar_t mean;
scalar_t m2;
index_t n;
scalar_t nf;
C10_HOST_DEVICE WelfordData() : mean(0), m2(0), n(0), nf(0) {}
C10_HOST_DEVICE WelfordData(
scalar_t mean,
scalar_t m2,
index_t n,
scalar_t nf)
: mean(mean), m2(m2), n(n), nf(nf) {}
};
template <typename scalar_t, typename acc_scalar_t, typename index_t, typename res_t>
struct WelfordOps {
acc_scalar_t correction;
bool take_sqrt;
public:
using acc_t = WelfordData<acc_scalar_t, index_t>;
inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, index_t /*idx*/) const {
// We accumulate n in index_t to avoid cumulative rounding error, but still
// need nf for use in combine where int32 may overflow.
index_t new_n = acc.n + 1;
acc_scalar_t new_nf = static_cast<acc_scalar_t>(new_n);
acc_scalar_t delta = data - acc.mean;
acc_scalar_t new_mean = acc.mean + delta / new_nf;
acc_scalar_t new_delta = data - new_mean;
return {
new_mean,
acc.m2 + delta * new_delta,
new_n,
new_nf,
};
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
if (a.nf == 0) {
return b;
}
if (b.nf == 0) {
return a;
}
acc_scalar_t delta = b.mean - a.mean;
acc_scalar_t new_count = a.nf + b.nf;
acc_scalar_t nb_over_n = b.nf / new_count;
return {
a.mean + delta * nb_over_n,
a.m2 + b.m2 + delta * delta * a.nf * nb_over_n,
      // acc.n is set to -1 since it might not be able to represent the combined
      // count within its range; -1 marks it as unused to avoid confusion
-1,
new_count
};
}
inline C10_DEVICE res_t project(acc_t acc) const __ubsan_ignore_float_divide_by_zero__ {
const auto mean = static_cast<scalar_t>(acc.mean);
const auto divisor = acc.nf > correction ? acc.nf - correction : 0;
const auto var = acc.m2 / divisor;
res_t results(take_sqrt ? device_sqrt(var) : var, mean);
return results;
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline __device__ acc_t warp_shfl_down(acc_t acc, int offset) const {
return {
WARP_SHFL_DOWN(acc.mean, offset)
, WARP_SHFL_DOWN(acc.m2, offset)
, WARP_SHFL_DOWN(acc.n, offset)
, WARP_SHFL_DOWN(acc.nf, offset)
};
}
#endif
C10_HOST_DEVICE WelfordOps(acc_scalar_t correction, bool take_sqrt)
: correction(correction), take_sqrt(take_sqrt) {}
};
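// Usage sketch (illustrative only; the template arguments below are chosen
// just for the example): single-pass mean/variance over {1, 2, 3} with
// correction=1 (sample variance) and take_sqrt=false.
//   WelfordOps<float, float, int32_t, std::pair<float, float>> ops(1.f, false);
//   WelfordData<float, int32_t> acc;
//   for (float v : {1.f, 2.f, 3.f}) acc = ops.reduce(acc, v, /*idx=*/0);
//   auto [var, mean] = ops.project(acc);  // var == 1.f, mean == 2.f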
template <typename scalar_t, typename acc_t=scalar_t, typename factor_t=acc_t, typename out_t = acc_t>
struct MeanOps {
factor_t factor;
inline C10_DEVICE acc_t reduce(acc_t a, scalar_t b, int64_t /*idx*/) const {
return combine(a, static_cast<acc_t>(b));
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
return a + b;
}
inline C10_DEVICE out_t project(acc_t a) const {
return a * factor;
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t data, int offset) const {
return WARP_SHFL_DOWN(data, offset);
}
#endif
MeanOps(factor_t factor): factor(factor) {
}
};
// This accumulator template is used to calculate the minimum absolute value of
// a set of numbers.
// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
// value. These types differ for complex number input support.
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
struct AbsMinOps {
inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
return MIN(acc, static_cast<acc_t>(std::abs(data)));
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
return MIN(a, b);
}
inline C10_DEVICE out_t project(acc_t a) const {
return a;
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
return WARP_SHFL_DOWN(acc, offset);
}
#endif
};
// This accumulator template is used to calculate the maximum absolute value of
// a set of numbers.
// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
// value. These types differ for complex number input support.
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
struct AbsMaxOps {
inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
return MAX(acc, static_cast<acc_t>(std::abs(data)));
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
return MAX(a, b);
}
inline C10_DEVICE out_t project(acc_t a) const {
return a;
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
return WARP_SHFL_DOWN(acc, offset);
}
#endif
};
// This accumulator template is used to calculate the norm of the absolute value
// of a set of numbers.
// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
// value. These types differ for complex number input support.
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
struct NormOps {
acc_t norm_;
inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
return acc + compat_pow(static_cast<acc_t>(std::abs(data)), norm_);
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
return a + b;
}
inline C10_DEVICE out_t project(acc_t a) const {
return compat_pow(a, static_cast<acc_t>(1.0) / norm_);
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
return WARP_SHFL_DOWN(acc, offset);
}
#endif
NormOps(acc_t norm_): norm_(norm_) {
}
};
// This accumulator template is used to calculate the order zero norm of the
// absolute value of a set of numbers.
// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
// value. These types differ for complex number input support.
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
struct NormZeroOps {
inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
return acc + (data == static_cast<scalar_t>(0) ? static_cast<acc_t>(0) : static_cast<acc_t>(1));
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
return a + b;
}
inline C10_DEVICE out_t project(acc_t a) const {
return a;
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
return WARP_SHFL_DOWN(acc, offset);
}
#endif
};
// This accumulator template is used to calculate the order one norm of the
// absolute value of a set of numbers.
// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
// value. These types differ for complex number input support.
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
struct NormOneOps {
inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
return acc + static_cast<acc_t>(std::abs(data));
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
return a + b;
}
inline C10_DEVICE out_t project(acc_t a) const {
return a;
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
return WARP_SHFL_DOWN(acc, offset);
}
#endif
};
template<typename acc_t>
struct AbsSwitch {};
template<typename scalar_t, typename acc_t>
inline C10_DEVICE acc_t abs_if_complex(scalar_t data, AbsSwitch<acc_t>) {
return static_cast<acc_t>(data);
}
template<typename scalar_t, typename acc_t>
inline C10_DEVICE acc_t abs_if_complex(std::complex<scalar_t> data, AbsSwitch<acc_t>) {
return static_cast<acc_t>(std::abs(data));
}
template<typename scalar_t, typename acc_t>
inline C10_DEVICE acc_t abs_if_complex(c10::complex<scalar_t> data, AbsSwitch<acc_t>) {
return static_cast<acc_t>(std::abs(data));
}
// This accumulator template is used to calculate the order two norm of the
// absolute value of a set of numbers.
// `scalar_t` is the type of the input and `acc_t` is the type of the accumulated
// value. These types differ for complex number input support.
template <typename scalar_t, typename acc_t = scalar_t, typename out_t = acc_t>
struct NormTwoOps {
inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, int64_t /*idx*/) const {
acc_t data_ = abs_if_complex(data, AbsSwitch<acc_t>());
return acc + data_ * data_;
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
return a + b;
}
inline C10_DEVICE out_t project(acc_t a) const {
return device_sqrt(a);
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
return WARP_SHFL_DOWN(acc, offset);
}
#endif
};
template <typename acc_t, typename data_t>
struct NanSumOps {
inline C10_DEVICE acc_t reduce(acc_t a, data_t b, int64_t /*idx*/) const {
return a + (at::_isnan(b) ? acc_t{0.} : acc_t{b});
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
return a + b;
}
inline C10_DEVICE data_t project(acc_t a) const {
return data_t{a};
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t data, int offset) const {
return WARP_SHFL_DOWN(data, offset);
}
#endif
};
namespace detail {
template <typename scalar_t>
struct LessOrNan {
C10_DEVICE bool operator () (scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) const {
// If (a == b), then choose the one with lower idx, else min(a, b)
if (at::_isnan(a)) {
if (at::_isnan(b)) {
return idx_a < idx_b;
}
return true;
}
return (a == b) ? idx_a < idx_b : (a < b);
}
};
template <typename scalar_t>
struct GreaterOrNan {
C10_DEVICE bool operator () (scalar_t a, scalar_t b, int64_t idx_a, int64_t idx_b) const {
// If (a == b), then choose the one with lower idx, else max(a, b)
if (at::_isnan(a)) {
if (at::_isnan(b)) {
return idx_a < idx_b;
}
return true;
}
return (a == b) ? idx_a < idx_b : (a > b);
}
};
template <typename comp_t>
struct MinMaxReductionOps {
using scalar_t = typename binary_function_traits<comp_t>::arg1_t;
using index_t = int64_t;
using arg_t = detail::pair<scalar_t, index_t>;
static C10_DEVICE arg_t project(arg_t arg) {
return arg;
}
static C10_DEVICE arg_t reduce(arg_t arg, scalar_t val, int64_t idx) {
return comp_t{}(arg.first, val, arg.second, idx) ? arg : arg_t(val, idx);
}
static C10_DEVICE arg_t combine(arg_t a, arg_t b) {
return comp_t{}(a.first, b.first, a.second, b.second) ? a : b;
}
static C10_DEVICE arg_t translate_idx(arg_t a, int64_t base_idx) {
return {a.first, a.second + base_idx};
}
#if defined(__CUDACC__) || defined(__HIPCC__)
static C10_DEVICE arg_t warp_shfl_down(arg_t arg, int offset) {
return arg_t(WARP_SHFL_DOWN(arg.first, offset),
WARP_SHFL_DOWN(arg.second, offset));
}
#endif
};
template <typename comp_t>
struct ArgReductionOps : public MinMaxReductionOps<comp_t> {
using typename MinMaxReductionOps<comp_t>::scalar_t;
using typename MinMaxReductionOps<comp_t>::index_t;
using typename MinMaxReductionOps<comp_t>::arg_t;
static C10_DEVICE index_t project(arg_t arg) {
return arg.second;
}
};
} // namespace detail
template <typename scalar_t>
struct ArgMaxOps :
public detail::ArgReductionOps<detail::GreaterOrNan<scalar_t>> {
};
template <typename scalar_t>
struct ArgMinOps :
public detail::ArgReductionOps<detail::LessOrNan<scalar_t>> {
};
template <typename scalar_t>
struct MinOps :
public detail::MinMaxReductionOps<detail::LessOrNan<scalar_t>> {
};
template <typename scalar_t>
struct MaxOps :
public detail::MinMaxReductionOps<detail::GreaterOrNan<scalar_t>> {
};
template <typename scalar_t, typename acc_scalar_t, typename index_t>
struct MinMaxOps {
using acc_t = detail::pair<acc_scalar_t, acc_scalar_t>;
inline C10_DEVICE acc_t reduce(acc_t acc, scalar_t data, index_t /*idx*/) const {
return combine(acc, {data, data});
}
inline C10_DEVICE acc_t combine(acc_t a, acc_t b) const {
auto min_val = (at::_isnan(a.first) || a.first < b.first) ? a.first : b.first;
auto max_val = (at::_isnan(a.second) || a.second > b.second) ? a.second : b.second;
return {min_val, max_val};
}
inline C10_DEVICE acc_t project(acc_t acc) const {
return acc;
}
static C10_DEVICE acc_t translate_idx(acc_t acc, int64_t /*base_idx*/) {
return acc;
}
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE acc_t warp_shfl_down(acc_t acc, int offset) const {
return {
WARP_SHFL_DOWN(acc.first, offset), WARP_SHFL_DOWN(acc.second, offset)
};
}
#endif
};
}} // namespace at::native
#undef MAX
#undef MIN
| 15,989
| 28.33945
| 102
|
h
|
null |
pytorch-main/aten/src/ATen/native/SobolEngineOpsUtils.h
|
/// This file contains some tensor-agnostic operations to be used in the
/// core functions of the `SobolEngine`
#include <ATen/core/Tensor.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/arange.h>
#include <ATen/ops/mul.h>
#include <ATen/ops/pow.h>
#endif
namespace at::native::sobol_utils {
/// Function to return the minimum number of bits needed to represent the integer `n`
inline int64_t bit_length(const int64_t n) {
int64_t nbits, nloc;
for (nloc = n, nbits = 0; nloc > 0; nloc /= 2, nbits++);
return nbits;
}
/// Function to get the position of the rightmost zero in the bit representation of an integer
/// This value is the zero-indexed position
inline int64_t rightmost_zero(const int64_t n) {
int64_t z, i;
for (z = n, i = 0; z % 2 == 1; z /= 2, i++);
return i;
}
/// Function to get a subsequence of bits in the representation of an integer starting from
/// `pos` and of length `length`
inline int64_t bitsubseq(const int64_t n, const int64_t pos, const int64_t length) {
return (n >> pos) & ((1 << length) - 1);
}
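/// Usage sketch (illustrative only):
///   bit_length(10) == 4 (0b1010), rightmost_zero(7) == 3 (0b0111),
///   bitsubseq(0b101101, 2, 3) == 0b011 (the 3 bits starting at position 2)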
/// Function to perform the inner product between a batched square matrix and a power of 2 vector
inline at::Tensor cdot_pow2(const at::Tensor& bmat) {
at::Tensor inter = at::arange(bmat.size(-1) - 1, -1, -1, bmat.options());
inter = at::pow(2, inter).expand_as(bmat);
return at::mul(inter, bmat).sum(-1);
}
/// All definitions below this point are data. These are constant, and should not be modified
/// without notice
constexpr int64_t MAXDIM = 21201;
constexpr int64_t MAXDEG = 18;
constexpr int64_t MAXBIT = 30;
constexpr int64_t LARGEST_NUMBER = 1 << MAXBIT;
constexpr float RECIPD = 1.0 / LARGEST_NUMBER;
extern const int64_t poly[MAXDIM];
extern const int64_t initsobolstate[MAXDIM][MAXDEG];
} // namespace at::native::sobol_utils
| 1,835
| 31.785714
| 97
|
h
|
null |
pytorch-main/aten/src/ATen/native/Sorting.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
#include <cstdint>
namespace at {
class TensorBase;
}
namespace at::native {
enum class QUANTILE_INTERPOLATION_MODE : uint8_t {
LINEAR,
LOWER,
HIGHER,
MIDPOINT,
NEAREST
};
using sort_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, bool, bool);
using topk_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, int64_t, bool, bool);
DECLARE_DISPATCH(sort_fn, sort_stub);
DECLARE_DISPATCH(topk_fn, topk_stub);
void _fill_indices(const TensorBase &indices, int64_t dim);
} // namespace at::native
| 618
| 20.344828
| 111
|
h
|
null |
pytorch-main/aten/src/ATen/native/SortingUtils.h
|
#pragma once
#include <ATen/NumericUtils.h>
#include <ATen/native/Resize.h>
#include <c10/util/irange.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty.h>
#endif
namespace at::native {
// ensure we get good values and indices for kthvalue, mode
// the reducing dim always ends up with size 1 in the output
inline void _reduction_with_indices_allocate_or_resize_output(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t dim_,
bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true);
auto result_sizes = self.sizes().vec();
if (!result_sizes.empty()) {
result_sizes[dim] = 1;
}
if (values.defined()) {
TORCH_CHECK(
self.options().type_equal(values.options()),
"output values must be of same type as input");
if (!keepdim && values.dim() == self.dim() - 1) {
// unsqueeze to preserve passed in noncontiguous tensor in resize
values.unsqueeze_(dim);
}
resize_output(values, result_sizes);
} else {
values = at::empty(result_sizes, self.options());
}
if (indices.defined()) {
TORCH_CHECK(
indices.dtype() == kLong, "output indices must be of scalar type Long");
TORCH_CHECK(
indices.device() == self.device(),
"output indices must be on same device as input");
if (!keepdim && indices.dim() == self.dim() - 1) {
// unsqueeze to preserve passed in noncontiguous tensor in resize
indices.unsqueeze_(dim);
}
resize_output(indices, result_sizes);
} else {
indices = at::empty(result_sizes, self.options().dtype(kLong));
}
}
// ensure we get good values and indices for topk
inline void _allocate_or_resize_output_with_indices(
Tensor& values,
Tensor& indices,
const Tensor& self,
int64_t dim_,
int64_t k) {
int64_t dim = maybe_wrap_dim(dim_, self.dim(), /*wrap_scalar=*/true);
auto result_sizes = self.sizes().vec();
if (!result_sizes.empty()) {
result_sizes[dim] = k;
}
if (values.defined()) {
TORCH_CHECK(
self.options().type_equal(values.options()),
"output values must be of same type as input");
values.resize_(result_sizes);
} else {
values = at::empty(result_sizes, self.options());
}
if (indices.defined()) {
TORCH_CHECK(
indices.dtype() == kLong, "output indices must be of scalar type Long");
TORCH_CHECK(
indices.device() == self.device(),
"output indices must be on same device as input");
indices.resize_(result_sizes);
} else {
indices = at::empty(result_sizes, self.options().dtype(kLong));
}
}
} // namespace at::native
| 2,672
| 29.033708
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/SparseTensorUtils.h
|
#pragma once
#include <ATen/Parallel.h>
#include <ATen/SparseTensorImpl.h>
#include <ATen/core/Tensor.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/tensor.h>
#endif
namespace at::sparse {
// Just for documentation purposes
using SparseTensor = Tensor;
using SparseType = Type;
// This is an internal utility function for getting at the SparseTensorImpl,
// so that we can write sparse tensor specific accessors for special fields
// in SparseTensor. You should only use this for writing low level
// setters/getters for SparseTensorImpl fields; otherwise, you should use
// the low level setters/getters that were implemented using this.
//
// This may be called repeatedly, so make sure it's pretty cheap.
inline SparseTensorImpl* get_sparse_impl(const SparseTensor& self) {
TORCH_INTERNAL_ASSERT(
self.is_sparse(), "_internal_get_SparseTensorImpl: not a sparse tensor");
return static_cast<SparseTensorImpl*>(self.unsafeGetTensorImpl());
}
// Takes indices and values and directly puts them into the sparse tensor, no
// copy. This used to be called THSTensor_(_move)
inline void alias_into_sparse(
const SparseTensor& self,
const Tensor& indices,
const Tensor& values) {
get_sparse_impl(self)->set_indices_and_values_unsafe(indices, values);
}
// Take indices and values and makes a (data) copy of them to put into the
// sparse indices/values. This used to be called THSTensor_(_set)
inline void copy_into_sparse(
const SparseTensor& self,
const Tensor& indices,
const Tensor& values,
bool non_blocking) {
alias_into_sparse(
self,
indices.to(self._indices().options(), non_blocking, /*copy=*/true),
values.to(self._values().options(), non_blocking, /*copy=*/true));
}
// TODO: put this into the public API
inline bool is_same_tensor(const Tensor& lhs, const Tensor& rhs) {
return lhs.unsafeGetTensorImpl() == rhs.unsafeGetTensorImpl();
}
inline bool is_same_density(const SparseTensor& self, const SparseTensor& src) {
return self.sparse_dim() == src.sparse_dim() &&
self.dense_dim() == src.dense_dim();
}
// Give us a new values tensor, with the same dimensionality
// as 'values' but with a new number of non-zero elements.
// TODO: Expose this for real in ATen, some day?
// NB: Doesn't preserve data.
inline Tensor new_values_with_size_of(const Tensor& values, int64_t nnz) {
std::vector<int64_t> size = values.sizes().vec();
size[0] = nnz;
return at::empty(size, values.options());
}
// NOTE [ Flatten Sparse Indices ]
// This helper function flattens a sparse indices tensor (a Tensor) into a 1D
// indices tensor. E.g.,
// input = [[2, 4, 0],
// [3, 1, 10]]
// full_size = [2, 12]
// output = [ 2 * 12 + 3, 4 * 12 + 1, 0 * 12 + 10 ] = [27, 49, 10]
//
// In other words, assuming that each `indices[i, :]` is a valid index into a
// tensor `t` of shape `full_size`, this returns the corresponding indices into
// the flattened tensor `t.reshape( prod(full_size[:indices.size(0)]), -1 )`.
// if force_clone is true, the result will be forced to be a clone of self.
TORCH_API Tensor flatten_indices(
const Tensor& indices,
IntArrayRef full_size,
bool force_clone = false);
// Flatten sparse tensor's indices from nD to 1D, similar to NOTE [ Flatten
// Sparse Indices ], except this one allows partial flattening: only flatten on
// specified dims. Note that the flattened indices might be uncoalesced if
// dims_to_flatten.size() < sparse_dim. Also if input indices is already
// coalesced, the flattened indices will also be sorted.
//
// args:
// indices: sparse tensor indices
// sizes: sparse tensor sizes
// dims_to_flatten: a list of dim index to flatten
//
// Ex1:
// indices = [[2, 4, 0],
// [3, 1, 3]]
// sizes = [2, 12]
// dims_to_flatten = [0, 1]
// new_indices = [ 2 * 12 + 3, 4 * 12 + 1, 0 * 12 + 3 ] = [27, 49, 3]
//
// Ex2:
// dims_to_flatten = [1]
// new_indices = [ 3, 1, 3 ] # uncoalesced
TORCH_API Tensor flatten_indices_by_dims(
const Tensor& indices,
const IntArrayRef& sizes,
const IntArrayRef& dims_to_flatten);
// Find the CSR representation for a row `indices` from the COO format
TORCH_API Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz);
TORCH_API Tensor zeros_like_with_indices(const Tensor& t);
template <size_t static_shape_max_len>
class TensorGeometryHolder {
using geometry_holder_t = std::array<int64_t, static_shape_max_len>;
public:
explicit TensorGeometryHolder(
IntArrayRef sizes,
IntArrayRef strides,
TensorOptions options = {}) {
std::copy(sizes.begin(), sizes.end(), t_sizes.begin());
std::copy(strides.begin(), strides.end(), t_strides.begin());
}
explicit TensorGeometryHolder(const Tensor& t)
: TensorGeometryHolder(t.sizes(), t.strides()) {}
auto operator*() const {
return std::make_tuple(t_sizes, t_strides);
}
private:
geometry_holder_t t_sizes;
geometry_holder_t t_strides;
};
template <>
class TensorGeometryHolder<0> {
using geometry_holder_t = Tensor;
public:
explicit TensorGeometryHolder(
IntArrayRef sizes,
IntArrayRef strides,
TensorOptions options) {
const int64_t t_ndims = sizes.size();
const auto cpu_options = TensorOptions(options).dtype(kLong).device(kCPU);
Tensor t_sizes_and_strides_cpu = at::empty({2, t_ndims}, cpu_options);
t_sizes_and_strides_cpu.select(0, 0).copy_(at::tensor(sizes, cpu_options));
t_sizes_and_strides_cpu.select(0, 1).copy_(
at::tensor(strides, cpu_options));
const Tensor t_sizes_and_strides =
t_sizes_and_strides_cpu.to(options.device());
t_sizes = t_sizes_and_strides.select(0, 0);
t_strides = t_sizes_and_strides.select(0, 1);
}
explicit TensorGeometryHolder(const Tensor& t)
: TensorGeometryHolder(t.sizes(), t.strides(), t.options()) {}
auto operator*() const {
return std::make_tuple(
t_sizes.template data_ptr<int64_t>(),
t_strides.template data_ptr<int64_t>());
}
private:
geometry_holder_t t_sizes;
geometry_holder_t t_strides;
};
} // namespace at::sparse
| 6,253
| 32.805405
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/StridedRandomAccessor.h
|
#pragma once
namespace at::native {
// (Const)StridedRandomAccessor is a
// (const) random access iterator defined over
// a strided array.
// The traits below introduce the __restrict__
// modifier on different platforms.
template <typename T>
struct DefaultPtrTraits {
using PtrType = T*;
};
#if (defined(_WIN32) || defined(_WIN64))
#define RESTRICT __restrict
#else
#define RESTRICT __restrict__
#endif
template <typename T>
struct RestrictPtrTraits {
using PtrType = T* RESTRICT;
};
template <
typename T,
typename index_t = int64_t,
template <typename U> class PtrTraits = DefaultPtrTraits
>
class ConstStridedRandomAccessor {
public:
using difference_type = index_t;
using value_type = const T;
using pointer = const typename PtrTraits<T>::PtrType;
using reference = const value_type&;
using iterator_category = std::random_access_iterator_tag;
using PtrType = typename PtrTraits<T>::PtrType;
using index_type = index_t;
// Constructors {
C10_HOST_DEVICE
ConstStridedRandomAccessor(PtrType ptr, index_t stride)
: ptr{ptr}, stride{stride}
{}
C10_HOST_DEVICE
explicit ConstStridedRandomAccessor(PtrType ptr)
: ptr{ptr}, stride{static_cast<index_t>(1)}
{}
C10_HOST_DEVICE
ConstStridedRandomAccessor()
: ptr{nullptr}, stride{static_cast<index_t>(1)}
{}
// }
// Pointer-like operations {
C10_HOST_DEVICE
reference operator*() const {
return *ptr;
}
C10_HOST_DEVICE
const value_type* operator->() const {
return reinterpret_cast<const value_type*>(ptr);
}
C10_HOST_DEVICE
reference operator[](index_t idx) const {
return ptr[idx * stride];
}
// }
// Prefix/postfix increment/decrement {
C10_HOST_DEVICE
ConstStridedRandomAccessor& operator++() {
ptr += stride;
return *this;
}
C10_HOST_DEVICE
ConstStridedRandomAccessor operator++(int) {
ConstStridedRandomAccessor copy(*this);
++*this;
return copy;
}
C10_HOST_DEVICE
ConstStridedRandomAccessor& operator--() {
ptr -= stride;
return *this;
}
C10_HOST_DEVICE
ConstStridedRandomAccessor operator--(int) {
ConstStridedRandomAccessor copy(*this);
--*this;
return copy;
}
// }
// Arithmetic operations {
C10_HOST_DEVICE
ConstStridedRandomAccessor& operator+=(index_t offset) {
ptr += offset * stride;
return *this;
}
C10_HOST_DEVICE
ConstStridedRandomAccessor operator+(index_t offset) const {
return ConstStridedRandomAccessor(ptr + offset * stride, stride);
}
C10_HOST_DEVICE
friend ConstStridedRandomAccessor operator+(
index_t offset,
const ConstStridedRandomAccessor& accessor
) {
return accessor + offset;
}
C10_HOST_DEVICE
ConstStridedRandomAccessor& operator-=(index_t offset) {
ptr -= offset * stride;
return *this;
}
C10_HOST_DEVICE
ConstStridedRandomAccessor operator-(index_t offset) const {
return ConstStridedRandomAccessor(ptr - offset * stride, stride);
}
// Note that this operator is well-defined when `this` and `other`
// represent the same sequences, i.e. when
// 1. this.stride == other.stride,
// 2. |other - this| / this.stride is an Integer.
C10_HOST_DEVICE
difference_type operator-(const ConstStridedRandomAccessor& other) const {
return (ptr - other.ptr) / stride;
}
// }
// Comparison operators {
C10_HOST_DEVICE
bool operator==(const ConstStridedRandomAccessor& other) const {
return (ptr == other.ptr) && (stride == other.stride);
}
C10_HOST_DEVICE
bool operator!=(const ConstStridedRandomAccessor& other) const {
return !(*this == other);
}
C10_HOST_DEVICE
bool operator<(const ConstStridedRandomAccessor& other) const {
return ptr < other.ptr;
}
C10_HOST_DEVICE
bool operator<=(const ConstStridedRandomAccessor& other) const {
return (*this < other) || (*this == other);
}
C10_HOST_DEVICE
bool operator>(const ConstStridedRandomAccessor& other) const {
return !(*this <= other);
}
C10_HOST_DEVICE
bool operator>=(const ConstStridedRandomAccessor& other) const {
return !(*this < other);
}
// }
protected:
PtrType ptr;
index_t stride;
};
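// Usage sketch (illustrative only): viewing every other element of a buffer
// through the accessor above.
//   float data[6] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};
//   ConstStridedRandomAccessor<float> it(data, /*stride=*/2);
//   // it[0] == 0.f, it[1] == 2.f, it[2] == 4.f, and (it + 3) - it == 3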
template <
typename T,
typename index_t = int64_t,
template <typename U> class PtrTraits = DefaultPtrTraits
>
class StridedRandomAccessor
: public ConstStridedRandomAccessor<T, index_t, PtrTraits> {
public:
using difference_type = index_t;
using value_type = T;
using pointer = typename PtrTraits<T>::PtrType;
using reference = value_type&;
using BaseType = ConstStridedRandomAccessor<T, index_t, PtrTraits>;
using PtrType = typename PtrTraits<T>::PtrType;
// Constructors {
C10_HOST_DEVICE
StridedRandomAccessor(PtrType ptr, index_t stride)
: BaseType(ptr, stride)
{}
C10_HOST_DEVICE
explicit StridedRandomAccessor(PtrType ptr)
: BaseType(ptr)
{}
C10_HOST_DEVICE
StridedRandomAccessor()
: BaseType()
{}
// }
// Pointer-like operations {
C10_HOST_DEVICE
reference operator*() const {
return *this->ptr;
}
C10_HOST_DEVICE
value_type* operator->() const {
return reinterpret_cast<value_type*>(this->ptr);
}
C10_HOST_DEVICE
reference operator[](index_t idx) const {
return this->ptr[idx * this->stride];
}
// }
// Prefix/postfix increment/decrement {
C10_HOST_DEVICE
StridedRandomAccessor& operator++() {
this->ptr += this->stride;
return *this;
}
C10_HOST_DEVICE
StridedRandomAccessor operator++(int) {
StridedRandomAccessor copy(*this);
++*this;
return copy;
}
C10_HOST_DEVICE
StridedRandomAccessor& operator--() {
this->ptr -= this->stride;
return *this;
}
C10_HOST_DEVICE
StridedRandomAccessor operator--(int) {
StridedRandomAccessor copy(*this);
--*this;
return copy;
}
// }
// Arithmetic operations {
C10_HOST_DEVICE
StridedRandomAccessor& operator+=(index_t offset) {
this->ptr += offset * this->stride;
return *this;
}
C10_HOST_DEVICE
StridedRandomAccessor operator+(index_t offset) const {
return StridedRandomAccessor(this->ptr + offset * this->stride, this->stride);
}
C10_HOST_DEVICE
friend StridedRandomAccessor operator+(
index_t offset,
const StridedRandomAccessor& accessor
) {
return accessor + offset;
}
C10_HOST_DEVICE
StridedRandomAccessor& operator-=(index_t offset) {
this->ptr -= offset * this->stride;
return *this;
}
C10_HOST_DEVICE
StridedRandomAccessor operator-(index_t offset) const {
return StridedRandomAccessor(this->ptr - offset * this->stride, this->stride);
}
// Note that here we call BaseType::operator- version
C10_HOST_DEVICE
difference_type operator-(const BaseType& other) const {
return (static_cast<const BaseType&>(*this) - other);
}
// }
};
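// Usage sketch (illustrative only, not used by the library): the accessors
// above model random access iterators over a strided sequence, so plain loops
// (or standard algorithms) can run directly on, e.g., one column of a
// row-major matrix. The 4x3 buffer layout below is a hypothetical assumption
// of this example.
template <typename T>
T sum_second_column_example(T* data /* 4x3, row-major */) {
  ConstStridedRandomAccessor<T> col(data + 1, /*stride=*/3);
  T acc = T(0);
  for (int64_t i = 0; i < 4; ++i) {
    acc += col[i];
  }
  return acc;
}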
} // namespace at::native
| 6,835
| 21.635762
| 82
|
h
|
null |
pytorch-main/aten/src/ATen/native/TensorAdvancedIndexingUtils.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/native/TensorIterator.h>
namespace at::native {
namespace {
static std::string shapes_as_str(TensorList tensors) {
std::ostringstream os;
bool first = true;
for (auto& tensor : tensors) {
if (tensor.defined()) {
if (!first) {
os << ", ";
}
os << tensor.sizes();
first = false;
}
}
return os.str();
}
} // anonymous namespace
static std::tuple<bool, Tensor> canDispatchToMaskedFill(const Tensor& self, const torch::List<c10::optional<at::Tensor>>& indices,
const Tensor& value){
if (!(value.numel() ==1 && value.device().is_cpu())){
return std::make_tuple(false,Tensor());
}
int64_t num_ind = 0;
Tensor mask;
auto self_device = self.device();
for (const c10::optional<Tensor>& i: indices) {
if (!i.has_value() || !(*i).defined()){
num_ind++;
} else {
const Tensor &index = *i;
if ((index.scalar_type() != kByte && index.scalar_type() != kBool) ||
index.device() != self_device || mask.defined()){
return std::make_tuple(false, Tensor());
} else {
mask = index;
for (const auto j : c10::irange(index.dim())) {
int64_t srcIdx = num_ind + j;
TORCH_CHECK_INDEX(index.size(j) == self.size(srcIdx), "The shape of the mask ", index.sizes(), " at index ", j,
" does not match the shape of the indexed tensor ", self.sizes(), " at index ", srcIdx);
}
num_ind += mask.ndimension();
}
}
}
for (const auto i : c10::irange(num_ind, self.ndimension())) {
(void)i; //Suppress unused variable warning
mask = mask.unsqueeze(-1);
}
return std::make_tuple(true, mask);
}
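// Illustrative sketch (not part of this header): a caller such as index_put_
// can use the check above to take the masked_fill_ fast path when the only
// defined index is a bool/byte mask on the same device and `value` is a
// one-element CPU tensor:
//
//   auto mask_fill = canDispatchToMaskedFill(self, indices, value);
//   if (std::get<0>(mask_fill)) {
//     return self.masked_fill_(std::get<1>(mask_fill), value.item());
//   }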
static AdvancedIndex make_info(Tensor self, IOptTensorListRef orig) {
checkIndexTensorTypes(orig, /*allow_int*/ true);
// first expand BoolTensor (masks) or ByteTensor (masks) into 1 or more LongTensors
auto indices = expandTensors(self, orig);
// next broadcast all index tensors together
try {
indices = expand_outplace(indices);
} catch (std::exception& e) {
TORCH_CHECK_INDEX(false, "shape mismatch: indexing tensors could not be broadcast together"
" with shapes ", shapes_as_str(indices));
}
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
indices.emplace_back();
}
// if the non-null indices are not all adjacent, transpose self and indices
// together so that they're adjacent at the front
if (!hasContiguousSubspace(indices)) {
std::tie(self, indices) = transposeToFront(self, indices);
}
// Ensure indices are on the same device as self
for (auto & indice : indices) {
if (indice.defined() && indice.device() != self.device()) {
indice = indice.to(self.device());
}
}
for (auto & indice : indices) {
if (indice.defined() && indice.dtype() == at::kInt) {
indice = indice.to(at::kLong);
}
}
return AdvancedIndex(self, indices);
}
} // namespace at::native
| 3,072
| 31.691489
| 130
|
h
|
null |
pytorch-main/aten/src/ATen/native/TensorCompare.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
namespace c10 {
class Scalar;
}
namespace at {
class Tensor;
struct TensorIterator;
struct TensorIteratorBase;
}
namespace at::native {
using reduce_minmax_fn =
void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool);
using structured_reduce_minmax_fn =
void (*)(const Tensor&, const Tensor&, const Tensor&, int64_t, bool);
DECLARE_DISPATCH(structured_reduce_minmax_fn, max_stub);
DECLARE_DISPATCH(structured_reduce_minmax_fn, min_stub);
using where_fn = void (*)(TensorIterator &);
DECLARE_DISPATCH(where_fn, where_kernel);
using is_infinity_op_fn = void (*)(TensorIteratorBase &);
DECLARE_DISPATCH(is_infinity_op_fn, isposinf_stub);
DECLARE_DISPATCH(is_infinity_op_fn, isneginf_stub);
using mode_fn = void (*)(Tensor&, Tensor&, const Tensor&, int64_t, bool);
DECLARE_DISPATCH(mode_fn, mode_stub);
using clamp_tensor_fn = void (*)(TensorIteratorBase &);
DECLARE_DISPATCH(clamp_tensor_fn, clamp_stub);
namespace detail {
enum class ClampLimits {Min, Max, MinMax};
}
DECLARE_DISPATCH(void (*)(TensorIteratorBase &, const c10::Scalar&, const c10::Scalar&), clamp_scalar_stub);
DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_min_scalar_stub);
DECLARE_DISPATCH(void (*)(TensorIteratorBase &, c10::Scalar), clamp_max_scalar_stub);
using isin_default_fn = void (*)(const Tensor&, const Tensor&, bool, const Tensor&);
DECLARE_DISPATCH(isin_default_fn, isin_default_stub);
} // namespace at::native
| 1,490
| 28.82
| 108
|
h
|
null |
pytorch-main/aten/src/ATen/native/TensorConversions.h
|
#pragma once
#include <c10/core/Device.h>
#include <c10/core/Layout.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/ScalarType.h>
#include <c10/util/Optional.h>
namespace at {
class Tensor;
namespace native {
bool to_will_alias(
const Tensor& self,
c10::optional<ScalarType> dtype,
c10::optional<Layout> layout,
c10::optional<Device> device,
bool copy,
c10::optional<c10::MemoryFormat> optional_memory_format);
Tensor to_meta(const Tensor& tensor);
c10::optional<Tensor> to_meta(const c10::optional<Tensor>& tensor);
std::vector<Tensor> to_meta(at::ITensorListRef t_list);
} // namespace native
} // namespace at
| 652
| 24.115385
| 67
|
h
|
null |
pytorch-main/aten/src/ATen/native/TensorDimApply.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <c10/util/irange.h>
namespace at::native {
//input tensors are non-zero dim and non-empty
template<typename T1, typename T2, typename Function>
void tensor_dim_apply3(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim, Function func) {
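  // Iterates over every 1-D slice of `self` along `dim`; `counter` tracks the
  // current position in all other dimensions, and `func` is invoked once per
  // slice with raw pointers plus the per-tensor strides along `dim`.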
int ndims = self.dim();
int tensor_dim_apply_has_finished = 0;
std::vector<int64_t> counter(ndims, 0);
T1* self_data = self.data_ptr<T1>();
T1* values_data = values.data_ptr<T1>();
T2* indices_data = indices.data_ptr<T2>();
int64_t self_stride = self.stride(dim);
int64_t values_stride = values.stride(dim);
int64_t indices_stride = indices.stride(dim);
int self_dim_size = self.size(dim);
while (!tensor_dim_apply_has_finished) {
func(self_data, values_data, indices_data, self_dim_size, self_stride, values_stride, indices_stride);
if (ndims == 1) {
break;
}
for (const auto dim_i : c10::irange(ndims)) {
if (dim_i == dim) {
if (dim_i == (ndims - 1)) {
tensor_dim_apply_has_finished = 1;
break;
}
continue;
}
counter[dim_i]++;
self_data += self.stride(dim_i);
values_data += values.stride(dim_i);
indices_data += indices.stride(dim_i);
if (counter[dim_i] == self.size(dim_i)) {
if (dim_i == ndims-1) {
tensor_dim_apply_has_finished = 1;
break;
} else {
self_data -= counter[dim_i]*self.stride(dim_i);
values_data -= counter[dim_i]*values.stride(dim_i);
indices_data -= counter[dim_i]*indices.stride(dim_i);
counter[dim_i] = 0;
}
} else {
break;
}
}
}
}
} // namespace at::native
| 1,727
| 29.857143
| 106
|
h
|
null |
pytorch-main/aten/src/ATen/native/TensorFactories.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/EmptyTensor.h>
#include <ATen/TensorIterator.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/scalar_tensor.h>
#endif
namespace at::native {
// Different combinations of row, col, and offset can lead to two cases:
//
// Case 1 - Trapezoid (Triangle as a special case): row + offset <= col
// Example A: offset > 0
// 1 1 0 0 0
// 1 1 1 0 0
// 1 1 1 1 0
// Example B: offset <= 0
// 0 0 0
// 1 0 0
// 1 1 0
// In this case, we calculate the number of elements in the first row and
// last row of the tril respectively, and then compute the tril size.
//
// Case 2 - Trapezoid + Rectangle: row + offset > col
// Example:
// 1 1 0
// 1 1 1
// 1 1 1
// In this case, we first calculate the size of top trapezoid, and then
// calculate the size of the bottom rectangle.
inline int64_t get_tril_size(int64_t row, int64_t col, int64_t offset) {
// If either dimension is 0 then there is no tril
if (row == 0 || col == 0) {
return 0;
}
// number of elements in the first row of the tril
auto m_first_row = offset > 0 ?
std::min<int64_t>(col, 1 + offset) : // upper bounded by col
row + offset > 0; // either 0 or 1
// number of elements in the last row of the tril, bounded by [0, col]
auto m_last_row = std::max<int64_t>(0, std::min<int64_t>(col, row + offset));
// number of rows, bounded by [0, row]
auto n_row_all = std::max<int64_t>(0, std::min<int64_t>(row, row + offset));
auto n_row_trapezoid = (m_last_row - m_first_row + 1);
// calculate # of elements in the top trapezoid
auto tril_size = (m_first_row + m_last_row) * n_row_trapezoid >> 1;
// calculate # of elements in the bottom rectangle if there is any
auto diff_row = n_row_all - n_row_trapezoid;
if (diff_row > 0) {
tril_size += diff_row * col;
}
return tril_size;
}
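// Worked example (illustrative): row = 3, col = 5, offset = 1 falls into
// Case 1 (trapezoid): m_first_row = 2, m_last_row = 4, n_row_trapezoid = 3, so
// tril_size = (2 + 4) * 3 / 2 = 9, i.e. rows with 2, 3 and 4 ones.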
inline void check_args(
int64_t row, int64_t col, c10::optional<Layout> layout_opt) {
TORCH_CHECK(row >= 0, "row must be non-negative, got ", row);
TORCH_CHECK(col >= 0, "col must be non-negative, got ", col);
if (layout_opt.has_value()) {
TORCH_CHECK(
*layout_opt == at::kStrided,
"only support layout=torch.strided, got",
*layout_opt)
}
}
using at::check_size_nonnegative;
// assumes maximum value in created tensor is n-1 (e.g., torch.randperm(n))
inline void check_supported_max_int_with_precision(int64_t n, const Tensor& tensor) {
// match defined() to behavior of checks below
TORCH_CHECK(at::scalar_tensor(n>0?n-1:n, tensor.options()).defined(),
"n is too large for result tensor type: '", tensor.toString(), "'");
// Ensure sufficient precision for floating point representation.
switch (tensor.scalar_type()) {
case at::ScalarType::Half:
TORCH_CHECK(n <= (int64_t(1) << 11) + 1, "n cannot be greater than 2049 for Half type.");
break;
case at::ScalarType::Float:
TORCH_CHECK(n <= (int64_t(1) << 24) + 1, "n cannot be greater than 2^24+1 for Float type.");
break;
case at::ScalarType::Double: // Unlikely to happen, but doesn't hurt to check
TORCH_CHECK(n <= (int64_t(1) << 53) + 1, "n cannot be greater than 2^53+1 for Double type.");
break;
default:
break;
}
}
// Called by `empty*` functions when deterministic algorithms are enabled to
// fill the tensor with NaN if it is floating point or complex type, or fill
// with max value if it is integer type
inline Tensor& fill_empty_deterministic_(Tensor& tensor) {
if (tensor.is_floating_point() || tensor.is_complex()) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
kBFloat16, kHalf, tensor.scalar_type(), "fill_empty_deterministic_", [&]() {
tensor.fill_(std::numeric_limits<scalar_t>::quiet_NaN());
});
} else {
AT_DISPATCH_INTEGRAL_TYPES_AND(
kBool, tensor.scalar_type(), "fill_empty_deterministic_", [&]() {
tensor.fill_(std::numeric_limits<scalar_t>::max());
});
}
return tensor;
}
// The ZeroTensor allocator ignores whatever allocation is requested and always
// gives you nullptr
struct ZeroTensorAllocator final : public at::Allocator {
ZeroTensorAllocator(at::Device device) : device_(device) {};
~ZeroTensorAllocator() override = default;
static void deleter(void* const pointer) {
TORCH_INTERNAL_ASSERT(!pointer);
}
DataPtr allocate(const size_t /*nbytes*/) const override {
return {nullptr, nullptr, &deleter, device_};
}
DeleterFnPtr raw_deleter() const override {
return deleter;
}
at::Device device_;
};
using binary_fn = void (*)(TensorIterator&);
DECLARE_DISPATCH(binary_fn, complex_stub);
DECLARE_DISPATCH(binary_fn, polar_stub);
} // namespace at::native
| 4,856
| 33.446809
| 99
|
h
|
null |
pytorch-main/aten/src/ATen/native/TensorIteratorDynamicCasting.h
|
#pragma once
#include <complex>
#include <type_traits>
#include <c10/core/ScalarType.h>
#include <c10/util/C++17.h>
#include <ATen/detail/FunctionTraits.h>
#include <ATen/native/TensorIterator.h>
// This file includes utilities for dynamic_casting done by TensorIterator, see CUDALoops.cuh and Loops.h.
// dynamic_casting handles when the types expected by the iterator do not match the types of the arguments
// to the function that is being called.
// On CUDA, the cast is currently pushed down into the kernel (for performance reasons).
// On CPU, there is currently an internal assert that a dynamic_cast is not needed.
namespace at::native {
// `needs_dynamic_casting` compares the types expected by iterator
// (i.e. dtypes of the operands) with the actual type of the arguments
// (and returns) of func_t
template<typename func_t, int nargs=function_traits<func_t>::arity>
struct needs_dynamic_casting {
static bool check(TensorIteratorBase& iter) {
using traits = function_traits<func_t>;
using cpp_type = typename traits::template arg<nargs - 1>::type;
using cpp_map = c10::CppTypeToScalarType<cpp_type>;
if (iter.input_dtype(nargs-1) != cpp_map::value) {
return true;
}
return needs_dynamic_casting<func_t, nargs - 1>::check(iter);
}
};
template<typename func_t>
struct needs_dynamic_casting<func_t, 0> {
static bool check(TensorIteratorBase& iter) {
using traits = function_traits<func_t>;
using cpp_type = typename traits::result_type;
// we could assert output numbers are correct here, but checks
// (including arity) are currently pushed outside of this struct.
if constexpr (std::is_void_v<cpp_type>) {
return false;
} else {
return iter.dtype(0) != c10::CppTypeToScalarType<cpp_type>::value;
}
}
};
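// Illustrative sketch: for a binary float kernel such as
//   [](float a, float b) -> float { return a + b; }
// needs_dynamic_casting<func_t>::check(iter) walks both argument types and the
// result type; it returns true as soon as one operand dtype of `iter` differs
// from kFloat, signalling that casts must be inserted around the kernel (or,
// on CPU, that the internal assert should fire).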
} //namespace at::native
| 1,828
| 32.87037
| 106
|
h
|
null |
pytorch-main/aten/src/ATen/native/TensorTransformations.h
|
#include <ATen/core/Tensor.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/roll.h>
#endif
#include <c10/util/Exception.h>
namespace at::native {
static inline Tensor roll_common(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
TORCH_CHECK(!shifts.empty(), "`shifts` required");
if (dims.empty() && shifts.size() == 1) {
auto flattened = self.contiguous().view(self.numel());
return roll(flattened, shifts[0], 0).view(self.sizes());
}
TORCH_CHECK(
shifts.size() == dims.size(),
"shifts and dimensions must align. shifts: ", shifts.size(), ", dims:", dims.size()
);
AT_ASSERT(dims.size() > 1);
auto tail_shifts = shifts.slice(1);
auto tail_dims = dims.slice(1);
auto first_dim_rolled = roll(self, shifts[0], dims[0]);
return at::roll(first_dim_rolled, tail_shifts, tail_dims);
}
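// Illustrative sketch: roll(t, /*shifts=*/{2}, /*dims=*/{}) takes the first
// branch above, rolling a flattened copy of t by 2 and restoring its shape,
// while roll(t, {1, 2}, {0, 1}) rolls dim 0 first and then handles the
// remaining (shift, dim) pairs via the at::roll call on the tail slices.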
} // namespace at::native
| 896
| 27.935484
| 92
|
h
|
null |
pytorch-main/aten/src/ATen/native/TopKImpl.h
|
#pragma once
#include <ATen/core/TensorAccessor.h>
#include <ATen/NumericUtils.h>
namespace at::native {
#ifdef CPU_CAPABILITY
inline namespace CPU_CAPABILITY {
#else
inline namespace DEFAULT {
#endif
// Core topk loop, shared between CPU and QuantizedCPU
template <typename scalar_t, typename accscalar_t>
void topk_impl_loop(
const int64_t mode_values_stride,
const int64_t mode_indices_stride,
const int64_t tmp_values_stride,
const int64_t k,
const int64_t dim_size,
const bool largest,
const bool sorted,
char** data, const int64_t* strides, const int64_t n) {
using elem_t = std::pair<accscalar_t, int64_t>;
std::vector<elem_t> queue(dim_size);
for (const auto i : c10::irange(n)) {
TensorAccessor<scalar_t, 1> mode_values(
reinterpret_cast<scalar_t*>(data[0] + i * strides[0]),
&k, &mode_values_stride);
TensorAccessor<int64_t, 1> mode_indices(
reinterpret_cast<int64_t*>(data[1] + i * strides[1]),
&k, &mode_indices_stride);
TensorAccessor<scalar_t, 1> tmp_values(
reinterpret_cast<scalar_t*>(data[2] + i * strides[2]),
&dim_size, &tmp_values_stride);
auto n = dim_size;
auto use_partial_sort = k * 64 <= n;
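// Heuristic: when k is much smaller than n (k * 64 <= n), partially sorting
// just the first k elements is cheaper. Otherwise nth_element places the k-th
// element and partitions the rest, and, if `sorted`, the leading k - 1
// elements are then ordered with std::sort.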
for (const auto j : c10::irange(n)) {
queue[j].first = tmp_values[j];
queue[j].second = j;
}
// we want nan to be sorted as top for numpy compatibility
if (use_partial_sort) {
if (largest) {
std::partial_sort(queue.begin(), queue.begin() + k, queue.end(),
[](const elem_t& x, const elem_t& y) -> bool {
return ((_isnan<accscalar_t>(x.first) && !_isnan<accscalar_t>(y.first)) || (x.first > y.first));
});
} else {
std::partial_sort(queue.begin(), queue.begin() + k, queue.end(),
[](const elem_t& x, const elem_t& y) -> bool {
return ((!_isnan<accscalar_t>(x.first) && _isnan<accscalar_t>(y.first)) || (x.first < y.first));
});
}
} else {
if (largest) {
std::nth_element(queue.begin(), queue.begin() + k - 1, queue.end(),
[](const elem_t& x, const elem_t& y) -> bool {
return ((_isnan<accscalar_t>(x.first) && !_isnan<accscalar_t>(y.first)) || (x.first > y.first));
});
if (sorted) {
std::sort(queue.begin(), queue.begin() + k - 1,
[](const elem_t& x, const elem_t& y) -> bool {
return ((_isnan<accscalar_t>(x.first) && !_isnan<accscalar_t>(y.first)) || (x.first > y.first));
});
}
} else {
std::nth_element(queue.begin(), queue.begin() + k -1, queue.end(),
[](const elem_t& x, const elem_t& y) -> bool {
return ((!_isnan<accscalar_t>(x.first) && _isnan<accscalar_t>(y.first)) || (x.first < y.first));
});
if (sorted) {
std::sort(queue.begin(), queue.begin() + k -1,
[](const elem_t& x, const elem_t& y) -> bool {
return ((!_isnan<accscalar_t>(x.first) && _isnan<accscalar_t>(y.first)) || (x.first < y.first));
});
}
}
}
for (const auto j : c10::irange(k)) {
mode_values[j] = queue[j].first;
mode_indices[j] = queue[j].second;
}
}
}
} // namespace CPU_CAPABILITY
} // namespace at::native
| 3,300
| 34.117021
| 110
|
h
|
null |
pytorch-main/aten/src/ATen/native/TransposeType.h
|
#pragma once
#include <c10/util/Exception.h>
namespace at::native {
// Used as an interface between the different BLAS-like libraries
enum class TransposeType {
NoTranspose,
Transpose,
ConjTranspose,
};
// Transforms TransposeType into the BLAS / LAPACK format
static inline char to_blas(TransposeType trans) {
switch (trans) {
case TransposeType::Transpose: return 'T';
case TransposeType::NoTranspose: return 'N';
case TransposeType::ConjTranspose: return 'C';
}
TORCH_INTERNAL_ASSERT(false, "Invalid transpose type");
}
} // namespace at::native
| 578
| 23.125
| 65
|
h
|
null |
pytorch-main/aten/src/ATen/native/TriangularOpsUtils.h
|
#include <ATen/core/Tensor.h>
#include <ATen/native/LinearAlgebraUtils.h>
namespace at::native {
/*
* Given batches of matrices with arbitrary batch dim,
* computes the number of batches for Triu and Tril. This ignores stride 0 dimension
*/
static inline int64_t batchCountTrilTriu(const Tensor& batched_matrices) {
int64_t result = 1;
for (int64_t i = 0; i < batched_matrices.ndimension() - 2; i++) {
if (batched_matrices.stride(i) != 0) {
result *= batched_matrices.size(i);
}
}
return result;
}
/* Checks a necessary property for the triu and tril implementations, hence the name.
* Here batch contiguity is checked for tensors with greater than 4 dimensions.
* Contiguous tensors and tensors with less than 3 dimensions pass this check
*/
static inline std::tuple<bool, Tensor> checkTrilTriuBatchContiguous(const Tensor& tensor, bool allow_zero_stride) {
// Complete contiguity is the most desired property, which is why
// we return true if the tensor is contiguous
if (tensor.is_contiguous()) {
auto default_strides_for_size = batched_matrix_contiguous_strides(tensor.sizes());
if (tensor.strides() == default_strides_for_size) {
return std::make_tuple(true, tensor);
} else {
return std::make_tuple(false, tensor.as_strided(tensor.sizes(), default_strides_for_size));
}
}
int64_t dims = tensor.dim();
// Tensors with dimension less than 4 are handled by default
if (allow_zero_stride && dims <= 3) {
return std::make_tuple(true, tensor);
}
int64_t expected_stride = tensor.size(-1) * tensor.size(-2);
for (int64_t i = dims - 3; i >= 0; i--) {
// Skip trivial dimension;
if (allow_zero_stride && i == 0 && (tensor.stride(i) == 0 || tensor.size(i) == 1)) {
continue;
}
if (expected_stride != tensor.stride(i)) {
return std::make_tuple(false, tensor.contiguous());
}
expected_stride *= tensor.size(i);
}
return std::make_tuple(true, tensor);
}
} // namespace at::native
| 2,002
| 33.534483
| 115
|
h
|
null |
pytorch-main/aten/src/ATen/native/TypeProperties.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/core/IListRef.h>
namespace at::native {
struct ResultTypeState {
c10::ScalarType dimResult = ScalarType::Undefined;
c10::ScalarType wrappedResult = ScalarType::Undefined;
c10::ScalarType zeroResult = ScalarType::Undefined;
};
TORCH_API ResultTypeState update_result_type_state(const Tensor& tensor, const ResultTypeState& in_state);
TORCH_API ResultTypeState update_result_type_state(const Scalar& scalar, const ResultTypeState& in_state);
TORCH_API ScalarType result_type(const ResultTypeState& state);
TORCH_API ScalarType result_type(ITensorListRef tensors);
} // namespace at::native
| 658
| 30.380952
| 106
|
h
|
null |
pytorch-main/aten/src/ATen/native/UnaryOps.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
#include <ATen/Generator.h>
#include <c10/core/Scalar.h>
#include <stdexcept>
namespace at {
class Tensor;
class TensorBase;
struct TensorIteratorBase;
}
namespace at::native {
using unary_fn = void(*)(TensorIteratorBase&);
using unary_fn_with_scalar = void(*)(TensorIteratorBase&, const Scalar& a);
inline namespace CPU_CAPABILITY {
void conj_kernel(TensorIteratorBase &iter);
void neg_kernel(TensorIteratorBase &iter);
void reciprocal_kernel(TensorIteratorBase &iter);
void rsqrt_kernel(TensorIteratorBase& iter);
void sqrt_kernel(TensorIteratorBase& iter);
} // namespace CPU_CAPABILITY
DECLARE_DISPATCH(unary_fn, abs_stub);
DECLARE_DISPATCH(unary_fn, angle_stub);
DECLARE_DISPATCH(unary_fn, conj_physical_stub);
DECLARE_DISPATCH(unary_fn, acos_stub);
DECLARE_DISPATCH(unary_fn, acosh_stub);
DECLARE_DISPATCH(unary_fn, asinh_stub);
DECLARE_DISPATCH(unary_fn, atanh_stub);
DECLARE_DISPATCH(unary_fn, asin_stub);
DECLARE_DISPATCH(unary_fn, atan_stub);
DECLARE_DISPATCH(unary_fn, bitwise_not_stub);
DECLARE_DISPATCH(unary_fn, logical_not_stub);
DECLARE_DISPATCH(unary_fn, ceil_stub);
DECLARE_DISPATCH(unary_fn, cos_stub);
DECLARE_DISPATCH(unary_fn, cosh_stub);
DECLARE_DISPATCH(unary_fn, digamma_stub);
DECLARE_DISPATCH(unary_fn, special_entr_stub);
DECLARE_DISPATCH(unary_fn, special_erfcx_stub);
DECLARE_DISPATCH(unary_fn, erf_stub);
DECLARE_DISPATCH(unary_fn, erfc_stub);
DECLARE_DISPATCH(unary_fn, erfinv_stub);
DECLARE_DISPATCH(unary_fn, exp_stub);
DECLARE_DISPATCH(unary_fn, exp2_stub);
DECLARE_DISPATCH(unary_fn, expm1_stub);
DECLARE_DISPATCH(unary_fn, floor_stub);
DECLARE_DISPATCH(unary_fn, frac_stub);
DECLARE_DISPATCH(unary_fn, frexp_stub);
DECLARE_DISPATCH(unary_fn, i0_stub);
DECLARE_DISPATCH(unary_fn, special_i0e_stub);
DECLARE_DISPATCH(unary_fn, special_i1_stub);
DECLARE_DISPATCH(unary_fn, special_i1e_stub);
DECLARE_DISPATCH(unary_fn, log_stub);
DECLARE_DISPATCH(unary_fn, log10_stub);
DECLARE_DISPATCH(unary_fn, log1p_stub);
DECLARE_DISPATCH(unary_fn, log2_stub);
DECLARE_DISPATCH(unary_fn, special_ndtri_stub);
DECLARE_DISPATCH(unary_fn, special_log_ndtr_stub);
DECLARE_DISPATCH(unary_fn, neg_stub);
DECLARE_DISPATCH(unary_fn, reciprocal_stub);
DECLARE_DISPATCH(unary_fn, round_stub);
DECLARE_DISPATCH(unary_fn, rsqrt_stub);
DECLARE_DISPATCH(unary_fn, sigmoid_stub);
DECLARE_DISPATCH(unary_fn_with_scalar, logit_stub);
DECLARE_DISPATCH(unary_fn, sign_stub);
DECLARE_DISPATCH(unary_fn, signbit_stub);
DECLARE_DISPATCH(unary_fn, sgn_stub);
DECLARE_DISPATCH(unary_fn, sin_stub);
DECLARE_DISPATCH(unary_fn, sinc_stub);
DECLARE_DISPATCH(unary_fn, sinh_stub);
DECLARE_DISPATCH(unary_fn, sqrt_stub);
DECLARE_DISPATCH(unary_fn, tan_stub);
DECLARE_DISPATCH(unary_fn, tanh_stub);
DECLARE_DISPATCH(unary_fn, trigamma_stub);
DECLARE_DISPATCH(unary_fn, trunc_stub);
DECLARE_DISPATCH(unary_fn, lgamma_stub);
DECLARE_DISPATCH(unary_fn, special_airy_ai_stub);
DECLARE_DISPATCH(unary_fn, special_bessel_j0_stub);
DECLARE_DISPATCH(unary_fn, special_bessel_j1_stub);
DECLARE_DISPATCH(unary_fn, special_bessel_y0_stub);
DECLARE_DISPATCH(unary_fn, special_bessel_y1_stub);
DECLARE_DISPATCH(unary_fn, special_modified_bessel_i0_stub);
DECLARE_DISPATCH(unary_fn, special_modified_bessel_i1_stub);
DECLARE_DISPATCH(unary_fn, special_modified_bessel_k0_stub);
DECLARE_DISPATCH(unary_fn, special_modified_bessel_k1_stub);
DECLARE_DISPATCH(unary_fn, special_scaled_modified_bessel_k0_stub);
DECLARE_DISPATCH(unary_fn, special_scaled_modified_bessel_k1_stub);
DECLARE_DISPATCH(unary_fn, special_spherical_bessel_j0_stub);
// NB: these are actually defined in Distribution
DECLARE_DISPATCH(void(*)(const TensorBase&, const TensorBase&, c10::optional<Generator>), bernoulli_tensor_stub);
DECLARE_DISPATCH(void(*)(const TensorBase&, const double, c10::optional<Generator>), bernoulli_scalar_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), cauchy_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional<Generator>), exponential_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, c10::optional<Generator>), geometric_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), log_normal_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const double, const double, c10::optional<Generator>), uniform_stub);
DECLARE_DISPATCH(void(*)(const TensorBase&, const double, const double, c10::optional<Generator>), normal_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const uint64_t, const int64_t, c10::optional<Generator>), random_from_to_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional<Generator>), random_full_64_bits_range_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, c10::optional<Generator>), random_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t, const double), kaiser_window_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const int64_t), polygamma_stub);
DECLARE_DISPATCH(void(*)(TensorIteratorBase&, const Scalar& a, const Scalar& b), clamp_stub);
DECLARE_DISPATCH(
void (*)(Tensor&, const Tensor&, int64_t, c10::optional<Generator>),
multinomial_with_replacement_stub);
DECLARE_DISPATCH(
void (*)(
TensorIteratorBase&,
c10::optional<double>,
c10::optional<double>,
c10::optional<double>),
nan_to_num_stub);
DECLARE_DISPATCH(void (*)(TensorIteratorBase&, int64_t), round_decimals_stub);
// Missing unary functions
// digamma
// lgamma
// erfinv
// clone
// contiguous
// zero
} // namespace at::native
| 5,612
| 41.847328
| 125
|
h
|
null |
pytorch-main/aten/src/ATen/native/UnfoldBackward.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/NonEmptyUtils.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/arange.h>
#endif
namespace at::native {
using unfold_backward_fn = void (*)(
Tensor& grad_in,
const Tensor& grad,
int64_t dim,
int64_t size,
int64_t step
);
DECLARE_DISPATCH(unfold_backward_fn, unfold_backward_stub);
namespace {
// Note on naming: it is unconventional.
// grad_in does not mean that it is a gradient with respect to the input;
// grad_in/grad_out are just the input/output of the unfold_backward kernel.
static C10_UNUSED TensorIterator _make_unfold_backward_iter_over_grad_out(
Tensor& grad_out,
const Tensor& grad_in,
int64_t dim,
int64_t size,
int64_t step
) {
dim = maybe_wrap_dim(dim, grad_out.dim());
// last dim stores the folds
auto grad_out_dim_size = ensure_nonempty_size(grad_out, dim);
auto grad_in_dim_size = ensure_nonempty_size(grad_in, dim);
// dictates the number of elements to iterate over
// in dimension `dim`
auto iter_dim_size = std::min(
grad_out_dim_size,
(grad_in_dim_size - 1) * step + size
);
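// E.g. unfold with size = 3, step = 2 over a dimension of length 7 yields
// grad_in_dim_size = 3 folds, so at most (3 - 1) * 2 + 3 = 7 positions of
// grad_out along `dim` can receive contributions.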
/* prepare grad_out for TensorIterator { */
auto grad_out_strides = ensure_nonempty_vec(grad_out.strides().vec());
auto grad_out_sizes = ensure_nonempty_vec(grad_out.sizes().vec());
grad_out_sizes[dim] = iter_dim_size;
auto grad_out_restrided = grad_out.as_strided(
grad_out_sizes, grad_out_strides
);
/* } */
/* prepare grad_in for TensorIterator { */
auto grad_in_strides = ensure_nonempty_vec(grad_in.strides().vec());
auto grad_in_sizes = ensure_nonempty_vec(grad_in.sizes().vec());
// set strides for dim to 0
// and size to 1 because
// this dimension is indexed inside the kernel
grad_in_strides[dim] = 0;
grad_in_sizes[dim] = 1;
grad_in_strides.pop_back();
grad_in_sizes.pop_back();
auto grad_in_restrided = grad_in.squeeze(-1).as_strided(
grad_in_sizes, grad_in_strides
);
/* } */
// During the TensorIterator iteration we have to know
// i_dim in grad_out[i_1,...,i_dim,...i_n],
// idx_dim stores this information
/* prepare idx_dim for TensorIterator { */
auto idx_dim = at::arange(
0, iter_dim_size, grad_in.options().dtype(at::kLong)
);
auto grad_out_dim = ensure_nonempty_dim(grad_out.dim());
auto idx_dim_strides = std::vector<int64_t>(grad_out_dim, 0);
auto idx_dim_sizes = std::vector<int64_t>(grad_out_dim, 1);
idx_dim_strides[dim] = 1;
idx_dim_sizes[dim] = iter_dim_size;
// idx_dim's size will be broadcast as determined by grad_out's sizes in the TensorIterator
auto idx_dim_restrided = idx_dim.as_strided(idx_dim_sizes, idx_dim_strides);
/* } */
auto iter = TensorIteratorConfig()
.set_check_mem_overlap(false)
.check_all_same_dtype(false)
.resize_outputs(false)
.add_owned_output(grad_out_restrided)
.add_owned_input(grad_in_restrided)
.add_owned_input(idx_dim_restrided)
.build();
return iter;
}
}
} // namespace at::native
| 3,075
| 26.221239
| 84
|
h
|
null |
pytorch-main/aten/src/ATen/native/batch_norm.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/native/DispatchStub.h>
namespace at::native {
using batch_norm_fn = void (*)(Tensor&, const Tensor&, const Tensor&,
const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, bool, double);
using batch_norm_collect_stats_fn = void (*)(Tensor&, Tensor&, const Tensor&);
using batch_norm_backward_fn = void(*)(Tensor&, Tensor&, Tensor&, const Tensor&,
const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, const Tensor&, bool, double);
DECLARE_DISPATCH(batch_norm_fn, batch_norm_cpu_stub);
DECLARE_DISPATCH(batch_norm_collect_stats_fn, batch_norm_cpu_collect_stats_stub);
DECLARE_DISPATCH(batch_norm_backward_fn, batch_norm_cpu_backward_stub);
// TensorAccessor when it is defined to work around undefined...
template <typename scalar_t>
static TensorAccessor<scalar_t, 1> conditional_accessor_1d(const Tensor& t) {
if (! t.defined()) {
return TensorAccessor<scalar_t, 1>(nullptr, nullptr, nullptr);
}
return t.accessor<scalar_t, 1>();
}
template <typename scalar_t>
static scalar_t* conditional_data_ptr(const Tensor& t) {
return t.defined() ? t.contiguous().data_ptr<scalar_t>()
: nullptr;
}
} // namespace at::native
| 1,265
| 36.235294
| 112
|
h
|
null |
pytorch-main/aten/src/ATen/native/group_norm.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
#include <cstdint>
namespace at {
class Tensor;
namespace native {
using forward_fn = void (*)(
const Tensor& /* X */,
const Tensor& /* gamma */,
const Tensor& /* beta */,
int64_t /* N */,
int64_t /* C */,
int64_t /* HxW */,
int64_t /* group */,
double /* eps */,
Tensor& /* Y */,
Tensor& /* mean */,
Tensor& /* rstd */);
using backward_fn = void (*)(
const Tensor& /* dY */,
const Tensor& /* X */,
const Tensor& /* mean */,
const Tensor& /* rstd */,
const Tensor& /* gamma */,
int64_t /* N */,
int64_t /* C */,
int64_t /* HxW */,
int64_t /* group */,
Tensor& /* dX */,
Tensor& /* dgamma */,
Tensor& /* dbeta */);
DECLARE_DISPATCH(forward_fn, GroupNormKernel);
DECLARE_DISPATCH(backward_fn, GroupNormBackwardKernel);
} // namespace native
} // namespace at
| 907
| 20.116279
| 55
|
h
|
null |
pytorch-main/aten/src/ATen/native/im2col.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/Parallel.h>
#include <ATen/native/cpu/utils.h>
#include <c10/util/irange.h>
#include <algorithm>
namespace at::native {
template <typename T>
static void im2col(
const T* data_im,
const int64_t channels,
const int64_t height,
const int64_t width,
const int64_t output_height,
const int64_t output_width,
const int64_t kernel_h,
const int64_t kernel_w,
const int64_t pad_h,
const int64_t pad_w,
const int64_t stride_h,
const int64_t stride_w,
const int64_t dilation_h,
const int64_t dilation_w,
T* data_col,
bool is_channels_last = false) {
const int64_t height_col = output_height;
const int64_t width_col = output_width;
const int64_t channels_col = channels * kernel_h * kernel_w;
if (is_channels_last) {
at::parallel_for(0, height_col * width_col, 0, [&](int64_t begin, int64_t end) {
int64_t h_col{0}, w_col{0};
data_index_init(begin, h_col, height_col, w_col, width_col);
for (const auto i_col : c10::irange(begin, end)) {
for (const auto h_offset : c10::irange(kernel_h)) {
int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h;
for (const auto w_offset : c10::irange(kernel_w)) {
int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w;
const T* slice_im = data_im + (h_im * width + w_im) * channels;
T* slice_col = data_col + (i_col * kernel_h * kernel_w + h_offset * kernel_w + w_offset) * channels;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
std::copy_n(slice_im, channels, slice_col);
} else {
std::fill_n(slice_col, channels, T(0));
}
}
}
// move to the next index
data_index_step(h_col, height_col, w_col, width_col);
}
});
} else {
at::parallel_for(0, channels_col, 0, [&](int64_t begin, int64_t end) {
int64_t c_im{0}, h_offset{0}, w_offset{0};
data_index_init(begin, c_im, channels, h_offset, kernel_h, w_offset, kernel_w);
for (const auto c_col : c10::irange(begin, end)) {
for (const auto h_col : c10::irange(height_col)) {
int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h;
for (const auto w_col : c10::irange(width_col)) {
int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w;
data_col[(c_col * height_col + h_col) * width_col + w_col] =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width)
? data_im[(c_im * height + h_im) * width + w_im]
: static_cast<T>(0);
}
}
// move to the next index
data_index_step(c_im, channels, h_offset, kernel_h, w_offset, kernel_w);
}
});
}
}
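// Shape sketch (illustrative): with channels = 2, a 5x5 image, a 3x3 kernel,
// stride 1, no padding and no dilation, output_height = output_width = 3 and
// data_col holds channels * kernel_h * kernel_w = 18 rows of
// output_height * output_width = 9 values in the default (contiguous) layout;
// the channels-last branch gathers the same data grouped per output position.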
template <typename T>
static void col2im(
const T* data_col,
const int64_t channels,
const int64_t height,
const int64_t width,
const int64_t output_height,
const int64_t output_width,
const int64_t kernel_h,
const int64_t kernel_w,
const int64_t pad_h,
const int64_t pad_w,
const int64_t stride_h,
const int64_t stride_w,
const int64_t dilation_h,
const int64_t dilation_w,
T* data_im,
bool is_channels_last = false) {
std::fill_n(data_im, height * width * channels, T(0));
const int64_t height_col = output_height;
const int64_t width_col = output_width;
const int64_t channels_col = channels * kernel_h * kernel_w;
if (is_channels_last) {
for (const auto h_col : c10::irange(height_col)) {
for (const auto w_col : c10::irange(width_col)) {
for (const auto h_offset : c10::irange(kernel_h)) {
int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h;
for (const auto w_offset : c10::irange(kernel_w)) {
int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w;
T* slice_im = data_im + (h_im * width + w_im) * channels;
const T* slice_col = data_col + ((h_col * width_col + w_col) * kernel_h * kernel_w
+ h_offset * kernel_w + w_offset) * channels;
if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width) {
std::transform(slice_col, slice_col + channels, slice_im, slice_im, std::plus<T>());
}
}
}
}
}
} else {
for (const auto c_col : c10::irange(channels_col)) {
int64_t w_offset = c_col % kernel_w;
int64_t h_offset = (c_col / kernel_w) % kernel_h;
int64_t c_im = c_col / kernel_h / kernel_w;
for (const auto h_col : c10::irange(height_col)) {
int64_t h_im = h_col * stride_h - pad_h + h_offset * dilation_h;
for (const auto w_col : c10::irange(width_col)) {
int64_t w_im = w_col * stride_w - pad_w + w_offset * dilation_w;
if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width)
data_im[(c_im * height + h_im) * width + w_im] +=
data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
}
}
}
} // namespace at::native
| 5,227
| 33.853333
| 112
|
h
|
null |
pytorch-main/aten/src/ATen/native/im2col_shape_check.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/TensorUtils.h>
#include <ATen/div_rtn.h>
namespace at::native {
static inline void col2im_shape_check(
const Tensor& input,
const Tensor& grad_output,
int64_t output_height,
int64_t output_width,
int64_t kernel_height,
int64_t kernel_width,
int64_t dilation_height,
int64_t dilation_width,
int64_t pad_height,
int64_t pad_width,
int64_t stride_height,
int64_t stride_width) {
TORCH_CHECK(
kernel_width > 0 && kernel_height > 0,
"kernel size should be greater than zero, but got kernel_height: ",
kernel_height,
" kernel_width: ",
kernel_width);
TORCH_CHECK(
stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
TORCH_CHECK(
pad_width >= 0 && pad_height >= 0,
"padding should be non-negative, but got pad_height: ",
pad_height,
" pad_width: ",
pad_width);
int64_t ndim = input.ndimension();
// a size-0 dimension is allowed only for the batch dimension.
TORCH_CHECK(
(ndim == 2 && input.size(0) != 0 && input.size(1) != 0) ||
(ndim == 3 && input.size(1) != 0 && input.size(2) != 0),
"Expected 2D or 3D (batch mode) tensor for input with possibly 0 batch size and non-zero dimensions for input, but got: ",
input.sizes());
int64_t batch_dim = (ndim == 3) ? 0 : -1;
int64_t n_input_plane = input.size(batch_dim + 1);
if (n_input_plane % (kernel_width * kernel_height) != 0) {
AT_ERROR(
"Expected size of input's dimension 1 to be divisible by the "
"product of kernel_size, but got input.size(1)=",
n_input_plane,
" and kernel_size=(",
kernel_height,
", ",
kernel_width,
").");
}
int64_t input_length = input.size(batch_dim + 2);
int64_t n_blocks_height =
div_rtn<int64_t>(
output_height + 2 * pad_height -
dilation_height * (kernel_height - 1) - 1,
stride_height) +
1;
int64_t n_blocks_width = div_rtn<int64_t>(
output_width + 2 * pad_width -
dilation_width * (kernel_width - 1) - 1,
stride_width) +
1;
if (input_length != (n_blocks_height * n_blocks_width)) {
AT_ERROR(
"Given output_size=(",
output_height,
", ",
output_width,
"), kernel_size=(",
kernel_height,
", ",
kernel_width,
"), dilation=(",
dilation_height,
", ",
dilation_width,
"), padding=(",
pad_height,
", ",
pad_width,
"), stride=(",
stride_height,
", ",
stride_width,
"), expected size of input's dimension 2 to match the calculated number of ",
"sliding blocks ",
n_blocks_height,
" * ",
n_blocks_width,
" = ",
(n_blocks_height * n_blocks_width),
", but got input.size(2)=",
input_length,
".");
}
TORCH_CHECK(
n_blocks_height >= 1 && n_blocks_width >= 1,
"Given output_size=(", output_height, ", ", output_width, "), ",
"kernel_size=(", kernel_height, ", ", kernel_width, "), ",
"dilation=(", dilation_height, ", ", dilation_width, "), ",
"padding=(", pad_height, ", ", pad_width, "), ",
"stride=(", stride_height, ", ", stride_width, "), ",
"calculated shape of the array of sliding blocks as ",
"(", n_blocks_height, ", ", n_blocks_width, "), ",
"which is too small (non-positive)");
if (output_width < 1 || output_height < 1) {
AT_ERROR(
"Expected output spatial size to be positive, but got: output_size=(",
output_height,
", ",
output_width,
").");
}
}
static inline void im2col_shape_check(
const Tensor& input,
const Tensor& grad_output,
int64_t kernel_height,
int64_t kernel_width,
int64_t dilation_height,
int64_t dilation_width,
int64_t pad_height,
int64_t pad_width,
int64_t stride_height,
int64_t stride_width) {
TORCH_CHECK(
kernel_width > 0 && kernel_height > 0,
"kernel size should be greater than zero, but got kernel_height: ",
kernel_height,
" kernel_width: ",
kernel_width);
TORCH_CHECK(
dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
TORCH_CHECK(
pad_width >= 0 && pad_height >= 0,
"padding should be non-negative, but got pad_height: ",
pad_height,
" pad_width: ",
pad_width);
TORCH_CHECK(
stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_height: ",
stride_height,
" stride_width: ",
stride_width);
int64_t ndim = input.ndimension();
// a size-0 dimension is allowed only for the batch dimension.
bool valid_dims = input.size(1) != 0 && input.size(2) != 0;
TORCH_CHECK(
(ndim == 3 && input.size(0) && valid_dims) ||
(ndim == 4 && valid_dims && input.size(3) != 0),
"Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ",
input.sizes());
int64_t dim_batch = 0;
if (ndim == 3) {
dim_batch = -1;
}
int64_t input_height = input.size(dim_batch + 2);
int64_t input_width = input.size(dim_batch + 3);
int64_t output_height = div_rtn<int64_t>(
input_height + 2 * pad_height -
(dilation_height * (kernel_height - 1) + 1),
stride_height) +
1;
int64_t output_width = div_rtn<int64_t>(
input_width + 2 * pad_width -
(dilation_width * (kernel_width - 1) + 1),
stride_width) +
1;
if (output_height < 1 || output_width < 1) {
AT_ERROR(
"Given input with spatial size (",
input_height,
", ",
input_width,
"), kernel_size=(",
kernel_height,
", ",
kernel_width,
"), dilation=(",
dilation_height,
", ",
dilation_width,
"), padding=(",
pad_height,
", ",
pad_width,
"), calculated shape of the array of sliding blocks as (",
output_height,
", ",
output_width,
"), but its components must be at least one.");
}
}
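// Worked example (illustrative): input_height = input_width = 5, a 3x3 kernel,
// pad 1, stride 2, dilation 1 gives
// output = div_rtn(5 + 2 * 1 - (1 * (3 - 1) + 1), 2) + 1 = div_rtn(4, 2) + 1 = 3,
// so the check passes with a 3x3 grid of sliding blocks.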
} // namespace at::native
| 6,913
| 28.67382
| 128
|
h
|
null |
pytorch-main/aten/src/ATen/native/layer_norm.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/native/DispatchStub.h>
#include <c10/util/accumulate.h>
namespace at::native {
namespace {
C10_ALWAYS_INLINE std::pair<int64_t, int64_t> _check_layer_norm_inputs(
const Tensor& input,
IntArrayRef normalized_shape,
const Tensor& weight /* optional */,
const Tensor& bias /* optional */) {
const int normalized_ndim = normalized_shape.size();
TORCH_CHECK(
normalized_ndim >= 1,
"Expected normalized_shape to be at least 1-dimensional, i.e., ",
"containing at least one element, but got normalized_shape = ",
normalized_shape);
TORCH_CHECK(
!weight.defined() || weight.sizes().equals(normalized_shape),
"Expected weight to be of same shape as normalized_shape, but got ",
"weight of shape ",
weight.sizes(),
" and normalized_shape = ",
normalized_shape);
TORCH_CHECK(
!bias.defined() || bias.sizes().equals(normalized_shape),
"Expected bias to be of same shape as normalized_shape, but got ",
"bias of shape ",
bias.sizes(),
" and normalized_shape = ",
normalized_shape);
const auto input_shape = input.sizes();
const auto input_ndim = input.dim();
if (input_ndim < normalized_ndim ||
!input_shape.slice(input_ndim - normalized_ndim)
.equals(normalized_shape)) {
std::stringstream ss;
ss << "Given normalized_shape=" << normalized_shape
<< ", expected input with shape [*";
for (auto size : normalized_shape) {
ss << ", " << size;
}
ss << "], but got input of size" << input_shape;
AT_ERROR(ss.str());
}
const int axis = input_ndim - normalized_ndim;
const int64_t M =
c10::multiply_integers(input_shape.cbegin(), input_shape.cbegin() + axis);
const int64_t N =
c10::multiply_integers(input_shape.cbegin() + axis, input_shape.cend());
return std::make_pair(M, N);
}
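// Worked example (illustrative): input of shape [20, 5, 10, 10] with
// normalized_shape = [10, 10] gives axis = 2, M = 20 * 5 = 100 slices to
// normalize and N = 10 * 10 = 100 elements per slice.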
} // namespace
void layer_norm_cpu_out(
at::Tensor& out,
const at::Tensor& input,
const Tensor& gamma,
const Tensor& beta,
double eps,
int64_t M,
int64_t N);
using forward_fn = void (*)(
const Tensor& /* X */,
const Tensor& /* gamma */,
const Tensor& /* beta */,
int64_t /* M */,
int64_t /* N */,
double /* eps */,
Tensor* /* Y */,
Tensor* /* mean */,
Tensor* /* rstd */);
using backward_fn = void (*)(
const Tensor& /* dY */,
const Tensor& /* X */,
const Tensor& /* mean */,
const Tensor& /* rstd */,
const Tensor& /* gamma */,
int64_t /* M */,
int64_t /* N */,
Tensor* /* dX */,
Tensor* /* dgamma */,
Tensor* /* dbeta */);
DECLARE_DISPATCH(forward_fn, LayerNormKernel);
DECLARE_DISPATCH(backward_fn, LayerNormBackwardKernel);
} // namespace at::native
| 2,796
| 26.693069
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/native/vol2col.h
|
#pragma once
#include <cstring>
namespace at::native {
template <typename T>
static void vol2col(
const T* data_vol,
const int64_t channels,
const int64_t depth,
const int64_t height,
const int64_t width,
const int64_t depth_col,
const int64_t height_col,
const int64_t width_col,
const int64_t kT,
const int64_t kernel_height,
const int64_t kernel_width,
const int64_t pT,
const int64_t pH,
const int64_t pW,
const int64_t dT,
const int64_t dH,
const int64_t dW,
const int64_t dilationT,
const int64_t dilationH,
const int64_t dilationW,
T* data_col) {
int64_t c, t, h, w;
int64_t channels_col = channels * kT * kernel_height * kernel_width;
for (c = 0; c < channels_col; ++c) {
int64_t w_offset = c % kernel_width;
int64_t h_offset = (c / kernel_width) % kernel_height;
int64_t t_offset = (c / kernel_width / kernel_height) % kT;
int64_t c_vol = c / kT / kernel_height / kernel_width;
for (t = 0; t < depth_col; ++t) {
int64_t t_pad = t * dT - pT + t_offset * dilationT;
for (h = 0; h < height_col; ++h) {
int64_t h_pad = h * dH - pH + h_offset * dilationH;
for (w = 0; w < width_col; ++w) {
int64_t w_pad = w * dW - pW + w_offset * dilationW;
if (t_pad >= 0 && t_pad < depth && h_pad >= 0 && h_pad < height &&
w_pad >= 0 && w_pad < width)
data_col[((c * depth_col + t) * height_col + h) * width_col + w] =
data_vol
[((c_vol * depth + t_pad) * height + h_pad) * width +
w_pad];
else
data_col[((c * depth_col + t) * height_col + h) * width_col + w] =
0;
}
}
}
}
}
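// Shape sketch (illustrative): data_col is laid out as
// (channels * kT * kernel_height * kernel_width) x (depth_col * height_col * width_col);
// each row holds one (channel, kernel offset) combination gathered over all
// output positions, with out-of-bounds taps written as zero.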
template <typename T>
static void col2vol(
const T* data_col,
const int64_t channels,
const int64_t depth,
const int64_t height,
const int64_t width,
const int64_t out_depth,
const int64_t out_height,
const int64_t out_width,
const int64_t kT,
const int64_t kernel_height,
const int64_t kernel_width,
const int64_t pT,
const int64_t pH,
const int64_t pW,
const int64_t dT,
const int64_t dH,
const int64_t dW,
const int64_t dilationT,
const int64_t dilationH,
const int64_t dilationW,
T* data_vol) {
memset(data_vol, 0, sizeof(T) * depth * height * width * channels);
int64_t depth_col = out_depth;
int64_t height_col = out_height;
int64_t width_col = out_width;
int64_t channels_col = channels * kT * kernel_height * kernel_width;
for (int64_t c = 0; c < channels_col; ++c) {
int64_t w_offset = c % kernel_width;
int64_t h_offset = (c / kernel_width) % kernel_height;
int64_t t_offset = (c / kernel_width / kernel_height) % kT;
int64_t c_vol = c / kT / kernel_height / kernel_width;
for (int64_t t = 0; t < depth_col; ++t) {
int64_t t_pad = t * dT - pT + t_offset * dilationT;
for (int64_t h = 0; h < height_col; ++h) {
int64_t h_pad = h * dH - pH + h_offset * dilationH;
for (int64_t w = 0; w < width_col; ++w) {
int64_t w_pad = w * dW - pW + w_offset * dilationW;
if (t_pad >= 0 && t_pad < depth && h_pad >= 0 && h_pad < height &&
w_pad >= 0 && w_pad < width)
data_vol
[((c_vol * depth + t_pad) * height + h_pad) * width + w_pad] +=
data_col
[((c * depth_col + t) * height_col + h) * width_col + w];
}
}
}
}
}
} // namespace at::native
| 3,569
| 31.454545
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/native/ao_sparse/quantized/cpu/fbgemm_utils.h
|
#pragma once
#include <ATen/Tensor.h>
#include <c10/core/QScheme.h>
#ifdef USE_FBGEMM
#include <fbgemm/Fbgemm.h>
#include <fbgemm/FbgemmSparse.h>
#include <ATen/native/ao_sparse/quantized/cpu/packed_params.h>
namespace ao {
namespace sparse {
struct TORCH_API PackedLinearWeight
: public LinearPackedParamsBase {
PackedLinearWeight(std::unique_ptr<fbgemm::BCSRMatrix<int8_t>> w,
c10::optional<at::Tensor> bias,
std::vector<int32_t> col_offsets,
std::vector<float> w_scale,
std::vector<int32_t> w_zp,
c10::QScheme q_scheme,
const int64_t out_features_block_size /* block sparsity size across output_features */,
const int64_t in_features_block_size /* block sparsity size across input_features */)
: LinearPackedParamsBase(
out_features_block_size,
in_features_block_size),
w(std::move(w)),
bias_(std::move(bias)),
col_offsets(std::move(col_offsets)),
w_scale(std::move(w_scale)),
w_zp(std::move(w_zp)),
q_scheme(q_scheme) {}
std::unique_ptr<fbgemm::BCSRMatrix<int8_t>> w;
c10::optional<at::Tensor> bias_;
std::vector<int32_t> col_offsets;
std::vector<float> w_scale;
std::vector<int32_t> w_zp;
c10::QScheme q_scheme;
at::Tensor apply(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_relu(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) override;
at::Tensor apply_dynamic(const at::Tensor& input) override {
TORCH_INTERNAL_ASSERT(
false,
"Sparse quantized dynamic linear with fused relu is not yet "
"supported on qnnpack backend.");
return at::Tensor();
}
at::Tensor apply_dynamic_relu(const at::Tensor& input) override {
TORCH_INTERNAL_ASSERT(
false,
"Sparse quantized dynamic linear with fused relu is not yet "
"supported on qnnpack backend.");
return at::Tensor();
}
LinearPackedSerializationType unpack() override;
BCSRSerializationType serialize() override;
static c10::intrusive_ptr<LinearPackedParamsBase> deserialize(
const BCSRSerializationType& serialized);
c10::optional<at::Tensor> bias() override {
return bias_;
}
static c10::intrusive_ptr<LinearPackedParamsBase> prepack(
const at::Tensor& weight,
const c10::optional<at::Tensor>& bias,
const int64_t out_features_block_size,
const int64_t in_features_block_size);
private:
template <bool ReluFused>
at::Tensor apply_impl(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point);
};
}} // namespace ao::sparse
#endif // USE_FBGEMM
namespace ao {
namespace sparse {
int register_linear_params();
}} // namespace ao::sparse
| 2,924
| 29.154639
| 108
|
h
|
null |
pytorch-main/aten/src/ATen/native/ao_sparse/quantized/cpu/packed_params.h
|
#pragma once
#include <cstdint>
#include <ATen/core/ivalue.h>
namespace ao {
namespace sparse {
// <Weight, bias, out_features_block_size, in_features_block_size>
using LinearPackedSerializationType =
std::tuple<at::Tensor, c10::optional<at::Tensor>, std::vector<int64_t>>;
#define SPARSE_LINEAR_PACKED_PARAM_SERIALIZATION_VERSION 2
using BCSRSerializationType =
std::tuple<
int64_t, // Serialization Version
c10::optional<at::Tensor>, // Bias
int64_t, // Out Features (Row) Block Size
int64_t, // In Features (Column) Block Size
at::Tensor, // Weight Scales (single element vector if per-tensor) (float)
at::Tensor, // Wrapper for Weight Zero Points (single element vector if per-tensor) (int8_t)
bool, // Quantization Scheme (true: per tensor, false: per channel)
at::Tensor, // Wrapper for Row Block Indices (int8_t, int16_t, or int32_t)
at::Tensor, // Wrapper for Column Block Indices (int8_t, int16_t, or int32_t)
at::Tensor, // Wrapper for Non-Zero Weight Values, each +128 (uint8_t)
int64_t, // Number of Output Channels
int64_t // Number of Input Channels
>;
using BCSR =
std::tuple<
std::vector<int8_t>, // Non-Zero Weight Values
std::vector<int32_t>, // Compressed Row Block Indices
std::vector<int32_t> // Column Block Indices
>;
struct LinearPackedParamsBase : public torch::jit::CustomClassHolder {
public:
LinearPackedParamsBase(
const int64_t out_features_block_size,
const int64_t in_features_block_size)
: out_features_block_size_(out_features_block_size),
in_features_block_size_(in_features_block_size) {}
virtual at::Tensor apply(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) = 0;
virtual at::Tensor apply_relu(
const at::Tensor& input,
double output_scale,
int64_t output_zero_point) = 0;
virtual at::Tensor apply_dynamic(const at::Tensor& input) = 0;
virtual at::Tensor apply_dynamic_relu(const at::Tensor& input) = 0;
virtual LinearPackedSerializationType unpack() = 0;
virtual BCSRSerializationType serialize() = 0;
virtual c10::optional<at::Tensor> bias() = 0;
virtual void set_bias(const c10::optional<at::Tensor>& bias) {
throw std::runtime_error(
"set_bias is not implemented for this packed "
"parameter type");
}
protected:
const int64_t out_features_block_size_, in_features_block_size_;
};
}} // namespace ao::sparse
| 2,747
| 35.157895
| 116
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/AtomicAddFloat.h
|
#ifndef ATOMIC_ADD_FLOAT
#define ATOMIC_ADD_FLOAT
#if (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))
#include <ATen/native/cpu/Intrinsics.h>
#else
#define _mm_pause()
#endif
#include <atomic>
static inline void cpu_atomic_add_float(float* dst, float fvalue)
{
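  // Compare-and-swap loop: reinterpret the float as a 32-bit integer, attempt
  // the exchange, and retry (with a pause/yield hint) whenever another thread
  // updated *dst between the read and the exchange.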
typedef union {
unsigned intV;
float floatV;
} uf32_t;
uf32_t new_value, old_value;
std::atomic<unsigned>* dst_intV = (std::atomic<unsigned>*)(dst);
old_value.floatV = *dst;
new_value.floatV = old_value.floatV + fvalue;
unsigned* old_intV = (unsigned*)(&old_value.intV);
while (!std::atomic_compare_exchange_strong(dst_intV, old_intV, new_value.intV)) {
#ifdef __aarch64__
__asm__ __volatile__("yield;" : : : "memory");
#else
_mm_pause();
#endif
old_value.floatV = *dst;
new_value.floatV = old_value.floatV + fvalue;
}
}
#endif
| 857
| 21.578947
| 84
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/GridSamplerKernel.h
|
#pragma once
#include <ATen/native/DispatchStub.h>
#include <array>
#include <cstdint>
namespace at {
class TensorBase;
}
namespace at { namespace native {
using forward_2d_fn = void (*) (
const TensorBase &output,
const TensorBase &input,
const TensorBase &grid,
int64_t interpolation_mode,
int64_t padding_mode,
bool align_corners);
using backward_2d_fn = void (*) (
const TensorBase &grad_input,
const TensorBase &grad_grid,
const TensorBase &grad_output,
const TensorBase &input,
const TensorBase &grid,
int64_t interpolation_mode,
int64_t padding_mode,
bool align_corners,
std::array<bool, 2> output_mask);
DECLARE_DISPATCH(forward_2d_fn, grid_sampler_2d_cpu_kernel);
DECLARE_DISPATCH(backward_2d_fn, grid_sampler_2d_backward_cpu_kernel);
}} // namespace at::native
| 838
| 22.971429
| 70
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/IndexKernelUtils.h
|
#pragma once
#include <ATen/native/TensorIterator.h>
#include <c10/util/irange.h>
namespace at {
namespace native {
namespace {
static bool is_constant_index(int ntensor, const int64_t* strides) {
AT_ASSERT(ntensor >= 3);
for (const auto arg : c10::irange(2, ntensor)) {
if (strides[arg] != 0) {
return false;
}
}
return true;
}
struct Indexer {
Indexer(int64_t num_indexers, char** indexers, const int64_t* indexer_strides,
IntArrayRef original_sizes, IntArrayRef original_strides)
: num_indexers(num_indexers)
, indexers(indexers)
, indexer_strides(indexer_strides)
, original_strides(original_strides.data())
, original_sizes(original_sizes.data()) {
AT_ASSERT(static_cast<int64_t>(original_strides.size()) == num_indexers);
AT_ASSERT(static_cast<int64_t>(original_sizes.size()) == num_indexers);
}
int64_t num_indexers;
char** indexers;
const int64_t* indexer_strides;
const int64_t* original_strides;
const int64_t* original_sizes;
int64_t get(int64_t idx) {
int64_t offset = 0;
for (const auto j : c10::irange(num_indexers)) {
int64_t value = *(int64_t*)&indexers[j][idx * indexer_strides[j]];
int64_t size = original_sizes[j];
TORCH_CHECK_INDEX(value >= -size && value < size,
"index ", value, " is out of bounds for dimension ", j, " with size ", size);
if (value < 0) {
value += size;
}
offset += value * original_strides[j];
}
return offset;
}
};
} // anonymous namespace
template <typename scalar_t, typename func_t>
void cpu_index_kernel(TensorIteratorBase& iter, IntArrayRef index_size, IntArrayRef index_stride,
const func_t& f, bool serial_execution=false)
{
int ntensor = iter.ntensors();
// When launching the parallel index version, use a grain size smaller than
// internal::GRAIN_SIZE so the available threads get a more balanced workload
// and better cache locality. The grain size here was chosen via op benchmarks
// to amortize the thread launch overhead.
const int index_parallel_grain_size = 3000;
auto loop = [&](char** data, const int64_t* strides, int64_t n) {
auto indexer = Indexer(ntensor - 2, &data[2], &strides[2], index_size, index_stride);
char* dst = data[0];
char* src = data[1];
if (is_constant_index(ntensor, strides)) {
// specialization for when every element uses the same index
int64_t offset = indexer.get(0);
for (const auto i : c10::irange(n)) {
f(dst + strides[0] * i, src + strides[1] * i, offset);
}
} else {
for (const auto i : c10::irange(n)) {
int64_t offset = indexer.get(i);
f(dst + strides[0] * i, src + strides[1] * i, offset);
}
}
};
if (serial_execution) {
iter.serial_for_each(loop, {0, iter.numel()});
} else {
iter.for_each(loop, index_parallel_grain_size);
}
}
} // at
} // native
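// Standalone sketch (simplified, hypothetical names) of the arithmetic in
// Indexer::get above: combine one value from each index tensor into a flat
// element offset, wrapping negative indices the way Python indexing does.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t sizes[] = {4, 5};    // size of each indexed dimension
  const int64_t strides[] = {5, 1};  // element stride of each indexed dimension
  const int64_t index[] = {-1, 2};   // one value per indexer for this element
  int64_t offset = 0;
  for (int j = 0; j < 2; ++j) {
    int64_t value = index[j];
    if (value < 0) {
      value += sizes[j];             // -1 wraps to 3
    }
    offset += value * strides[j];
  }
  std::printf("%lld\n", static_cast<long long>(offset));  // 3*5 + 2*1 = 17
  return 0;
}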
| 2,977
| 32.460674
| 111
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/Intrinsics.h
|
#pragma once
#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
/* Clang-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(_MSC_VER)
/* Microsoft C/C++-compatible compiler */
#include <intrin.h>
#if _MSC_VER <= 1900
#define _mm256_extract_epi64(X, Y) (((uint64_t*)&X)[Y])
#endif
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* GCC-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__GNUC__) && defined(__ARM_NEON__)
/* GCC-compatible compiler, targeting ARM with NEON */
#include <arm_neon.h>
#elif defined(__GNUC__) && defined(__IWMMXT__)
/* GCC-compatible compiler, targeting ARM with WMMX */
#include <mmintrin.h>
#elif (defined(__GNUC__) || defined(__xlC__)) && \
(defined(__VEC__) || defined(__ALTIVEC__))
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
#include <altivec.h>
/* We need to undef those tokens defined by <altivec.h> to avoid conflicts
with the C++ types. => Can still use __bool/__vector */
#undef bool
#undef vector
#undef pixel
#elif defined(__GNUC__) && defined(__SPE__)
/* GCC-compatible compiler, targeting PowerPC with SPE */
#include <spe.h>
#endif
| 1,212
| 34.676471
| 74
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/IsContiguous.h
|
#pragma once
namespace at { namespace native { inline namespace CPU_CAPABILITY {
// n: number of function arguments (arity)
// traits: function_traits (see FunctionTraits.h)
// s: index of scalar argument or -1
template <int n, int stride_index, typename traits, int s=-1>
struct IsContiguous {
static bool eval(const int64_t* strides) {
using type = typename traits::template arg<n - 1>::type;
return strides[stride_index] == (s == n ? 0 : sizeof(type)) &&
IsContiguous<n - 1, stride_index - 1, traits, s>::eval(strides);
}
};
// will be called when an output exists
template <typename traits, int s>
struct IsContiguous<0, 0, traits, s> {
static bool eval(const int64_t* strides) {
return strides[0] == sizeof(typename traits::result_type);
}
};
// will be called when there is no output
template <typename traits, int s>
struct IsContiguous<0, -1, traits, s> {
static bool eval(const int64_t* /*strides*/) {
return true;
}
};
// output and all inputs are contiguous
template <typename traits,
typename std::enable_if<std::is_void<typename traits::result_type>::value>::type* = nullptr>
static inline bool is_contiguous(const int64_t* strides) {
return IsContiguous<traits::arity, traits::arity - 1, traits>::eval(strides);
}
template <typename traits,
typename std::enable_if<!std::is_void<typename traits::result_type>::value>::type* = nullptr>
static inline bool is_contiguous(const int64_t* strides) {
return IsContiguous<traits::arity, traits::arity, traits>::eval(strides);
}
// input at `s` is scalar (stride 0); output and other inputs are contiguous
// NB: output is typically at strides[0] so first input corresponds to s=1
template <typename traits, int s,
typename std::enable_if<std::is_void<typename traits::result_type>::value>::type* = nullptr>
static inline bool is_contiguous_scalar(const int64_t* strides) {
static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds");
return IsContiguous<traits::arity, traits::arity - 1, traits, s>::eval(strides);
}
template <typename traits, int s,
typename std::enable_if<!std::is_void<typename traits::result_type>::value>::type* = nullptr>
static inline bool is_contiguous_scalar(const int64_t* strides) {
static_assert(s > 0 && s <= traits::arity, "scalar argument index out of bounds");
return IsContiguous<traits::arity, traits::arity, traits, s>::eval(strides);
}
}}}
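// Standalone sketch (a runtime re-expression of the compile-time recursion
// above, with hypothetical names): every stride must equal the element size,
// except the designated scalar argument, whose stride must be 0.
#include <cstdint>
#include <cstdio>

// n: number of inputs; s: 1-based index of the scalar input (0 = none).
// strides[0] belongs to the output, strides[1..n] to the inputs.
bool is_contig(const int64_t* strides, int n, int64_t elem_size, int s = 0) {
  for (int arg = 0; arg <= n; ++arg) {
    const int64_t expected = (s > 0 && arg == s) ? 0 : elem_size;
    if (strides[arg] != expected) {
      return false;
    }
  }
  return true;
}

int main() {
  const int64_t dense[] = {4, 4, 4};       // float output and two float inputs
  const int64_t rhs_scalar[] = {4, 4, 0};  // second input broadcast as scalar
  std::printf("%d %d %d\n",
              is_contig(dense, 2, 4),           // 1
              is_contig(rhs_scalar, 2, 4),      // 0
              is_contig(rhs_scalar, 2, 4, 2));  // 1
  return 0;
}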
| 2,440
| 37.746032
| 97
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/LogAddExp.h
|
#pragma once
#include <c10/util/complex.h>
#include <ATen/NumericUtils.h>
namespace at { namespace native {
inline namespace CPU_CAPABILITY {
// custom min and max to be used in logcumsumexp for complex arguments
template <typename scalar_t>
std::pair<c10::complex<scalar_t>, c10::complex<scalar_t>> _logcumsumexp_minmax(c10::complex<scalar_t> x, c10::complex<scalar_t> y) {
if (at::_isnan(y)) { // either real is nan or imag is nan
return std::make_pair(y, y);
} else if (at::_isnan(x)) { // either real is nan or imag is nan
return std::make_pair(x, x);
} else {
return (x.real() < y.real()) ? std::make_pair(x, y) : std::make_pair(y, x);
}
}
template <typename scalar_t>
scalar_t _log_add_exp_helper(scalar_t x, scalar_t y) {
// Reference : https://www.tensorflow.org/api_docs/python/tf/math/cumulative_logsumexp
scalar_t min = at::_isnan(y) ? y : std::min(x, y); // std::min returns first arg if one of the args is nan
scalar_t max = at::_isnan(y) ? y : std::max(x, y); // std::max returns first arg if one of the args is nan
if (min != max || std::isfinite(min)) {
// nan will be propagated here
return std::log1p(std::exp(min - max)) + max;
} else {
// special case to correctly handle infinite cases
return x;
}
}
template <typename scalar_t>
c10::complex<scalar_t> _log_add_exp_helper(const c10::complex<scalar_t>& x, const c10::complex<scalar_t>& y) {
auto [min, max] = _logcumsumexp_minmax<scalar_t>(x, y);
auto min_real = std::real(min);
auto max_real = std::real(max);
if (at::_isnan(min)) { // either real is nan or imag is nan
// handling the "infectious" NaNs
return {std::numeric_limits<scalar_t>::quiet_NaN(), std::numeric_limits<scalar_t>::quiet_NaN()};
} else if (!std::isfinite(min_real) && (min_real == max_real)) {
if (min_real < 0) {
// handle the -inf case, the imaginary part here does not really matter as the exp(value)
// will be around 0.0 and the angle (i.e. the imaginary part) cannot be determined.
// It does not matter if we're taking the exp of this value
return min;
} else {
// handle the +inf case, we don't need the special precision for log1p for small values
// and to avoid producing nan in case of real(max) == real(min) == +inf
return std::log(std::exp(min) + std::exp(max));
}
} else {
return std::log1p(std::exp(min - max)) + max;
}
}
} // end namespace
}} //end at::native
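// Hedged standalone check (real-valued only, simplified) of the identity the
// helpers above rely on: log(exp(x) + exp(y)) == max + log1p(exp(min - max)),
// which stays finite even when exp(x) alone would overflow.
#include <algorithm>
#include <cmath>
#include <cstdio>

double log_add_exp(double x, double y) {
  const double mn = std::min(x, y);
  const double mx = std::max(x, y);
  if (mn != mx || std::isfinite(mn)) {
    return std::log1p(std::exp(mn - mx)) + mx;  // NaN inputs propagate to NaN
  }
  return x;  // both +inf or both -inf: avoid inf - inf = NaN
}

int main() {
  std::printf("%f\n", log_add_exp(2.0, 3.0));                    // 3.313262
  std::printf("%f\n", std::log(std::exp(2.0) + std::exp(3.0)));  // 3.313262
  std::printf("%f\n", log_add_exp(1000.0, 1000.0));  // 1000.693147, no overflow
  return 0;
}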
| 2,458
| 38.66129
| 132
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/Loops.h
|
#pragma once
// This file provides two functions to help write elementwise kernels:
//
// cpu_kernel(TensorIterator iter, <lambda>)
// cpu_kernel_vec(TensorIterator iter, <lambda>, <vec_lambda>)
//
// Both functions may generate vectorized code. The cpu_kernel implementation
// relies on the compiler's auto-vectorization. The cpu_kernel_vec
// implementation uses x86 SIMD intrinsics when available. These functions
// are only intended to be used in the ATen/native/cpu subdirectory, since files
// in other directories are not compiled with AVX/AVX2 enabled. See README.md
// for more details.
//
// For example, to write a multiplication kernel for float:
//
// cpu_kernel(iter, [](float a, float b) { return a * b; });
//
// Or you may write:
//
// cpu_kernel_vec(iter,
// [](float a, float b) { return a * b; },
// [](Vectorized<float> a, Vectorized<float> b) { return a * b; });
//
// See BinaryOpsKernel.cpp for the complete implementation
//
//
#include <stdint.h>
#include <c10/util/C++17.h>
#include <c10/util/Load.h>
#include <c10/util/irange.h>
#include <ATen/detail/FunctionTraits.h>
#include <ATen/native/cpu/IsContiguous.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/TensorIteratorDynamicCasting.h>
#include <ATen/cpu/vec/vec.h>
#include <utility>
namespace at { namespace native { inline namespace CPU_CAPABILITY {
using namespace vec;
template <typename traits, std::size_t... INDEX>
typename traits::ArgsTuple
dereference_impl(char* C10_RESTRICT data[], const int64_t* strides, int64_t i,
std::index_sequence<INDEX...>) {
return std::make_tuple(
c10::load<typename traits::template arg<INDEX>::type>(
data[INDEX] + i * strides[INDEX])...);
}
template <typename traits>
typename traits::ArgsTuple
dereference(char* C10_RESTRICT data[], const int64_t* strides, int64_t i) {
using Indices = std::make_index_sequence<traits::arity>;
return dereference_impl<traits>(data, strides, i, Indices{});
}
template <typename traits, std::size_t... INDEX>
typename traits::ArgsTuple
dereference_vec_impl(char* C10_RESTRICT data[],
const typename traits::result_type& opt_scalar,
size_t S,
int64_t i,
std::index_sequence<INDEX...>) {
using Vec = typename traits::result_type;
using scalar_t = typename Vec::value_type;
return std::make_tuple(
S == INDEX + 1 ?
opt_scalar :
Vec::loadu(data[INDEX] + i * sizeof(scalar_t))...);
}
template <typename traits>
typename traits::ArgsTuple
dereference_vec(char* C10_RESTRICT data[], const typename traits::result_type& opt_scalar, size_t S, int64_t i) {
using Indices = std::make_index_sequence<traits::arity>;
return dereference_vec_impl<traits>(data, opt_scalar, S, i, Indices{});
}
template <typename func_t,
typename std::enable_if<!std::is_void<typename function_traits<func_t>::result_type>::value>::type* = nullptr>
static inline void
execute_op(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, int64_t n, func_t&& op) {
using traits = function_traits<func_t>;
using result_type = typename traits::result_type;
for (; i < n; i++) {
result_type* out_ptr = (result_type*)(data[0] + i * strides[0]);
*out_ptr = c10::guts::apply(std::forward<func_t>(op), dereference<traits>(
&data[1],
&strides[1],
i));
}
}
template <typename func_t,
typename std::enable_if<std::is_void<typename function_traits<func_t>::result_type>::value>::type* = nullptr>
static inline void
execute_op(char* C10_RESTRICT data[], const int64_t* strides, int64_t i, int64_t n, func_t&& op) {
using traits = function_traits<func_t>;
for (; i < n; i++) {
c10::guts::apply(std::forward<func_t>(op), dereference<traits>(
&data[0],
&strides[0],
i));
}
}
// Basic loop operation (one output, N inputs). May be auto-vectorized
// by the compiler. Supports inputs and outputs of different types.
template <typename func_t>
static inline void
basic_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_t i, int64_t n, func_t&& op) {
using traits = function_traits<func_t>;
constexpr int ntensors = traits::arity + 1;
// Copying strides to temporary array helps auto vectorization in older GCC
// versions.
int64_t strides[ntensors];
for (const auto arg : c10::irange(ntensors)) {
strides[arg] = strides_[arg];
}
execute_op(data, strides, i, n, std::forward<func_t>(op));
}
// the recursive variadic template for iterating over the returned tuple
template<class T, size_t N>
struct TupleOutput {
static void handle(char *C10_RESTRICT data[], const int64_t *strides, int64_t i,
const T &tuple) {
TupleOutput<T, N - 1>::handle(data, strides, i, tuple);
auto output = std::get<N - 1>(tuple);
using output_type = decltype(output);
output_type * out_ptr = (output_type *)(data[N - 1] + i * strides[N - 1]);
*out_ptr = output;
}
};
// Base case for the above recursive template
template<class T>
struct TupleOutput<T, 1> {
static void handle(char *C10_RESTRICT data[], const int64_t *strides, int64_t i,
const T &tuple) {
auto output = std::get<0>(tuple);
using output_type = decltype(output);
output_type* out_ptr = (output_type *)(data[0] + i * strides[0]);
*out_ptr = output;
}
};
template<class... Args>
void handle_tuple_outputs(char* C10_RESTRICT data[],
const int64_t* strides,
int64_t i,
const std::tuple<Args...> &tuple) {
TupleOutput<decltype(tuple), sizeof...(Args)>::handle(data, strides, i, tuple);
}
// Loop operation for `cpu_kernel_multiple_outputs`.
// 1. Use `c10::guts::apply` to make dynamic method invocation
// for the lambda passed in `cpu_kernel_multiple_outputs`.
// 2. Iterate over the members of the returned tuple, set the corresponding
// output tensor by the tuple member in `handle_tuple_outputs` function.
template <typename func_t>
static inline void
multiple_outputs_loop(char* C10_RESTRICT data[], const int64_t* strides_, int64_t i, int64_t n, func_t&& op) {
using traits = function_traits<func_t>;
using result_type = typename traits::result_type;
constexpr int num_outputs = std::tuple_size<result_type>::value;
constexpr int ntensors = traits::arity + num_outputs;
// Copying strides to temporary array helps auto vectorization in older GCC
// versions.
int64_t strides[ntensors];
for (const auto arg : c10::irange(ntensors)) {
strides[arg] = strides_[arg];
}
for (; i < n; i++) {
auto output = c10::guts::apply(op, dereference<traits>(
&data[num_outputs],
&strides[num_outputs],
i));
handle_tuple_outputs(data, strides, i, output);
}
}
// Explicitly vectorized loop implementation. All inputs and outputs must be
// the same type and contiguous with one exception: a single input may be
// a scalar (stride 0). Its position is indicated by the argument `S`. If `S`
// is 0, then there are no scalar inputs.
template <typename func_t, typename vec_func_t>
static inline void
vectorized_loop(char** C10_RESTRICT data_, int64_t n, int64_t S, func_t&& op, vec_func_t&& vop) {
using traits = function_traits<vec_func_t>;
using scalar_t = typename function_traits<func_t>::result_type;
using Vec = Vectorized<scalar_t>;
constexpr int ntensors = traits::arity + 1;
char* C10_RESTRICT data[ntensors];
for (const auto arg : c10::irange(ntensors)) {
data[arg] = data_[arg];
}
Vec opt_scalar = Vec(S > 0 ? *(scalar_t*)data[S] : scalar_t(0));
int64_t i = 0;
for (; i <= n - 2 * Vec::size(); i += 2 * Vec::size()) {
auto args1 = dereference_vec<traits>(&data[1], opt_scalar, S, i);
auto args2 = dereference_vec<traits>(&data[1], opt_scalar, S, i + Vec::size());
auto out1 = c10::guts::apply(std::forward<vec_func_t>(vop), std::move(args1));
auto out2 = c10::guts::apply(std::forward<vec_func_t>(vop), std::move(args2));
out1.store(data[0] + i * sizeof(scalar_t));
out2.store(data[0] + (i + Vec::size()) * sizeof(scalar_t));
}
if (i < n) {
int64_t strides[ntensors];
for (const auto arg : c10::irange(ntensors)) {
strides[arg] = (S > 0 && arg == S) ? 0 : sizeof(scalar_t);
}
basic_loop(data, strides, i, n, std::forward<func_t>(op));
}
}
template <typename traits, typename cb_t>
static inline void unroll_contiguous_scalar_checks(
const int64_t* /*strides*/,
std::index_sequence<>,
cb_t&& cb) {
cb(0);
}
template <typename traits, typename cb_t, size_t INDEX0, size_t ...INDEX>
static inline void unroll_contiguous_scalar_checks(
const int64_t* strides,
std::index_sequence<INDEX0, INDEX...>,
cb_t&& cb) {
if (is_contiguous_scalar<traits, INDEX0 + 1>(strides)) {
cb(INDEX0 + 1);
} else {
unroll_contiguous_scalar_checks<traits>(strides, std::index_sequence<INDEX...>{}, std::forward<cb_t>(cb));
}
}
template <typename op_t, typename vop_t>
struct VectorizedLoop2d {
op_t op;
vop_t vop;
using traits = function_traits<op_t>;
static constexpr int ntensors = traits::arity + 1;
using data_t = std::array<char*, ntensors>;
VectorizedLoop2d(const op_t &op, vop_t vop):
op(op), vop(std::move(vop)) {}
static void advance(data_t &data, const int64_t *outer_strides) {
for (const auto arg : c10::irange(data.size())) {
data[arg] += outer_strides[arg];
}
}
void operator()(char** base, const int64_t *strides, int64_t size0, int64_t size1) {
data_t data;
std::copy_n(base, ntensors, data.data());
const int64_t *outer_strides = &strides[ntensors];
if (is_contiguous<traits>(strides)) {
for (const auto i C10_UNUSED : c10::irange(size1)) {
vectorized_loop(data.data(), size0, 0, op, vop);
advance(data, outer_strides);
}
} else {
using Indices = std::make_index_sequence<traits::arity>;
unroll_contiguous_scalar_checks<traits>(strides, Indices{}, [&](size_t idx) {
if (idx) {
for (const auto i C10_UNUSED : c10::irange(size1)) {
vectorized_loop(data.data(), size0, idx, op, vop);
advance(data, outer_strides);
}
} else {
for (const auto i C10_UNUSED : c10::irange(size1)) {
basic_loop(data.data(), strides, 0, size0, op);
advance(data, outer_strides);
}
}
});
}
}
};
template <typename op_t, typename vop_t>
VectorizedLoop2d<op_t, vop_t> make_vectorized_loop2d(
const op_t &op, const vop_t &vop) {
return VectorizedLoop2d<op_t, vop_t>(op, vop);
}
template <typename func_t>
void cpu_kernel(TensorIteratorBase& iter, func_t&& op, int64_t grain_size = at::internal::GRAIN_SIZE) {
using traits = function_traits<func_t>;
// this could be extended to work with void return types
TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
// dynamic casting not currently supported on CPU
TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
iter.for_each([&](char** data, const int64_t* strides, int64_t n) {
// basic_loop can handle 1d slices with arbitrary strides, and 1d slices are
// all that iter.for_each ever sends to the loop lambda
basic_loop(data, strides, 0, n, std::forward<func_t>(op));
}, grain_size);
iter.cast_outputs();
}
// This function helps write elementwise kernels that require multiple outputs.
// It follows a structure similar to cpu_kernel, but uses a
// `multiple_outputs_loop` function instead of `basic_loop` to handle the
// multiple return values.
// For now the `needs_dynamic_casting` check is omitted, since the lambda
// (`func_t`) passed to `multiple_outputs_loop` returns `std::tuple` instead of
// `scalar_t`. `gpu_kernel_multiple_outputs` is also implemented without this
// check; we could extend `needs_dynamic_casting` to support both `std::tuple`
// and `thrust::tuple` in the future.
template <typename func_t>
void cpu_kernel_multiple_outputs(TensorIteratorBase& iter, func_t&& op, int64_t grain_size = at::internal::GRAIN_SIZE) {
using traits = function_traits<func_t>;
TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
iter.for_each([&](char** data, const int64_t* strides, int64_t n) {
multiple_outputs_loop(data, strides, 0, n, std::forward<func_t>(op));
}, grain_size);
iter.cast_outputs();
}
template <bool check_dynamic_cast=true, typename func_t, typename vec_func_t>
void cpu_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop, int64_t grain_size = at::internal::GRAIN_SIZE) {
using traits = function_traits<func_t>;
// this could be extended to work with void return types
TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
// dynamic casting not currently supported on CPU, but some kernels (like Fill)
// explicitly dynamic_cast, so we give the opt-out of checking.
if constexpr (check_dynamic_cast) {
TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
}
iter.for_each(make_vectorized_loop2d(op, vop), grain_size);
iter.cast_outputs();
}
template <typename func_t>
void cpu_serial_kernel(TensorIteratorBase& iter, func_t&& op, const Range& range) {
using traits = function_traits<func_t>;
constexpr bool result_void = std::is_void<typename traits::result_type>::value;
TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity &&
((result_void && iter.noutputs() == 0) || (!result_void && iter.noutputs() == 1)));
// dynamic casting not currently supported on CPU
TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
iter.serial_for_each([&](char** data, const int64_t* strides, int64_t n) {
basic_loop(data, strides, 0, n, std::forward<func_t>(op));
}, range);
iter.cast_outputs();
}
template <typename func_t>
void cpu_serial_kernel(TensorIteratorBase& iter, func_t&& op) {
cpu_serial_kernel(iter, op, {0, iter.numel()});
}
template <typename func_t, typename vec_func_t>
void cpu_serial_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop, const Range& range) {
using traits = function_traits<func_t>;
// this could be extended to work with void return types
TORCH_INTERNAL_ASSERT(iter.ninputs() == traits::arity);
TORCH_INTERNAL_ASSERT(iter.noutputs() == 1);
// dynamic casting not currently supported on CPU
TORCH_INTERNAL_ASSERT(!needs_dynamic_casting<func_t>::check(iter));
iter.serial_for_each(make_vectorized_loop2d(op, vop), range);
iter.cast_outputs();
}
template <typename func_t, typename vec_func_t>
void cpu_serial_kernel_vec(TensorIteratorBase& iter, func_t&& op, vec_func_t&& vop) {
cpu_serial_kernel_vec(iter, op, vop, {0, iter.numel()});
}
}}} // namespace at::native::<anonymous>
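// Hedged usage sketch: per the note at the top of this header, code like this
// is only valid inside an ATen/native/cpu translation unit and assumes a full
// ATen build. The tensors "a", "b" and "out" are hypothetical.
#include <ATen/ATen.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cpu/Loops.h>

void mul_example() {
  at::Tensor a = at::rand({8});
  at::Tensor b = at::rand({8});
  at::Tensor out = at::empty({8});
  auto iter = at::TensorIteratorConfig()
                  .add_output(out)
                  .add_input(a)
                  .add_input(b)
                  .build();
  // Scalar lambda only; cpu_kernel_vec would add an explicit Vectorized path.
  at::native::cpu_kernel(iter, [](float x, float y) { return x * y; });
}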
| 14,930
| 36.8
| 125
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/ReduceUtils.h
|
#pragma once
#include <ATen/Parallel.h>
#include <ATen/NumericUtils.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/cpu/vec/functional.h>
#include <ATen/native/ReductionType.h>
#include <c10/util/irange.h>
#include <ATen/OpMathType.h>
#include <ATen/native/cpu/utils.h>
namespace at::native {
inline namespace CPU_CAPABILITY {
using namespace vec;
#define AT_DISPATCH_REDUCTION_TYPES(op, ...) \
[&] { \
switch (op) { \
case ReductionType::SUM: { \
static constexpr auto reduce = ReductionType::SUM; \
return __VA_ARGS__(); \
} \
case ReductionType::MEAN: { \
static constexpr auto reduce = ReductionType::MEAN; \
return __VA_ARGS__(); \
} \
case ReductionType::MIN: { \
static constexpr auto reduce = ReductionType::MIN; \
return __VA_ARGS__(); \
} \
case ReductionType::MAX: { \
static constexpr auto reduce = ReductionType::MAX; \
return __VA_ARGS__(); \
} \
case ReductionType::PROD: { \
static constexpr auto reduce = ReductionType::PROD; \
return __VA_ARGS__(); \
} \
} \
}()
template <typename scalar_t, ReductionType reduce>
inline vec_scalar_t<scalar_t> init_value() {
using acc_t = vec_scalar_t<scalar_t>;
acc_t val;
if (reduce == ReductionType::SUM ||
reduce == ReductionType::MEAN) {
val = static_cast<acc_t>(0);
} else if (reduce == ReductionType::PROD) {
val = static_cast<acc_t>(1);
} else if (reduce == ReductionType::MAX) {
val = -std::numeric_limits<acc_t>::infinity();
} else {
TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN);
val = std::numeric_limits<acc_t>::infinity();
}
return val;
}
template <typename scalar_t, ReductionType reduce>
inline vec_scalar_t<scalar_t> init_value(const c10::optional<Scalar>& initial) {
using acc_t = vec_scalar_t<scalar_t>;
if (initial.has_value()) {
return initial.value().to<acc_t>();
} else {
return init_value<scalar_t, reduce>();
}
}
template <typename scalar_t>
inline void init(scalar_t* out, int64_t size, const vec_scalar_t<scalar_t>& val) {
using Vec = Vectorized<vec_scalar_t<scalar_t>>;
map<scalar_t>(
[val](Vec x) { return Vec(val); },
out,
out,
size);
}
template <typename scalar_t, ReductionType reduce>
inline void init(scalar_t* out, int64_t size, const c10::optional<Scalar>& initial) {
using acc_t = vec_scalar_t<scalar_t>;
acc_t val = init_value<scalar_t, reduce>(initial);
init(out, size, val);
}
// overload with `include_self`, used by scatter_reduce
template <typename scalar_t, ReductionType reduce>
inline void init(scalar_t* out, int64_t size, bool include_self = false) {
using acc_t = vec_scalar_t<scalar_t>;
if (!include_self) {
acc_t val = init_value<scalar_t, reduce>();
init(out, size, val);
}
}
template <typename scalar_t, ReductionType reduce>
inline void _init(scalar_t* self_ptr, at::opmath_type<scalar_t>* buffer_ptr, int64_t size, bool include_self) {
if (!include_self) {
init<at::opmath_type<scalar_t>, reduce>(buffer_ptr, size, include_self);
} else {
vec::convert(self_ptr, buffer_ptr, size);
}
}
template <typename scalar_t>
inline typename std::enable_if<!std::is_same<scalar_t, Vec2>::value, scalar_t>::type
_max(const scalar_t& x, const scalar_t& y) {
return at::_isnan(y) ? y : std::max(x, y);
}
template <typename scalar_t>
inline Vectorized<scalar_t> _max(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) {
// vec::maximum propagates NaN
return vec::maximum(x, y);
}
template <typename vec_t>
inline typename std::enable_if<std::is_same<vec_t, Vec2>::value, Vec2>::type
_max(const vec_t& x, const vec_t& y) {
// vec::maximum propagates NaN
return maximum(x, y);
}
template <typename scalar_t>
inline typename std::enable_if<!std::is_same<scalar_t, Vec2>::value, scalar_t>::type
_min(const scalar_t& x, const scalar_t& y) {
return at::_isnan(y) ? y : std::min(x, y);
}
template <typename scalar_t>
inline Vectorized<scalar_t> _min(const Vectorized<scalar_t>& x, const Vectorized<scalar_t>& y) {
// vec::minimum propagates NaN
return vec::minimum(x, y);
}
template <typename vec_t>
inline typename std::enable_if<std::is_same<vec_t, Vec2>::value, Vec2>::type
_min(const vec_t& x, const vec_t& y) {
// vec::minimum propagates NaN
return minimum(x, y);
}
template <typename scalar_t, typename accumut, typename Op,
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void map_acc(
const Op& vec_fun,
accumut* output_data,
const accumut* input_data,
const scalar_t* input_data2,
int64_t size) {
using Vec = vec::Vectorized<scalar_t>;
using aVec = vec::Vectorized<accumut>;
int64_t d = 0;
constexpr int64_t kVecSize = Vec::size();
constexpr int64_t kaVecSize = aVec::size();
for (d = 0; d < size - (size % kVecSize); d += kVecSize) {
Vec data2_vec = Vec::loadu(input_data2 + d);
aVec data2_avec0, data2_avec1;
std::tie(data2_avec0, data2_avec1) = convert_to_float<scalar_t>(data2_vec);
aVec input_vec0 = aVec::loadu(input_data + d);
aVec input_vec1 = aVec::loadu(input_data + d + kaVecSize);
vec_fun(input_vec0, data2_avec0).store(output_data + d);
vec_fun(input_vec1, data2_avec1).store(output_data + d + kaVecSize);
}
if (size - d > 0) {
int64_t tail_size = size - d;
Vec data2_vec = Vec::loadu(input_data2 + d, tail_size);
aVec data2_avec0, data2_avec1;
std::tie(data2_avec0, data2_avec1) = convert_to_float<scalar_t>(data2_vec);
if (tail_size > kaVecSize) {
aVec input_vec0 = aVec::loadu(input_data + d);
aVec input_vec1 = aVec::loadu(input_data + d + kaVecSize, tail_size - kaVecSize);
vec_fun(input_vec0, data2_avec0).store(output_data + d);
vec_fun(input_vec1, data2_avec1).store(output_data + d + kaVecSize, tail_size - kaVecSize);
} else {
aVec input_vec0 = aVec::loadu(input_data + d, tail_size);
vec_fun(input_vec0, data2_avec0).store(output_data + d, tail_size);
}
}
}
// for Max and Min, propagate NaN:
template <typename T, ReductionType reduce>
inline T update(const T& x, const T& y) {
if (reduce == ReductionType::SUM ||
reduce == ReductionType::MEAN) {
return x + y;
} else if (reduce == ReductionType::PROD) {
return x * y;
} else if (reduce == ReductionType::MAX) {
return _max(x, y);
} else {
TORCH_INTERNAL_ASSERT(reduce == ReductionType::MIN);
return _min(x, y);
}
}
template <typename scalar_t, ReductionType reduce>
inline void update(scalar_t* out, scalar_t* data, int64_t K) {
using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>;
map2<scalar_t>(
[](Vec x, Vec y) { return update<Vec, reduce>(x, y); },
out,
out,
data,
K);
}
template <typename scalar_t, ReductionType reduce,
typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
inline void update(at::opmath_type<scalar_t>* out, scalar_t* data, int64_t K) {
using opmath_t = at::opmath_type<scalar_t>;
using Vec = vec::Vectorized<opmath_t>;
map_acc<scalar_t, opmath_t>(
[](Vec x, Vec y) { return update<Vec, reduce>(x, y); },
out,
out,
data,
K);
}
template <typename scalar_t, ReductionType reduce>
inline void write(scalar_t* out, int64_t count, int64_t K) {
using Vec = vec::Vectorized<vec_scalar_t<scalar_t>>;
if (reduce == ReductionType::MEAN) {
if (count > 0) {
vec::map<scalar_t>(
[count](Vec x) { return x / Vec(count); },
out,
out,
K);
}
}
}
} // namespace CPU_CAPABILITY
} // namespace at::native
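// Standalone sketch (scalar-only, simplified) of the init/update/write
// protocol above for a MEAN reduction: start from the SUM identity element,
// add every contribution, then divide by the count in the write step.
#include <cstdio>

int main() {
  const float data[] = {1.0f, 2.0f, 3.0f, 4.0f};
  float acc = 0.0f;   // init_value for SUM/MEAN
  int count = 0;
  for (float v : data) {
    acc = acc + v;    // update() for SUM/MEAN is plain addition
    ++count;
  }
  if (count > 0) {
    acc /= count;     // write() applies the division only for MEAN
  }
  std::printf("%f\n", acc);  // 2.500000
  return 0;
}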
| 8,874
| 35.825726
| 111
|
h
|
null |
pytorch-main/aten/src/ATen/native/cpu/SerialStackImpl.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include <ATen/core/Tensor.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/Parallel.h>
#include <ATen/TensorIterator.h>
#include <ATen/cpu/vec/functional.h>
#include <ATen/cpu/vec/vec.h>
#include <c10/util/irange.h>
namespace at { namespace native { namespace detail {
struct InputMeta {
void* data_ptr;
int64_t inner_size;
InputMeta(const Tensor& t, int64_t dim, int64_t inner)
: data_ptr(t.data_ptr()), inner_size(t.sizes()[dim] * inner) {}
};
// This kernel is used by two TensorList types:
// 1. stack_serial_kernel uses at::ArrayRef<Tensor>
// 2. Static runtime calls this kernel directly (csrc/jit/runtime/static/ops.cpp) with
// ProcessedNodeInputWrapper.
// When making changes, make sure that they are compatible with both types!
template <typename scalar_t, typename TensorListType>
void stack_serial_kernel_impl(Tensor& result, TensorListType tensors, int64_t dim) {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
dim >= 0 && dim <= result.dim(),
"dim out of range in stack_serial_kernel_impl");
int64_t outer =
result.numel() / (result.sizes()[dim] * result.strides()[dim]);
scalar_t* result_data = result.data_ptr<scalar_t>();
int64_t ninputs = tensors.size();
std::vector<InputMeta> inputs;
inputs.reserve(ninputs);
for (const auto& tensor : tensors) {
inputs.emplace_back(tensor, dim, tensor.strides()[dim]);
}
using Vec = vec::Vectorized<scalar_t>;
scalar_t* result_ptr = result_data;
for (const auto i : c10::irange(outer)) {
for (const auto j : c10::irange(ninputs)) {
int64_t local_inner = inputs[j].inner_size;
scalar_t* input_ptr = (scalar_t*)(inputs[j].data_ptr) + i * local_inner;
if (local_inner < Vec::size()) {
for (const auto k : c10::irange(local_inner)) {
result_ptr[k] = input_ptr[k];
}
} else {
vec::map(
[](Vec x) { return x; }, result_ptr, input_ptr, local_inner);
}
result_ptr += local_inner;
}
}
}
// Checks to see whether native stack can be invoked under these conditions:
// - result and input tensors are contiguous
// - only one thread is used
// - no type promotion has to occur
// - tensors dtype is Double or Float
template <typename TensorListType>
bool can_use_native_serial_stack_impl(Tensor& result, TensorListType tensors, int64_t dim) {
TORCH_CHECK(tensors.size() > 0, "expected a non-empty list of Tensors");
const Tensor& first_tensor = tensors[0];
// stack dimension should be in range [0,firstTensor.dim())
// dim == firstTensor.dim() is a valid input, but it is handled by default code path
// that uses unsqueeze
if (dim >= first_tensor.dim()) return false;
// Native stack doesn't apply if any tensor would be skipped; an empty 1-D
// first tensor is handled by the default code path.
if (first_tensor.numel() == 0 && first_tensor.dim() == 1) return false;
// there should be no type promotion
if (result.dtype() != first_tensor.dtype()) return false;
auto first_tensor_mem_format = first_tensor.suggest_memory_format();
ScalarType dtype = first_tensor.scalar_type();
if (!result.is_contiguous(first_tensor_mem_format)) {
return false;
}
// fast path only works for Double and Float
if (dtype != ScalarType::Double && dtype != ScalarType::Float) {
return false;
}
// check remainder of inputs
auto const &first_tensor_shape = first_tensor.sizes();
for (const auto i : c10::irange(1, tensors.size())) {
auto const &tensor = tensors[i];
TORCH_CHECK(tensors[i].sizes() == first_tensor.sizes(),
"stack expects each tensor to be equal size, but got ", first_tensor_shape,
" at entry 0 and ", tensor.sizes(), " at entry ", i);
// every tensor must be contiguous
// tensor sizes and strides must be the same
// there should be no type promotion
if (!tensor.is_contiguous(first_tensor_mem_format) ||
tensor.strides() != first_tensor.strides() ||
tensor.dtype() != dtype) {
return false;
}
}
// fast native stack should only be used when it is not worth using multiple threads
// or there is only one thread. Note that we aren't checking result.numel() here because
// it may not have been resized and we want to defer that cost till later.
int64_t numel_in_stack = first_tensor.numel() * tensors.size();
return numel_in_stack < at::internal::GRAIN_SIZE || at::get_num_threads() == 1;
}
template <typename TensorListType, bool should_skip_overlap_check>
struct CanUseNativeSerialStack;
template <typename TensorListType>
struct CanUseNativeSerialStack<TensorListType, false> {
static bool call(Tensor& result, TensorListType tensors, int64_t dim) {
// Inputs cannot alias the output tensor
for (const auto i : c10::irange(tensors.size())) {
auto lap = at::get_overlap_status(result, tensors[i]);
TORCH_CHECK(lap != at::MemOverlapStatus::Partial &&
lap != at::MemOverlapStatus::Full, 0,
"unsupported operation: the input tensors cannot refer to any of the "
"output memory locations. Found overlap in input tensor ", i);
}
return can_use_native_serial_stack_impl(result, tensors, dim);
}
};
template <typename TensorListType>
struct CanUseNativeSerialStack<TensorListType, true> {
static bool call(Tensor& result, TensorListType tensors, int64_t dim) {
return can_use_native_serial_stack_impl(result, tensors, dim);
}
};
}}} // namespace at::native::detail
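// Standalone sketch (hypothetical data) of the copy order produced by
// stack_serial_kernel_impl: for each outer slice, every input contributes one
// inner_size chunk, so stacking two 2x2 inputs along dim=1 interleaves rows.
#include <cstdio>
#include <vector>

int main() {
  const std::vector<float> a = {1, 2, 3, 4};  // 2x2, row-major
  const std::vector<float> b = {5, 6, 7, 8};  // 2x2, row-major
  std::vector<float> out;
  const int outer = 2;       // rows of each input
  const int inner_size = 2;  // elements copied per input per outer step
  for (int i = 0; i < outer; ++i) {
    for (const auto* src : {&a, &b}) {
      out.insert(out.end(),
                 src->begin() + i * inner_size,
                 src->begin() + (i + 1) * inner_size);
    }
  }
  for (float v : out) {
    std::printf("%g ", v);   // 1 2 5 6 3 4 7 8 -> result shape 2x2x2
  }
  std::printf("\n");
  return 0;
}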
| 5,447
| 36.572414
| 92
|
h
|