repo
stringlengths 1
152
⌀ | file
stringlengths 14
221
| code
stringlengths 501
25k
| file_length
int64 501
25k
| avg_line_length
float64 20
99.5
| max_line_length
int64 21
134
| extension_type
stringclasses 2
values |
|---|---|---|---|---|---|---|
null |
pytorch-main/aten/src/ATen/nnapi/nnapi_bind.h
|
#ifndef NNAPI_BIND_H_
#define NNAPI_BIND_H_
#include <vector>
#include <ATen/ATen.h>
#include <torch/custom_class.h>
#include <ATen/nnapi/nnapi_wrapper.h>
namespace torch {
namespace nnapi {
namespace bind {
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TORCH_API extern nnapi_wrapper* nnapi;
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
TORCH_API extern nnapi_wrapper* check_nnapi;
// Generates an RAII smart-pointer alias (ModelPtr, CompilationPtr,
// ExecutionPtr) for each NNAPI handle type. The custom deleter forwards to
// the matching ANeuralNetworks<type>_free via the global `nnapi` wrapper;
// if the wrapper was never loaded, the handle must be null and freeing is a
// no-op (see the inline comment in the deleter).
#define MAKE_SMART_PTR(type) \
  struct type ## Freer { \
    void operator()(ANeuralNetworks ## type * obj) { \
      if (!nnapi) { /* obj must be null. */ return; } \
      nnapi-> type ## _free(obj); \
    } \
  }; \
  typedef std::unique_ptr<ANeuralNetworks ## type, type ## Freer> type ## Ptr;

MAKE_SMART_PTR(Model)
MAKE_SMART_PTR(Compilation)
MAKE_SMART_PTR(Execution)
#undef MAKE_SMART_PTR
// Wraps an NNAPI model + compilation pair as a TorchScript custom class.
// Lifecycle: construct, call init()/init2() once with a serialized model,
// then call run() repeatedly.
struct NnapiCompilation : torch::jit::CustomClassHolder {
  NnapiCompilation() = default;
  ~NnapiCompilation() override = default;

  // only necessary for older models that still call init()
  TORCH_API void init(
      at::Tensor serialized_model_tensor,
      std::vector<at::Tensor> parameter_buffers
  );

  // Builds and compiles the NNAPI model.
  // - serialized_model_tensor: tensor holding the serialized model bytes.
  // - parameter_buffers: weight/parameter tensors referenced by the model.
  // - compilation_preference: NOTE(review): presumably an
  //   ANEURALNETWORKS_PREFER_* value — confirm against callers.
  // - relax_f32_to_f16: presumably allows NNAPI to compute fp32 ops in
  //   fp16 precision — confirm in the implementation.
  TORCH_API void init2(
      at::Tensor serialized_model_tensor,
      const std::vector<at::Tensor>& parameter_buffers,
      int64_t compilation_preference,
      bool relax_f32_to_f16
  );

  // Executes the compiled model on `inputs`. NOTE(review): `outputs` is
  // taken by value; presumably the tensors are preallocated by the caller
  // and written through shared storage — confirm in the .cpp.
  TORCH_API void run(std::vector<at::Tensor> inputs, std::vector<at::Tensor> outputs);

  // Fills `operand` (and the backing `dims` vector) with the NNAPI operand
  // type describing tensor `t`.
  static void get_operand_type(const at::Tensor& t, ANeuralNetworksOperandType* operand, std::vector<uint32_t>* dims);

  ModelPtr model_;             // owned NNAPI model handle
  CompilationPtr compilation_; // owned NNAPI compilation handle
  int32_t num_inputs_ {};
  int32_t num_outputs_ {};
};
} // namespace bind
} // namespace nnapi
} // namespace torch
#endif // NNAPI_BIND_H_
| 1,772
| 25.462687
| 120
|
h
|
null |
pytorch-main/aten/src/ATen/nnapi/nnapi_model_loader.h
|
#ifndef NNAPI_MODEL_LOADER_H_
#define NNAPI_MODEL_LOADER_H_
#include <stdint.h>
#include <ATen/nnapi/NeuralNetworks.h>
#include <ATen/nnapi/nnapi_wrapper.h>
namespace caffe2 {
namespace nnapi {
// Parses `serialized_model` (`model_length` bytes) and populates `model`
// through the provided `nnapi` wrapper. `buffer_ptrs`/`buffer_sizes`
// (length `num_buffers`) and `memories`/`memory_sizes` (length
// `num_memories`) supply operand data referenced by the serialized model.
// On success, *out_input_count and *out_output_count receive the model's
// input/output counts and *out_bytes_consumed the number of bytes read.
// NOTE(review): return-value convention (presumably 0 on success) is
// defined in nnapi_model_loader.cpp — confirm there.
int load_nnapi_model(
    struct nnapi_wrapper* nnapi,
    ANeuralNetworksModel* model,
    const void* serialized_model,
    int64_t model_length,
    size_t num_buffers,
    const void** buffer_ptrs,
    int32_t* buffer_sizes,
    size_t num_memories,
    ANeuralNetworksMemory** memories,
    int32_t* memory_sizes,
    int32_t* out_input_count,
    int32_t* out_output_count,
    size_t* out_bytes_consumed);
}} // namespace caffe2::nnapi
#endif // NNAPI_MODEL_LOADER_H_
| 675
| 21.533333
| 38
|
h
|
null |
pytorch-main/aten/src/ATen/ops/from_blob.h
|
#pragma once
#include <ATen/core/Tensor.h>
namespace at {
namespace detail {
// Deleter that intentionally does nothing; used as the default context
// deleter when the caller retains ownership of the underlying buffer.
TORCH_API inline void noopDelete(void*) {}
} // namespace detail
/// Provides a fluent API to construct tensors from external data.
///
/// The fluent API can be used instead of `from_blob` functions in case the
/// required set of parameters does not align with the existing overloads.
///
/// at::Tensor tensor = at::for_blob(data, sizes)
/// .strides(strides)
/// .context(context, [](void *ctx) { delete static_cast<Ctx*>(ctx); })
/// .options(...)
/// .make_tensor();
///
/// Fluent builder for wrapping externally-owned memory in a Tensor.
/// Construct via `for_blob()`, chain the optional setters, then call
/// `make_tensor()`.
class TORCH_API TensorMaker {
  friend TensorMaker for_blob(void* data, IntArrayRef sizes) noexcept;

 public:
  using ContextDeleter = DeleterFnPtr;

  // Sets explicit strides for the resulting tensor.
  TensorMaker& strides(OptionalIntArrayRef value) noexcept {
    strides_ = value;
    return *this;
  }

  // Sets the storage offset of the resulting tensor.
  TensorMaker& storage_offset(c10::optional<int64_t> value) noexcept {
    storage_offset_ = value;
    return *this;
  }

  // Registers a callback invoked with the data pointer when the tensor
  // releases the buffer.
  TensorMaker& deleter(std::function<void(void*)> value) noexcept {
    deleter_ = std::move(value);
    return *this;
  }

  // Attaches an opaque context pointer freed by `deleter`; a null deleter
  // falls back to detail::noopDelete (caller keeps ownership).
  TensorMaker& context(void* value, ContextDeleter deleter = nullptr) noexcept {
    ctx_ = std::unique_ptr<void, ContextDeleter>{
        value, deleter != nullptr ? deleter : detail::noopDelete};
    return *this;
  }

  // Target device for the resulting tensor.
  TensorMaker& target_device(c10::optional<Device> value) noexcept {
    device_ = value;
    return *this;
  }

  // TensorOptions (dtype, layout, ...) for the resulting tensor.
  TensorMaker& options(TensorOptions value) noexcept {
    opts_ = value;
    return *this;
  }

  // Finalizes the builder and constructs the tensor (defined out-of-line).
  Tensor make_tensor();

 private:
  explicit TensorMaker(void* data, IntArrayRef sizes) noexcept
      : data_{data}, sizes_{sizes} {}

  std::size_t computeStorageSize() const noexcept;
  DataPtr makeDataPtrFromDeleter() const;
  DataPtr makeDataPtrFromContext() noexcept;
  IntArrayRef makeTempSizes() const noexcept;

  void* data_;
  IntArrayRef sizes_;
  OptionalIntArrayRef strides_{};
  c10::optional<int64_t> storage_offset_{};
  std::function<void(void*)> deleter_{};
  std::unique_ptr<void, ContextDeleter> ctx_{nullptr, detail::noopDelete};
  c10::optional<Device> device_{};
  TensorOptions opts_{};
};
/// Entry point of the fluent API: creates a TensorMaker for external
/// `data` with the given `sizes`.
inline TensorMaker for_blob(void* data, IntArrayRef sizes) noexcept {
  TensorMaker maker{data, sizes};
  return maker;
}
/// Wraps `data` (with explicit strides) in a Tensor that calls `deleter`
/// on the data pointer once the buffer is no longer referenced.
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    IntArrayRef strides,
    const std::function<void(void*)>& deleter,
    const TensorOptions& options = {},
    const c10::optional<Device> target_device = c10::nullopt) {
  TensorMaker maker = for_blob(data, sizes);
  maker.strides(strides)
      .deleter(deleter)
      .options(options)
      .target_device(target_device);
  return maker.make_tensor();
}
/// Same as the strided `from_blob` overload above, additionally applying a
/// `storage_offset` into the provided buffer.
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    IntArrayRef strides,
    int64_t storage_offset,
    const std::function<void(void*)>& deleter,
    const TensorOptions& options = {},
    const c10::optional<Device> target_device = c10::nullopt) {
  TensorMaker maker = for_blob(data, sizes);
  maker.strides(strides)
      .storage_offset(storage_offset)
      .deleter(deleter)
      .options(options)
      .target_device(target_device);
  return maker.make_tensor();
}
/// Wraps `data` in a Tensor (no explicit strides) that calls `deleter`
/// once the buffer is no longer referenced.
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    const std::function<void(void*)>& deleter,
    const TensorOptions& options = {},
    const c10::optional<Device> target_device = c10::nullopt) {
  TensorMaker maker = for_blob(data, sizes);
  maker.deleter(deleter)
      .options(options)
      .target_device(target_device);
  return maker.make_tensor();
}
/// Wraps `data` (with explicit strides) in a Tensor; the caller keeps
/// ownership of the buffer (no deleter is attached).
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    IntArrayRef strides,
    const TensorOptions& options = {}) {
  TensorMaker maker = for_blob(data, sizes);
  maker.strides(strides).options(options);
  return maker.make_tensor();
}
/// Simplest overload: wraps `data` in a Tensor; the caller keeps ownership
/// of the buffer.
inline Tensor from_blob(
    void* data,
    IntArrayRef sizes,
    const TensorOptions& options = {}) {
  TensorMaker maker = for_blob(data, sizes);
  maker.options(options);
  return maker.make_tensor();
}
} // namespace at
| 3,930
| 23.879747
| 83
|
h
|
null |
pytorch-main/aten/src/ATen/ops/tensor.h
|
#pragma once
#include <ATen/core/Tensor.h>
#include <c10/core/ScalarType.h>
namespace at {
// These functions are defined in ATen/Utils.cpp.
// For element type T with ScalarType tag S, expands to six at::tensor()
// overloads: {ArrayRef, initializer_list, single scalar} each with and
// without an explicit TensorOptions argument. The options-less ArrayRef
// overload defaults the dtype to k<S>; the other options-less forms
// forward to it.
#define TENSOR(T, S) \
  TORCH_API Tensor tensor(ArrayRef<T> values, const TensorOptions& options); \
  inline Tensor tensor( \
      std::initializer_list<T> values, const TensorOptions& options) { \
    return at::tensor(ArrayRef<T>(values), options); \
  } \
  inline Tensor tensor(T value, const TensorOptions& options) { \
    return at::tensor(ArrayRef<T>(value), options); \
  } \
  inline Tensor tensor(ArrayRef<T> values) { \
    return at::tensor(std::move(values), at::dtype(k##S)); \
  } \
  inline Tensor tensor(std::initializer_list<T> values) { \
    return at::tensor(ArrayRef<T>(values)); \
  } \
  inline Tensor tensor(T value) { \
    return at::tensor(ArrayRef<T>(value)); \
  }
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR
} // namespace at
| 1,631
| 51.645161
| 79
|
h
|
null |
pytorch-main/aten/src/ATen/quantized/QTensorImpl.h
|
#pragma once
#include <ATen/quantized/Quantizer.h>
#include <c10/core/TensorImpl.h>
#include <c10/util/Exception.h>
namespace at {
/**
* QTensorImpl is a TensorImpl for Quantized Tensors, it stores Quantizer which
* specifies the quantization scheme and parameters, for more information please
* see ATen/quantized/Quantizer.h
*
* We'll use QTensor in code or documentation to refer to a Tensor with QTensorImpl.
*/
struct TORCH_API QTensorImpl : public c10::TensorImpl {
 public:
  QTensorImpl(
      Storage&& storage,
      DispatchKeySet key_set,
      const caffe2::TypeMeta data_type,
      QuantizerPtr quantizer);

  // See Note [Enum ImplType]
  QTensorImpl(
      ImplType type,
      Storage&& storage,
      DispatchKeySet key_set,
      const caffe2::TypeMeta data_type,
      QuantizerPtr quantizer);

  // TODO: Expose in PyTorch Frontend
  // Returns the quantizer holding this tensor's quantization scheme and
  // parameters.
  QuantizerPtr quantizer() {
    return quantizer_;
  }

  // Replaces this tensor's quantizer.
  void set_quantizer_(QuantizerPtr quantizer) {
    quantizer_ = quantizer;
  }

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) const override {
    auto impl = c10::make_intrusive<QTensorImpl>(
        Storage(storage()), key_set(), data_type_, quantizer_);
    copy_tensor_metadata(
        /*src_impl=*/this,
        /*dest_impl=*/impl.get(),
        /*version_counter=*/version_counter,
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
    impl->refresh_numel();
    impl->refresh_contiguous();
    return impl;
  }

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override {
    auto impl = c10::make_intrusive<QTensorImpl>(
        Storage(storage()), key_set(), data_type_, quantizer_);
    copy_tensor_metadata(
        /*src_impl=*/this,
        /*dest_impl=*/impl.get(),
        /*version_counter=*/std::move(version_counter),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
    impl->refresh_numel();
    impl->refresh_contiguous();
    return impl;
  }

  /**
   * Shallow-copies data from another TensorImpl into this TensorImpl.
   *
   * For why this function doesn't check this TensorImpl's `allow_tensor_metadata_change_`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
    AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
    auto q_impl = static_cast<const QTensorImpl*>(impl.get());
    copy_tensor_metadata(
        /*src_impl=*/q_impl,
        /*dest_impl=*/this,
        /*version_counter=*/version_counter(),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
    refresh_numel();
    refresh_contiguous();
  }

 private:
  QuantizerPtr quantizer_;

  const char* tensorimpl_type_name() const override;

  /**
   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / storage_offset)
   * from one TensorImpl to another TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE [ TensorImpl Shallow-Copying ].
   */
  static void copy_tensor_metadata(
      const QTensorImpl* src_q_impl,
      QTensorImpl* dest_q_impl,
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) {
    TensorImpl::copy_tensor_metadata(src_q_impl, dest_q_impl, version_counter, allow_tensor_metadata_change);
    // QTensorImpl-specific fields.
    dest_q_impl->quantizer_ = src_q_impl->quantizer_;
  }
};
} // namespace at
| 4,009
| 30.825397
| 112
|
h
|
null |
pytorch-main/aten/src/ATen/quantized/Quantizer.h
|
#pragma once
#include <c10/core/QScheme.h>
#include <c10/core/MemoryFormat.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/core/ScalarType.h>
#include <c10/core/TensorOptions.h>
#include <ATen/Tensor.h>
#include <ATen/TensorUtils.h>
#include <ATen/core/QuantizerBase.h>
#include <cmath>
#include <memory>
#include <utility>
namespace at {
/**
* UnknownQuantizer is a placeholder quantizer for functions that implement
* quantization in a two step process. First a tensor is allocated but with
* unknown quantizer, and then the quantization kernel decides what the final
* quantizer will be.
*/
struct TORCH_API UnknownQuantizer : public Quantizer {
  explicit UnknownQuantizer(ScalarType scalar_type)
      : Quantizer(scalar_type) {}

  // All overrides are defined out-of-line.
  Tensor quantize(const Tensor& tensor) override;
  Tensor dequantize(const Tensor& qtensor) override;
  Tensor& dequantize_out(Tensor& rtensor, const Tensor& qtensor) override;
  QScheme qscheme() const override;
  bool equalTo(QuantizerPtr other) const override;
};
/**
* UniformQuantizer is the parent class for all uniform quantizers.
* These quantization scheme will map float value uniformly to
* the quantized value. For example, affine quantizer is
* the most commonly used scheme in this category.
*/
// Abstract tag class; adds no state or behavior beyond Quantizer.
struct TORCH_API UniformQuantizer : public Quantizer {
  explicit UniformQuantizer(ScalarType scalar_type) : Quantizer(scalar_type) {}
};

/**
 * NonUniformQuantizer is the parent class for all non-uniform quantizers.
 * These quantization scheme may map float value non-uniformly to the quantized
 * value. K-means quantization is a representative example in this category.
 */
// Abstract tag class; adds no state or behavior beyond Quantizer.
struct TORCH_API NonUniformQuantizer : public Quantizer {
  explicit NonUniformQuantizer(ScalarType scalar_type) : Quantizer(scalar_type) {}
};

// There is also StochasticQuantizer which is uniform but not affine

/**
 * AffineQuantizer uses affine transformation to do quantization.
 *
 * For quantize:
 * Y = clamp(round(X / scale + zero_point), min, max)
 * For dequantize:
 * X = (Y - zero_point) * scale
 */
// Abstract tag class; concrete affine quantizers derive from it below.
struct TORCH_API AffineQuantizer : public UniformQuantizer {
  explicit AffineQuantizer(ScalarType scalar_type) : UniformQuantizer(scalar_type) {}
};
// Note that we will not have Symmetric Quantizer in backend to reduce
// complications in quantized kernel implementation.
/**
* PerTensorAffineQuantizer stores a scale and a zero_point, which is used for
* all the values in the Tensor.
*/
struct TORCH_API PerTensorAffineQuantizer : public AffineQuantizer {
  explicit PerTensorAffineQuantizer(ScalarType scalar_type, double scale, int64_t zero_point)
      : AffineQuantizer(scalar_type),
        scale_(scale),
        zero_point_(zero_point) {}

  Tensor quantize(const Tensor& tensor) override;
  Tensor dequantize(const Tensor& qtensor) override;
  Tensor& dequantize_out(Tensor& rtensor, const Tensor& qtensor) override;

  QScheme qscheme() const override {
    return kPerTensorAffine;
  }

  double scale() const {
    return scale_;
  }

  int64_t zero_point() const {
    return zero_point_;
  }

  // Two per-tensor-affine quantizers compare equal iff their scalar type,
  // scale, and zero_point all match.
  bool equalTo(QuantizerPtr other) const override {
    if (!other.get() || other->qscheme() != kPerTensorAffine) {
      return false;
    }
    auto* other_per_tensor_affine =
        static_cast<PerTensorAffineQuantizer*>(other.get());
    return scalar_type() == other_per_tensor_affine->scalar_type() &&
        scale() == other_per_tensor_affine->scale() &&
        zero_point() == other_per_tensor_affine->zero_point();
  }

 private:
  const double scale_;
  // We use int64_t for consistency with Python
  const int64_t zero_point_;
};
/**
* PerChannelAffineQuantizer is the same as PerTensorAffineQuantizer
* except that we have an independent scale and zero_point parameter
* for each channel.
*
* Also note that per channel quantization is mostly applied to output channels
* of weights since per-input channel of weight quantization or per-channel
* quantization for activations can't be efficiently supported in most of
* processors since it requires each multiplication result within a single
* dot-product to have a different scale.
*/
struct TORCH_API PerChannelAffineQuantizer : public AffineQuantizer {
  explicit PerChannelAffineQuantizer(
      ScalarType scalar_type,
      Tensor scales,
      Tensor zero_points,
      int64_t axis)
      : AffineQuantizer(scalar_type),
        scales_(std::move(scales)),
        zero_points_(std::move(zero_points)),
        axis_(axis) {}

  QScheme qscheme() const override {
    return kPerChannelAffine;
  }

  // Per-channel scale values, one entry per channel along `axis`.
  Tensor scales() const {
    return scales_;
  }

  // Per-channel zero points, one entry per channel along `axis`.
  Tensor zero_points() const {
    return zero_points_;
  }

  // The dimension along which quantization parameters vary.
  int64_t axis() const {
    return axis_;
  }

  Tensor quantize(const Tensor& tensor) override;
  Tensor dequantize(const Tensor& qtensor) override;
  Tensor& dequantize_out(Tensor& rtensor, const Tensor& qtensor) override;

  // Equal iff scalar type, scales (element-wise), zero_points
  // (element-wise), and axis all match.
  bool equalTo(QuantizerPtr other) const override {
    if (!other.get() || other->qscheme() != kPerChannelAffine) {
      return false;
    }
    auto* other_per_channel_affine =
        static_cast<PerChannelAffineQuantizer*>(other.get());
    return scalar_type() == other_per_channel_affine->scalar_type() &&
        scales().equal(other_per_channel_affine->scales()) &&
        zero_points().equal(other_per_channel_affine->zero_points()) &&
        axis() == other_per_channel_affine->axis();
  }

 protected:
  Tensor scales_;
  Tensor zero_points_;
  const int64_t axis_;
};
/**
* PerChannelAffineFloatQParamsQuantizer is the same as PerChannelAffineQuantizer
* except that it expects both scale and zero point to be floating point values.
*
* This quantizer uses the kPerChannelAffineFloatQParams qscheme which is a variant of
* kPerChannelAffine.
*
* The quantize equation in this case looks like -
* Xq = (Xf - zero_point) * inv_scale, where inv_scale = 1.0/scale
*
* Note: Usage of floating point zero point is useful in cases where 0 doesn't need to
* be exactly represented in the quantized space. We can get additional precision by
* using floating point values for zero point.
*/
struct TORCH_API PerChannelAffineFloatQParamsQuantizer : public PerChannelAffineQuantizer {
  explicit PerChannelAffineFloatQParamsQuantizer(
      ScalarType scalar_type,
      Tensor scales,
      Tensor zero_points,
      int64_t axis)
      : PerChannelAffineQuantizer(scalar_type,
            // Move the by-value Tensor parameters into the base instead of
            // copying them: mirrors PerChannelAffineQuantizer's own ctor
            // and avoids two needless intrusive refcount round-trips.
            std::move(scales),
            std::move(zero_points),
            axis) {}

  QScheme qscheme() const override {
    return kPerChannelAffineFloatQParams;
  }

  Tensor quantize(const Tensor& tensor) override;
  Tensor dequantize(const Tensor& qtensor) override;
  Tensor& dequantize_out(Tensor& rtensor, const Tensor& qtensor) override;

  // Equal iff scalar type, scales (element-wise), zero_points
  // (element-wise), and axis all match.
  bool equalTo(QuantizerPtr other) const override {
    if (!other.get() || other->qscheme() != kPerChannelAffineFloatQParams) {
      return false;
    }
    auto* other_per_channel_float_qparams =
        static_cast<PerChannelAffineFloatQParamsQuantizer*>(other.get());
    return scalar_type() == other_per_channel_float_qparams->scalar_type() &&
        scales().equal(other_per_channel_float_qparams->scales()) &&
        zero_points().equal(other_per_channel_float_qparams->zero_points()) &&
        axis() == other_per_channel_float_qparams->axis();
  }
};
// This is an internal utility function for getting at the QTensorImpl,
// You should only use this for writing low level
// setters/getters for QTensorImpl fields; otherwise, you should use
// the low level setters/getters that were implemented using this.
// This may be called repeatedly, so make sure it's pretty cheap.
TORCH_API QTensorImpl* get_qtensorimpl(const TensorBase& self);

// double and int64_t are because of the native function API, we only have these
// argument types right now in native functions
TORCH_API QuantizerPtr
make_per_tensor_affine_quantizer(
    double scale, int64_t zero_point, ScalarType scalar_type);

TORCH_API QuantizerPtr make_per_channel_affine_quantizer(
    const Tensor& scales,
    const Tensor& zero_points,
    int64_t axis,
    ScalarType scalar_type);

TORCH_API QuantizerPtr make_unknown_quantizer(ScalarType scalar_type);

// Create a Quantized Tensor given arguments for normal Tensor and a quantizer
TORCH_API Tensor new_qtensor(
    IntArrayRef sizes,
    const TensorOptions& options,
    QuantizerPtr quantizer);

TORCH_API void set_quantizer_(const Tensor& self, ConstQuantizerPtr quantizer);

// Wraps externally-owned memory as a per-tensor-affine quantized tensor;
// `deleter` is invoked on the data pointer when the tensor releases it.
TORCH_API Tensor from_blob_quantized_per_tensor_affine(
    void* data,
    IntArrayRef sizes,
    IntArrayRef strides,
    std::function<void(void*)> deleter,
    const float scale,
    const int64_t zeroPoint,
    const TensorOptions& options);

// Same as above without explicit strides. NOTE(review): presumably strides
// default to contiguous — confirm in the implementation.
TORCH_API Tensor from_blob_quantized_per_tensor_affine(
    void* data,
    IntArrayRef sizes,
    std::function<void(void*)> deleter,
    const float scale,
    const int64_t zeroPoint,
    const TensorOptions& options);

// Per-channel-affine variant: `scales`/`zero_points` hold one entry per
// channel along `axis`.
TORCH_API Tensor from_blob_quantized_per_channel_affine(
    void* data,
    IntArrayRef sizes,
    std::function<void(void*)> deleter,
    const Tensor& scales,
    const Tensor& zero_points,
    const int64_t axis,
    const TensorOptions& options);
} // namespace at
| 9,232
| 31.975
| 93
|
h
|
null |
pytorch-main/aten/src/ATen/templates/DispatchKeyFunction.h
|
#pragma once
// ${generated_comment}
// NB: The implementing C++ file is RegisterDispatchKey.cpp
// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>
// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>
namespace at {
namespace ${dispatch_namespace} {
${dispatch_namespaced_declarations}
} // namespace ${dispatch_namespace}
} // namespace at
| 702
| 28.291667
| 95
|
h
|
null |
pytorch-main/aten/src/ATen/templates/DispatchKeyFunctions.h
|
#include <ATen/core/TensorBody.h>
// TODO Undo all logic introduced for Note [Avoiding Include Cycles In Static Dispatch]
// Code introduced to avoid cyclic dependency in static dispatch is no longer
// needed as static dispatch logic is moved from TensorBody.h, which caused cycles in the first place,
// to Operators.cpp for supporting multiple backends with multiple kernels.
//
// Note [Avoiding Include Cycles In Static Dispatch]
// In order to avoid #include cycles in the static dispatch build, we've carefully split out
// the static function definition files into {DispatchKey}Functions.h and {DispatchKey}Functions_inl.h.
//
// Without this split, the include cycle looks like TensorBody.h -> CPUFunctions.h -> TensorBody.h.
// - TensorBody.h #includes CPUFunctions.h in the static dispatch build, because the tensor methods
// all need to call into the fastpath C++ API defined in CPUFunctions.h. The methods are also all
// directly inlined into TensorBody.h.
// - CPUFunctions.h #includes TensorBody.h because it contains function declarations for the entire C++ API,
// which include functions that have defaultable optional<Tensor> arguments.
// That requires knowing the full Tensor class definition.
//
// We break the cycle by doing the following:
// - Split out CPUFunctions.h into two files: CPUFunctions.h and CPUFunctions_inl.h
// - CPUFunctions.h is a dummy file that just includes the Tensor class and includes CPUFunctions_inl.h,
// - CPUFunctions_inl.h includes everything else
// - (only in the static dispatch build) TensorBody.h makes sure to finish defining the Tensor class,
// and then it includes CPUFunctions_inl.h.
// - All other files that want the cpu fastpath functions can include CPUFunctions.h directly.
// - This also means that in the static dispatch build, CPUFunctions.h only needs to
// #include TensorBody.h, and it will automatically bring in CPUFunctions_inl.h.
${inline_headers}
| 1,932
| 63.433333
| 108
|
h
|
null |
pytorch-main/aten/src/ATen/templates/DispatchKeyFunctions_inl.h
|
#pragma once
// ${generated_comment}
// NB: The implementing C++ file is RegisterDispatchKey.cpp
// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from \
<ATen/ops/{my_operator}_${dispatch_namespace}_dispatch.h>. \
See NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif
${DispatchKeyFunctions_inl_includes}
${dispatch_namespaced_declarations}
| 824
| 34.869565
| 86
|
h
|
null |
pytorch-main/aten/src/ATen/templates/Functions.h
|
#pragma once
// ${generated_comment}
#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if your change would be better placed in \
another file, or if a more specific header might achieve the same goal. \
See NOTE: [Tensor vs. TensorBase]
#endif
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from <ATen/ops/{my_operator}.h> and \
see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif
// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
//
// In ATen, certain generated headers files include the definitions of
// every single operator in PyTorch. Unfortunately this means every
// time an operator signature is updated or changed in
// native_functions.yaml, you (and every other PyTorch developer) need
// to recompile every source file that includes any of these headers.
//
// To break up these header dependencies, and improve incremental
// build times for all PyTorch developers. These headers are split
// into per-operator headers in the `ATen/ops` folder. This limits
// incremental builds to only changes to methods of `Tensor`, or files
// that use the specific operator being changed. With `at::sum` as an
// example, you should include
//
// <ATen/ops/sum.h> // instead of ATen/Functions.h
// <ATen/ops/sum_native.h> // instead of ATen/NativeFunctions.h
// <ATen/ops/sum_ops.h> // instead of ATen/Operators.h
// <ATen/ops/sum_cpu_dispatch.h> // instead of ATen/CPUFunctions.h
//
// However, even if you're careful to use this in your own code.
// `Functions.h` might be included indirectly through another header
// without you realising. To avoid this, you can add
//
// #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//
// to the top of your source file. This way any time the non-specific
// headers are included, the compiler will error out.
//
// Also, be aware that `ops` are not available in all build
// configurations (namely fb-internal) so you must guard these
// includes with `#ifdef AT_PER_OPERATOR_HEADERS`. e.g.
//
// #ifndef AT_PER_OPERATOR_HEADERS
// #include <ATen/Functions.h>
// #else
// #include <ATen/ops/sum.h>
// #endif
#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <c10/core/SymInt.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/util/OptionalArrayRef.h>
#include <ATen/ops/from_blob.h>
#include <ATen/ops/tensor.h>
${Functions_includes}
namespace at {
${Functions_declarations}
// Special C++ only overloads for std()-like functions (See gh-40287)
// These are needed because int -> bool conversion takes precedence over int -> IntArrayRef
// So, for example std(0) would select the std(unbiased=False) overload
// int-dim overload: lets `var(t, 0)` select the dim overload instead of
// binding 0 to a bool parameter (int -> bool conversion would otherwise
// win; see gh-40287 above).
TORCH_API inline Tensor var(const Tensor& self, int dim) {
  return at::var(self, IntArrayRef{dim});
}

// int-dim overload of var_mean; see comment on var() above.
TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
  return at::var_mean(self, IntArrayRef{dim});
}

// int-dim overload of std; see comment on var() above.
TORCH_API inline Tensor std(const Tensor& self, int dim) {
  return at::std(self, IntArrayRef{dim});
}

// int-dim overload of std_mean; see comment on var() above.
TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
  return at::std_mean(self, IntArrayRef{dim});
}

// Free-function mirrors of common Tensor member accessors/predicates.
inline int64_t numel(const Tensor& tensor) {
  return tensor.numel();
}

inline int64_t size(const Tensor& tensor, int64_t dim) {
  return tensor.size(dim);
}

inline int64_t stride(const Tensor& tensor, int64_t dim) {
  return tensor.stride(dim);
}

inline bool is_complex(const Tensor& tensor) {
  return tensor.is_complex();
}

inline bool is_floating_point(const Tensor& tensor) {
  return tensor.is_floating_point();
}

inline bool is_signed(const Tensor& tensor) {
  return tensor.is_signed();
}

inline bool is_inference(const Tensor& tensor) {
  return tensor.is_inference();
}

inline bool _is_zerotensor(const Tensor& tensor) {
  return tensor._is_zerotensor();
}

inline bool is_conj(const Tensor& tensor) {
  return tensor.is_conj();
}

inline Tensor conj(const Tensor& tensor) {
  return tensor.conj();
}

inline bool is_neg(const Tensor& tensor) {
  return tensor.is_neg();
}
}
| 4,688
| 31.5625
| 91
|
h
|
null |
pytorch-main/aten/src/ATen/templates/LazyIr.h
|
#pragma once
// This file contains autogenerated LazyTensor IR nodes
${lazy_ir_sysinc}
${lazy_ir_inc}
${namespace_prologue}
using at::operator<<;
// kNullValue is used to contribute a static hash value any time
// a node has an Optional<Value> input that is nullopt. It is important
// to differentiate between HASH(nullopt, something) and HASH(something, nullopt),
// and using kNullValue in the hash function in the order of arguments
// serves this purpose.
static const torch::lazy::Value kNullValue = torch::lazy::Value();
${ir_declarations}
${namespace_epilogue}
| 575
| 27.8
| 82
|
h
|
null |
pytorch-main/aten/src/ATen/templates/MethodOperators.h
|
#pragma once
// ${generated_comment}
#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if your change would be better placed in \
another file, or if a more specific header might achieve the same goal. \
See NOTE: [Tensor vs. TensorBase]
#endif
// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>
${MethodOperators_includes}
namespace at {
namespace _ops {
${MethodOperators_declarations}
} // namespace _ops
} // namespace at
| 830
| 32.24
| 95
|
h
|
null |
pytorch-main/aten/src/ATen/templates/NativeFunctions.h
|
#pragma once
// ${generated_comment}
#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if your change would be better placed in \
another file, or if a more specific header might achieve the same goal. \
See NOTE: [Tensor vs. TensorBase]
#endif
#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from <ATen/ops/{my_operator}_native.h> \
and see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>
${NativeFunctions_includes}
${NativeFunctions_declarations}
| 1,160
| 33.147059
| 83
|
h
|
null |
pytorch-main/aten/src/ATen/templates/RedispatchFunctions.h
|
#pragma once
// ${generated_comment}
#ifdef TORCH_ASSERT_ONLY_METHOD_OPERATORS
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider using the at::_ops::{name}::redispatch() interface by including \
the specific operator from <ATen/ops/{my_operator}_ops.h>
#endif
#include <c10/core/Scalar.h>
#include <ATen/Tensor.h>
#include <c10/core/Storage.h>
#include <ATen/core/Generator.h>
#include <c10/util/Deprecated.h>
#include <ATen/DeviceGuard.h>
#include <c10/core/TensorOptions.h>
#include <ATen/core/Reduction.h>
#include <c10/util/Optional.h>
#include <ATen/TensorUtils.h>
#include <ATen/Context.h>
#include <ATen/TracerMode.h>
#include <ATen/Operators.h>
namespace at {
namespace redispatch {
${function_redispatch_definitions}
} // namespace redispatch
}
| 893
| 26.090909
| 80
|
h
|
null |
pytorch-main/aten/src/ATen/templates/UnboxingFunctions.h
|
// ${generated_comment}
// Generated by tools/jit/gen_unboxing.py. This file declares code generated boxed C++ functions for operators,
// base off of native_functions.yaml (or similar yaml file with the same syntax). The definition of such a boxed
// function will pop out IValues from the stack then convert them into the correct C++ types based on given schema. This
// unboxing logic is an alternative to template-based metaprogramming unboxing.
#pragma once
#include <ATen/ATen.h>
namespace at {
namespace unboxing {
namespace {
// Converts a c10::List of IValues into a fixed-size std::array<T, N>,
// asserting that the runtime list length matches the expected arity N.
//
// Elements are written directly into the result array, avoiding the
// intermediate heap-allocated std::vector (plus extra copy) that the
// previous implementation used.
template<typename T, size_t N>
std::array<T, N> as_array(const c10::List<c10::IValue>& list) {
  AT_ASSERT(list.size() == N);
  std::array<T, N> res;
  size_t i = 0;
  for (c10::IValue elem : list) {
    res[i++] = elem.to<T>();
  }
  return res;
}
} // namespace <anonymous>
using Stack = std::vector<c10::IValue>;
// Generated function declaration
${declarations}
} // namespace unboxing
} // namespace at
| 1,026
| 30.121212
| 120
|
h
|
null |
pytorch-main/aten/src/ATen/templates/aten_interned_strings.h
|
#pragma once
// ${generated_comment}
#if defined(TORCH_ASSERT_NO_OPERATORS) || defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if including <ATen/core/symbol.h> for \
the c10::Symbol class would be sufficient, or if your change would be \
better placed in another file.
#endif
// ATen symbols correspond exactly to operators defined in ATen. Every
// symbol here corresponds exactly to an ATen operation defined in
// native_functions.yaml; attributes are in one-to-one correspondence
// with their ATen name.
#define FORALL_ATEN_BASE_SYMBOLS(_) \
${aten_symbols}
#define FORALL_ATTR_BASE_SYMBOLS(_) \
${attr_symbols}
| 805
| 34.043478
| 85
|
h
|
null |
pytorch-main/aten/src/ATen/test/reportMemoryUsage.h
|
#pragma once
#include <ATen/ATen.h>
#include <c10/core/Allocator.h>
#include <c10/util/ThreadLocalDebugInfo.h>
class TestMemoryReportingInfo : public c10::MemoryReportingInfoBase {
public:
struct Record {
void* ptr;
int64_t alloc_size;
size_t total_allocated;
size_t total_reserved;
c10::Device device;
};
std::vector<Record> records;
TestMemoryReportingInfo() = default;
~TestMemoryReportingInfo() override = default;
void reportMemoryUsage(
void* ptr,
int64_t alloc_size,
size_t total_allocated,
size_t total_reserved,
c10::Device device) override {
records.emplace_back(
Record{ptr, alloc_size, total_allocated, total_reserved, device});
}
bool memoryProfilingEnabled() const override {
return true;
}
Record getLatestRecord() {
return records.back();
}
};
| 861
| 20.02439
| 74
|
h
|
null |
pytorch-main/aten/src/ATen/test/rng_test.h
|
#include <gtest/gtest.h>
#include <ATen/Generator.h>
#include <ATen/Tensor.h>
#include <ATen/native/TensorIterator.h>
#include <torch/library.h>
#include <c10/util/Optional.h>
#include <torch/all.h>
#include <stdexcept>
namespace {
// Shorthands for the extreme int64_t values used as range bounds below.
constexpr auto int64_min_val = std::numeric_limits<int64_t>::lowest();
constexpr auto int64_max_val = std::numeric_limits<int64_t>::max();
// Lowest `from` value tested for floating-point result types: the full
// int64_t range is permitted (representability is handled by _min_from).
// Modernized to enable_if_t / _v traits, matching the C++17 style
// (is_same_v, if constexpr) already used in this file.
template <typename T, std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
constexpr int64_t _min_val() {
  return int64_min_val;
}
// Lowest representable value of integral type T, widened to int64_t.
// Modernized to enable_if_t / _v traits, matching the C++17 style already
// used in this file.
template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
constexpr int64_t _min_val() {
  return static_cast<int64_t>(std::numeric_limits<T>::lowest());
}
// Smallest `from` bound exactly representable by floating-point T:
// -(2^digits); int64_t values below that lose precision when stored in T.
// Modernized to enable_if_t / _v traits, matching the file's C++17 style.
template <typename T, std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
constexpr int64_t _min_from() {
  return -(static_cast<int64_t>(1) << std::numeric_limits<T>::digits);
}
// For integral T every representable value can serve as `from`.
// Modernized to enable_if_t / _v traits, matching the file's C++17 style.
template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
constexpr int64_t _min_from() {
  return _min_val<T>();
}
// Highest value tested for floating-point result types: the full int64_t
// range is permitted (representability is handled by _max_to).
// Modernized to enable_if_t / _v traits, matching the file's C++17 style.
template <typename T, std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
constexpr int64_t _max_val() {
  return int64_max_val;
}
// Highest representable value of integral type T, widened to int64_t.
// Modernized to enable_if_t / _v traits, matching the file's C++17 style.
template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
constexpr int64_t _max_val() {
  return static_cast<int64_t>(std::numeric_limits<T>::max());
}
// Largest exclusive `to` bound exactly representable by floating-point T:
// 2^digits (24 for float, 53 for double).
// Modernized to enable_if_t / _v traits, matching the file's C++17 style.
template <typename T, std::enable_if_t<std::is_floating_point_v<T>, int> = 0>
constexpr int64_t _max_to() {
  return static_cast<int64_t>(1) << std::numeric_limits<T>::digits;
}
// For integral T the exclusive `to` bound may go up to T's own maximum.
// Modernized to enable_if_t / _v traits, matching the file's C++17 style.
template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
constexpr int64_t _max_to() {
  return _max_val<T>();
}
// Exercises Tensor::random_(from, to, gen) for scalar type S / C++ type T
// on `device` over a grid of [from, to) bounds and generator seeds. For
// each combination the expected scalar is recomputed here with the same
// modulo-based mapping the kernel applies, and the whole 3x3 output tensor
// is checked to be constant at that value.
// NOTE(review): this relies on the RNG engine's first draw being equal to
// the seed `val` passed to at::make_generator — confirm for each RNG used.
template<typename RNG, c10::ScalarType S, typename T>
void test_random_from_to(const at::Device& device) {
  constexpr int64_t min_val = _min_val<T>();  // NOTE(review): appears unused below
  constexpr int64_t min_from = _min_from<T>();
  constexpr int64_t max_val = _max_val<T>();
  constexpr int64_t max_to = _max_to<T>();
  constexpr auto uint64_max_val = std::numeric_limits<uint64_t>::max();
  // Candidate lower bounds and (optional) exclusive upper bounds,
  // chosen per result-type category.
  std::vector<int64_t> froms;
  std::vector<c10::optional<int64_t>> tos;
  if constexpr (::std::is_same_v<T, bool>) {
    // bool: only from == 0 makes sense; to is 1 or unbounded.
    froms = {
      0L
    };
    tos = {
      1L,
      static_cast<c10::optional<int64_t>>(c10::nullopt)
    };
  } else if constexpr (::std::is_signed<T>::value) {
    // Signed types: include negative bounds and the representable minimum.
    froms = {
      min_from,
      -42L,
      0L,
      42L
    };
    tos = {
      c10::optional<int64_t>(-42L),
      c10::optional<int64_t>(0L),
      c10::optional<int64_t>(42L),
      c10::optional<int64_t>(max_to),
      static_cast<c10::optional<int64_t>>(c10::nullopt)
    };
  } else {
    // Unsigned types: non-negative bounds only.
    froms = {
      0L,
      42L
    };
    tos = {
      c10::optional<int64_t>(42L),
      c10::optional<int64_t>(max_to),
      static_cast<c10::optional<int64_t>>(c10::nullopt)
    };
  }
  // Seeds probing 0, a small value, and the boundaries around T's max and
  // uint64's max.
  const std::vector<uint64_t> vals = {
    0L,
    42L,
    static_cast<uint64_t>(max_val),
    static_cast<uint64_t>(max_val) + 1,
    uint64_max_val
  };
  // Track that each of the kernel's three range-handling branches was hit.
  bool full_64_bit_range_case_covered = false;
  bool from_to_case_covered = false;
  bool from_case_covered = false;
  for (const int64_t from : froms) {
    for (const c10::optional<int64_t> to : tos) {
      // Only valid (non-empty) ranges are exercised.
      if (!to.has_value() || from < *to) {
        for (const uint64_t val : vals) {
          auto gen = at::make_generator<RNG>(val);
          auto actual = torch::empty({3, 3}, torch::TensorOptions().dtype(S).device(device));
          actual.random_(from, to, gen);
          T exp;
          uint64_t range;
          if (!to.has_value() && from == int64_min_val) {
            // Full 64-bit range: the raw draw is used unchanged.
            exp = static_cast<int64_t>(val);
            full_64_bit_range_case_covered = true;
          } else {
            if (to.has_value()) {
              range = static_cast<uint64_t>(*to) - static_cast<uint64_t>(from);
              from_to_case_covered = true;
            } else {
              // Unbounded `to`: range extends to T's largest usable value.
              range = static_cast<uint64_t>(max_to) - static_cast<uint64_t>(from) + 1;
              from_case_covered = true;
            }
            if (range < (1ULL << 32)) {
              // Ranges below 2^32 consume only the low 32 bits of the draw.
              exp = static_cast<T>(static_cast<int64_t>((static_cast<uint32_t>(val) % range + from)));
            } else {
              exp = static_cast<T>(static_cast<int64_t>((val % range + from)));
            }
          }
          // Sanity: the expected value must lie within [from, to).
          ASSERT_TRUE(from <= exp);
          if (to.has_value()) {
            ASSERT_TRUE(static_cast<int64_t>(exp) < *to);
          }
          const auto expected = torch::full_like(actual, exp);
          if constexpr (::std::is_same_v<T, bool>) {
            // Compare bool tensors as ints; allclose is not defined on bool.
            ASSERT_TRUE(torch::allclose(actual.toType(torch::kInt), expected.toType(torch::kInt)));
          } else {
            ASSERT_TRUE(torch::allclose(actual, expected));
          }
        }
      }
    }
  }
  // The full 64-bit branch is only reachable when T itself is int64_t.
  if constexpr (::std::is_same_v<T, int64_t>) {
    ASSERT_TRUE(full_64_bit_range_case_covered);
  }
  ASSERT_TRUE(from_to_case_covered);
  ASSERT_TRUE(from_case_covered);
}
// Exercises Tensor::random_(gen) (no explicit bounds) for scalar type S /
// C++ type T on `device`: for each seed the expected value is the seed
// reduced modulo the type's natural range, and the filled 3x3 tensor must
// be constant at that value.
// NOTE(review): relies on the engine's first draw equaling the seed passed
// to at::make_generator — confirm for each RNG used.
template<typename RNG, c10::ScalarType S, typename T>
void test_random(const at::Device& device) {
  const auto max_val = _max_val<T>();
  const auto uint64_max_val = std::numeric_limits<uint64_t>::max();
  // Seeds probing 0, a small value, and boundaries around T's max and
  // uint64's max.
  const std::vector<uint64_t> vals = {
    0L,
    42L,
    static_cast<uint64_t>(max_val),
    static_cast<uint64_t>(max_val) + 1,
    uint64_max_val
  };
  for (const uint64_t val : vals) {
    auto gen = at::make_generator<RNG>(val);
    auto actual = torch::empty({3, 3}, torch::TensorOptions().dtype(S).device(device));
    actual.random_(gen);
    uint64_t range;
    if constexpr (::std::is_floating_point<T>::value) {
      // Floating point: values in [0, 2^digits] -> 2^digits + 1 outcomes.
      range = static_cast<uint64_t>((1ULL << ::std::numeric_limits<T>::digits) + 1);
    } else if constexpr (::std::is_same_v<T, bool>) {
      range = 2;
    } else {
      range = static_cast<uint64_t>(::std::numeric_limits<T>::max()) + 1;
    }
    T exp;
    if constexpr (::std::is_same_v<T, double> || ::std::is_same_v<T, int64_t>) {
      // 64-bit result types consume the full 64-bit draw.
      exp = val % range;
    } else {
      // Narrower types use only the low 32 bits of the draw.
      exp = static_cast<uint32_t>(val) % range;
    }
    // Sanity: expected value lies within [0, range).
    ASSERT_TRUE(0 <= static_cast<int64_t>(exp));
    ASSERT_TRUE(static_cast<uint64_t>(exp) < range);
    const auto expected = torch::full_like(actual, exp);
    if constexpr (::std::is_same_v<T, bool>) {
      // Compare bool tensors as ints; allclose is not defined on bool.
      ASSERT_TRUE(torch::allclose(actual.toType(torch::kInt), expected.toType(torch::kInt)));
    } else {
      ASSERT_TRUE(torch::allclose(actual, expected));
    }
  }
}
}
| 6,512
| 29.152778
| 102
|
h
|
null |
pytorch-main/aten/src/ATen/test/test_assert.h
|
#pragma once
#include <stdexcept>
#include <stdarg.h>
// Formats a printf-style message into a fixed-size stack buffer
// (truncating if it doesn't fit) and throws it as a std::runtime_error.
//
// @param fmt printf-style format string; the variadic arguments must
//            match it, as with printf.
// @throws std::runtime_error always.
static inline void barf(const char *fmt, ...) {
  char msg[2048];
  va_list args;
  va_start(args, fmt);
  // Use sizeof(msg) so the bound can't drift out of sync with the buffer
  // size; vsnprintf always NUL-terminates, even on truncation.
  vsnprintf(msg, sizeof(msg), fmt, args);
  va_end(args);
  throw std::runtime_error(msg);
}
// Old MSVC (VS2015 and earlier) lacks the standard __func__; map it to the
// compiler-specific __FUNCTION__ instead.
#if defined(_MSC_VER) && _MSC_VER <= 1900
#define __func__ __FUNCTION__
#endif
// Branch-prediction hint: lowers to __builtin_expect where available and
// degrades to the plain expression elsewhere.
#if defined(__GNUC__) || defined(__ICL) || defined(__clang__)
#define AT_EXPECT(x, y) (__builtin_expect((x),(y)))
#else
#define AT_EXPECT(x, y) (x)
#endif
// Throws (via barf) with file/line/function context when `cond` is false.
#define ASSERT(cond) \
  if (AT_EXPECT(!(cond), 0)) { \
    barf("%s:%u: %s: Assertion `%s` failed.", __FILE__, __LINE__, __func__, #cond); \
  }
// Runs `fn`; if it throws a std::exception, runs `catc` (with `e` bound to
// the exception), otherwise runs `els`. The _passed flag guards against
// `els` itself throwing and being mistaken for a failure of `fn`.
#define TRY_CATCH_ELSE(fn, catc, els) \
  { \
    /* avoid mistakenly passing if els code throws exception*/ \
    bool _passed = false; \
    try { \
      fn; \
      _passed = true; \
      els; \
    } catch (const std::exception &e) { \
      ASSERT(!_passed); \
      catc; \
    } \
  }
// Asserts that `fn` throws and that the message contains `message`.
#define ASSERT_THROWSM(fn, message) \
  TRY_CATCH_ELSE(fn, ASSERT(std::string(e.what()).find(message) != std::string::npos), ASSERT(false))
// Asserts that `fn` throws any std::exception.
#define ASSERT_THROWS(fn) \
  ASSERT_THROWSM(fn, "");
// Asserts exact elementwise tensor equality.
#define ASSERT_EQUAL(t1, t2) \
  ASSERT(t1.equal(t2));
// allclose broadcasts, so check same size before allclose.
#define ASSERT_ALLCLOSE(t1, t2) \
  ASSERT(t1.is_same_size(t2)); \
  ASSERT(t1.allclose(t2));
// allclose broadcasts, so check same size before allclose.
#define ASSERT_ALLCLOSE_TOLERANCES(t1, t2, atol, rtol) \
  ASSERT(t1.is_same_size(t2)); \
  ASSERT(t1.allclose(t2, atol, rtol));
| 2,038
| 32.42623
| 101
|
h
|
null |
pytorch-main/benchmarks/static_runtime/deep_wide_pt.h
|
#pragma once
#include <ATen/CPUFunctions.h>
#include <ATen/NativeFunctions.h>
#include <torch/torch.h>
// Reference deep-and-wide model written against the high-level
// torch::nn::Module API; serves as the baseline the static-runtime
// benchmarks compare against.
struct DeepAndWide : torch::nn::Module {
  DeepAndWide(int num_features = 50) {
    mu_ = register_parameter("mu_", torch::randn({1, num_features}));
    sigma_ = register_parameter("sigma_", torch::randn({1, num_features}));
    fc_w_ = register_parameter("fc_w_", torch::randn({1, num_features + 1}));
    fc_b_ = register_parameter("fc_b_", torch::randn({1}));
  }
  torch::Tensor forward(
      torch::Tensor ad_emb_packed,
      torch::Tensor user_emb,
      torch::Tensor wide) {
    // Preprocess the "wide" features: shift, scale, then clamp.
    auto shifted = wide + mu_;
    auto scaled = shifted * sigma_;
    // Placeholder for ReplaceNaN
    auto no_nan = scaled;
    auto clamped = torch::clamp(no_nan, -10.0, 10.0);
    // Ad/user interaction via batched matmul of the embeddings.
    auto user_emb_transposed = torch::transpose(user_emb, 1, 2);
    auto products = torch::bmm(ad_emb_packed, user_emb_transposed);
    auto flattened = torch::flatten(products, 1);
    // Final linear layer over [interactions, preprocessed wide features],
    // squashed through a sigmoid.
    auto joined = torch::cat({flattened, clamped}, 1);
    auto logits = torch::nn::functional::linear(joined, fc_w_, fc_b_);
    return torch::sigmoid(logits);
  }
  torch::Tensor mu_, sigma_, fc_w_, fc_b_;
};
// Implementation using native functions and pre-allocated tensors.
// It could be used as a "speed of light" for static runtime.
struct DeepAndWideFast : torch::nn::Module {
  DeepAndWideFast(int num_features = 50) {
    mu_ = register_parameter("mu_", torch::randn({1, num_features}));
    sigma_ = register_parameter("sigma_", torch::randn({1, num_features}));
    fc_w_ = register_parameter("fc_w_", torch::randn({1, num_features + 1}));
    fc_b_ = register_parameter("fc_b_", torch::randn({1}));
    allocated = false;
    prealloc_tensors = {};
  }
  torch::Tensor forward(
      torch::Tensor ad_emb_packed,
      torch::Tensor user_emb,
      torch::Tensor wide) {
    torch::NoGradGuard no_grad;
    if (!allocated) {
      // First call: run the graph with allocating ops and capture every
      // intermediate so subsequent calls can reuse the buffers via the
      // _out variants below.
      auto wide_offset = at::add(wide, mu_);
      auto wide_normalized = at::mul(wide_offset, sigma_);
      // Placeholder for ReplaceNaN
      auto wide_preproc = at::cpu::clamp(wide_normalized, -10.0, 10.0);
      auto user_emb_t = at::native::transpose(user_emb, 1, 2);
      auto dp_unflatten = at::cpu::bmm(ad_emb_packed, user_emb_t);
      // auto dp = at::native::flatten(dp_unflatten, 1);
      auto dp = dp_unflatten.view({dp_unflatten.size(0), 1});
      auto input = at::cpu::cat({dp, wide_preproc}, 1);
      // fc1 = torch::nn::functional::linear(input, fc_w_, fc_b_);
      fc_w_t_ = torch::t(fc_w_);
      auto fc1 = torch::addmm(fc_b_, input, fc_w_t_);
      auto pred = at::cpu::sigmoid(fc1);
      prealloc_tensors = {
          wide_offset,
          wide_normalized,
          wide_preproc,
          user_emb_t,
          dp_unflatten,
          dp,
          input,
          fc1,
          pred};
      allocated = true;
      return pred;
    } else {
      // Steady state: write results into the buffers captured on the first
      // call instead of allocating new tensors.
      // NOTE(review): the out-argument position differs between the
      // at::cpu::*_out, *_outf and at::native::*_out overloads used below;
      // worth double-checking each call writes into the intended buffer.
      // Potential optimization: add and mul could be fused together (e.g. with
      // Eigen).
      at::add_out(prealloc_tensors[0], wide, mu_);
      at::mul_out(prealloc_tensors[1], prealloc_tensors[0], sigma_);
      at::native::clip_out(
          prealloc_tensors[1], -10.0, 10.0, prealloc_tensors[2]);
      // Potential optimization: original tensor could be pre-transposed.
      // prealloc_tensors[3] = at::native::transpose(user_emb, 1, 2);
      if (prealloc_tensors[3].data_ptr() != user_emb.data_ptr()) {
        // Refresh the transposed view in place by re-pointing it at the
        // (new) user_emb storage with swapped size/stride on dims 1 and 2.
        auto sizes = user_emb.sizes();
        auto strides = user_emb.strides();
        prealloc_tensors[3].set_(
            user_emb.storage(),
            0,
            {sizes[0], sizes[2], sizes[1]},
            {strides[0], strides[2], strides[1]});
      }
      // Potential optimization: call MKLDNN directly.
      at::cpu::bmm_out(ad_emb_packed, prealloc_tensors[3], prealloc_tensors[4]);
      if (prealloc_tensors[5].data_ptr() != prealloc_tensors[4].data_ptr()) {
        // in unlikely case that the input tensor changed we need to
        // reinitialize the view
        prealloc_tensors[5] =
            prealloc_tensors[4].view({prealloc_tensors[4].size(0), 1});
      }
      // Potential optimization: we can replace cat with carefully constructed
      // tensor views on the output that are passed to the _out ops above.
      at::cpu::cat_outf(
          {prealloc_tensors[5], prealloc_tensors[2]}, 1, prealloc_tensors[6]);
      at::cpu::addmm_out(
          prealloc_tensors[7], fc_b_, prealloc_tensors[6], fc_w_t_, 1, 1);
      at::cpu::sigmoid_out(prealloc_tensors[7], prealloc_tensors[8]);
      return prealloc_tensors[8];
    }
  }
  torch::Tensor mu_, sigma_, fc_w_, fc_b_, fc_w_t_;
  std::vector<torch::Tensor> prealloc_tensors;
  // True once the first forward() has captured the intermediate buffers.
  bool allocated = false;
};
torch::jit::Module getDeepAndWideSciptModel(int num_features = 50);
torch::jit::Module getTrivialScriptModel();
torch::jit::Module getLeakyReLUScriptModel();
torch::jit::Module getLeakyReLUConstScriptModel();
torch::jit::Module getLongScriptModel();
torch::jit::Module getSignedLog1pModel();
| 5,034
| 34.457746
| 80
|
h
|
null |
pytorch-main/benchmarks/static_runtime/test_utils.h
|
// Copyright (c) Meta Platforms, Inc. and affiliates.
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#pragma once
#include <string>
#include <vector>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/static/impl.h>
namespace c10 {
struct IValue;
}
namespace torch {
namespace jit {
struct Node;
class StaticModule;
namespace test {
// Given a model/function in jit or IR script, run the model/function
// with the jit interpreter and static runtime, and compare the results
void testStaticRuntime(
const std::string& source,
const std::vector<c10::IValue>& args,
const std::vector<c10::IValue>& args2 = {},
const bool use_allclose = false,
const bool use_equalnan = false,
const bool check_resize = true);
std::shared_ptr<Graph> getGraphFromScript(const std::string& jit_script);
std::shared_ptr<Graph> getGraphFromIR(const std::string& ir);
bool hasProcessedNodeWithName(
torch::jit::StaticModule& smodule,
const char* name);
at::Tensor getTensor(const at::IValue& ival);
Node* getNodeWithKind(const StaticModule& smodule, const std::string& kind);
Node* getNodeWithKind(std::shared_ptr<Graph>& graph, const std::string& kind);
bool hasNodeWithKind(const StaticModule& smodule, const std::string& kind);
bool hasNodeWithKind(std::shared_ptr<Graph>& graph, const std::string& kind);
void compareResultsWithJIT(
StaticRuntime& runtime,
const std::shared_ptr<Graph>& graph,
const std::vector<c10::IValue>& args,
const bool use_allclose = false,
const bool use_equalnan = false);
void compareResults(
const IValue& expect,
const IValue& actual,
const bool use_allclose = false,
const bool use_equalnan = false);
} // namespace test
} // namespace jit
} // namespace torch
| 1,859
| 26.352941
| 78
|
h
|
null |
pytorch-main/binaries/benchmark_args.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "c10/util/Flags.h"
C10_DEFINE_string(
backend,
"builtin",
"The backend to use when running the model. The allowed "
"backend choices are: builtin, default, nnpack, eigen, mkl, cuda");
C10_DEFINE_string(init_net, "", "The given net to initialize any parameters.");
C10_DEFINE_string(
input,
"",
"Input that is needed for running the network. If "
"multiple input needed, use comma separated string.");
C10_DEFINE_string(
input_dims,
"",
"Alternate to input_files, if all inputs are simple "
"float TensorCPUs, specify the dimension using comma "
"separated numbers. If multiple input needed, use "
"semicolon to separate the dimension of different "
"tensors.");
C10_DEFINE_string(
input_file,
"",
"Input file that contain the serialized protobuf for "
"the input blobs. If multiple input needed, use comma "
"separated string. Must have the same number of items "
"as input does.");
C10_DEFINE_string(
input_type,
"float",
"Input type when specifying the input dimension."
"The supported types are float, uint8_t.");
C10_DEFINE_int(iter, 10, "The number of iterations to run.");
C10_DEFINE_bool(
measure_memory,
false,
"Whether to measure increase in allocated memory while "
"loading and running the net.");
C10_DEFINE_string(net, "", "The given net to benchmark.");
C10_DEFINE_string(
output,
"",
"Output that should be dumped after the execution "
"finishes. If multiple outputs are needed, use comma "
"separated string. If you want to dump everything, pass "
"'*' as the output value.");
C10_DEFINE_string(
output_folder,
"",
"The folder that the output should be written to. This "
"folder must already exist in the file system.");
C10_DEFINE_bool(
run_individual,
false,
"Whether to benchmark individual operators.");
C10_DEFINE_int(
sleep_before_run,
0,
"The seconds to sleep before starting the benchmarking.");
C10_DEFINE_int(
sleep_between_iteration,
0,
"The seconds to sleep between the individual iterations.");
C10_DEFINE_int(
sleep_between_net_and_operator,
0,
"The seconds to sleep between net and operator runs.");
C10_DEFINE_bool(
text_output,
false,
"Whether to write out output in text format for regression purpose.");
C10_DEFINE_int(warmup, 0, "The number of iterations to warm up.");
C10_DEFINE_bool(
wipe_cache,
false,
"Whether to evict the cache before running network.");
| 3,163
| 31.958333
| 79
|
h
|
null |
pytorch-main/binaries/benchmark_helper.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <string>
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/net.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/string_utils.h"
#include "c10/util/string_utils.h"
#include <c10/util/irange.h>
using std::map;
using std::shared_ptr;
using std::string;
using std::vector;
// Dumps `tensor` to "<output_prefix>/<sanitized name>.txt" as text: one
// line with the comma-separated dimensions followed by one line with the
// comma-separated flattened values.
//
// '/' characters in `name` are replaced with '_' so the blob name maps to
// a single file. When `index` is 0 the file is truncated; for later
// indices the lines are appended, so successive blobs accumulate in one
// file. Calls with index >= num_blobs are no-ops. Only FLOAT and INT32
// tensors are supported; anything else throws via CAFFE_THROW.
//
// Changes vs. the previous version: removed the dead `dims_size` /
// `elem_dim_size` computation (the results were never used) and replaced
// `str.length() != 0` with the idiomatic `!str.empty()`.
template <typename ContextType, typename TensorType>
void writeTextOutput(
    TensorType* tensor,
    const string& output_prefix,
    const string& name,
    int index,
    int num_blobs) {
  if (index >= num_blobs) {
    return;
  }
  string filename = name;
  std::replace(filename.begin(), filename.end(), '/', '_');
  string output_name = output_prefix + "/" + filename + ".txt";
  // Serialize through caffe2's TensorSerializer to obtain a typed proto.
  caffe2::TensorSerializer ser;
  caffe2::BlobProto blob_proto;
  ser.Serialize(
      *tensor, output_name, blob_proto.mutable_tensor(), 0, tensor->numel());
  blob_proto.set_name(output_name);
  blob_proto.set_type("Tensor");
  CAFFE_ENFORCE(blob_proto.has_tensor());
  caffe2::TensorProto tensor_proto = blob_proto.tensor();
  std::vector<std::string> lines;
  // First line: comma-separated dimensions.
  std::string dims;
  for (const auto i : c10::irange(tensor_proto.dims_size())) {
    int dim = tensor_proto.dims(i);
    if (i > 0) {
      dims += ", ";
    }
    dims += c10::to_string(dim);
  }
  lines.push_back(dims);
  // Second line: comma-separated flattened data.
  std::stringstream line;
  if (tensor_proto.data_type() == caffe2::TensorProto::FLOAT) {
    auto start = tensor_proto.float_data().begin();
    auto end = tensor_proto.float_data().end();
    copy(start, end, std::ostream_iterator<float>(line, ","));
  } else if (tensor_proto.data_type() == caffe2::TensorProto::INT32) {
    auto start = tensor_proto.int32_data().begin();
    auto end = tensor_proto.int32_data().end();
    copy(start, end, std::ostream_iterator<int>(line, ","));
  } else {
    CAFFE_THROW("Unimplemented Blob type.");
  }
  // Drop the trailing comma produced by the ostream_iterator delimiter.
  string str = line.str();
  if (!str.empty()) {
    str.pop_back();
  }
  lines.push_back(str);
  // Truncate on the first blob, append afterwards.
  // (static casts are a workaround for the MSVC build)
  auto flags = static_cast<std::ios_base::openmode>(std::ios::out);
  if (index != 0) {
    flags |= static_cast<std::ios_base::openmode>(std::ios::app);
  } else {
    flags |= static_cast<std::ios_base::openmode>(std::ios::trunc);
  }
  std::ofstream output_file(output_name, flags);
  std::ostream_iterator<std::string> output_iterator(output_file, "\n");
  std::copy(lines.begin(), lines.end(), output_iterator);
}
void observerConfig();
bool backendCudaSet(const string&);
void setDeviceType(caffe2::NetDef*, caffe2::DeviceType&);
void setOperatorEngine(caffe2::NetDef*, const string&);
int loadInput(
shared_ptr<caffe2::Workspace> workspace,
const bool run_on_gpu,
map<string, caffe2::TensorProtos>& tensor_protos_map,
const string& input,
const string& input_file,
const string& input_dims,
const string& input_type);
void fillInputBlob(
shared_ptr<caffe2::Workspace> workspace,
map<string, caffe2::TensorProtos>& tensor_protos_map,
int iteration);
void writeOutput(
shared_ptr<caffe2::Workspace> workspace,
const bool run_on_gpu,
const string& output,
const string& output_folder,
const bool text_output,
const int index,
const int num_blobs);
void logBenchmarkResult(
const std::string& type,
const std::string& metric,
const std::string& unit,
const int value);
long getVirtualMemoryIfOptionEnabled(bool FLAGS_measure_memory);
void runNetwork(
shared_ptr<caffe2::Workspace> workspace,
caffe2::NetBase* net,
map<string, caffe2::TensorProtos>& tensor_protos_map,
const bool wipe_cache,
const bool run_individual,
const bool run_on_gpu,
const bool text_output,
const int warmup,
const int iter,
const int num_blobs,
const int sleep_before_run,
const int sleep_between_iteration,
const int sleep_between_net_and_operator,
const std::string& output,
const std::string& output_folder);
int benchmark(
int argc,
char* argv[],
const string& FLAGS_backend,
const string& FLAGS_init_net,
const string& FLAGS_input,
const string& FLAGS_input_dims,
const string& FLAGS_input_file,
const string& FLAGS_input_type,
int FLAGS_iter,
bool FLAGS_measure_memory,
const string& FLAGS_net,
const string& FLAGS_output,
const string& FLAGS_output_folder,
bool FLAGS_run_individual,
int FLAGS_sleep_before_run,
int FLAGS_sleep_between_iteration,
int FLAGS_sleep_between_net_and_operator,
bool FLAGS_text_output,
int FLAGS_warmup,
bool FLAGS_wipe_cache);
| 5,475
| 31.211765
| 77
|
h
|
null |
pytorch-main/c10/core/Allocator.h
|
#pragma once
#include <stddef.h>
#include <memory>
#include <c10/core/Device.h>
#include <c10/util/Exception.h>
#include <c10/util/ThreadLocalDebugInfo.h>
#include <c10/util/UniqueVoidPtr.h>
namespace c10 {
// A DataPtr is a unique pointer (with an attached deleter and some
// context for the deleter) to some memory, which also records what
// device is for its data.
//
// nullptr DataPtrs can still have a nontrivial device; this allows
// us to treat zero-size allocations uniformly with non-zero allocations.
//
class C10_API DataPtr {
 private:
  c10::detail::UniqueVoidPtr ptr_;
  Device device_;
 public:
  // Choice of CPU here is arbitrary; if there's an "undefined" device
  // we could use that too
  DataPtr() : ptr_(), device_(DeviceType::CPU) {}
  DataPtr(void* data, Device device) : ptr_(data), device_(device) {}
  DataPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter, Device device)
      : ptr_(data, ctx, ctx_deleter), device_(device) {}
  // Smart-pointer style access to the underlying data pointer.
  void* operator->() const {
    return ptr_.get();
  }
  // Resets the held pointer; see UniqueVoidPtr::clear for the exact
  // deleter semantics.
  void clear() {
    ptr_.clear();
  }
  void* get() const {
    return ptr_.get();
  }
  void* mutable_get() {
    return ptr_.get();
  }
  // The opaque context that will be passed to the deleter (may be the
  // same address as the data pointer; see Note [raw_allocate/raw_deallocate
  // and Thrust] below).
  void* get_context() const {
    return ptr_.get_context();
  }
  // Relinquishes ownership of the context to the caller; see
  // UniqueVoidPtr::release_context for the exact semantics.
  void* release_context() {
    return ptr_.release_context();
  }
  std::unique_ptr<void, DeleterFnPtr>&& move_context() {
    return ptr_.move_context();
  }
  // True iff a non-null data pointer is held.
  operator bool() const {
    return static_cast<bool>(ptr_);
  }
  // Casts the context to T*, gated on the deleter matching
  // expected_deleter; see UniqueVoidPtr::cast_context.
  template <typename T>
  T* cast_context(DeleterFnPtr expected_deleter) const {
    return ptr_.cast_context<T>(expected_deleter);
  }
  DeleterFnPtr get_deleter() const {
    return ptr_.get_deleter();
  }
  /**
   * Compare the deleter in a DataPtr to expected_deleter.
   * If it matches, replace the deleter with new_deleter
   * and return true; otherwise, does nothing and returns
   * false.
   *
   * In general, it is not safe to unconditionally set the
   * deleter on a DataPtr, because you don't know what
   * the deleter is, and thus will have a hard time properly
   * disposing of the deleter without storing the original
   * deleter (this is difficult to do, because DeleterFnPtr
   * is not a closure, and because the context on DataPtr is
   * only a single word, you generally don't have enough
   * space to store both the original deleter and its context).
   * However, in some cases, you know /exactly/ what the deleter
   * is, and you have a new deleter that manually wraps
   * the old one. In this case, you can safely swap the deleter
   * after asserting that the deleters line up.
   *
   * What are the requirements on new_deleter? It must still
   * properly dispose of the void* pointer passed in as its argument,
   * where void* is whatever the context of the original deleter
   * is. So in general, you expect the new deleter to look something
   * like this:
   *
   *      [](void* ptr) {
   *        some_new_stuff(ptr);
   *        get_orig_allocator()->raw_deleter(ptr);
   *      }
   *
   * Note that it won't work to close over the original
   * allocator; you don't have enough space to do that! Also,
   * it's unsafe to assume that the passed in pointer in
   * question is the memory pointer in question; it might not
   * be; be sure to read the source code of the Allocator
   * in question to confirm this.
   */
  C10_NODISCARD bool compare_exchange_deleter(
      DeleterFnPtr expected_deleter,
      DeleterFnPtr new_deleter) {
    return ptr_.compare_exchange_deleter(expected_deleter, new_deleter);
  }
  Device device() const {
    return device_;
  }
  // Unsafely mutates the device on a DataPtr. Under normal use,
  // you should never actually need to call this function.
  // We need this for the implementation of the hack detailed
  // in Note [Masquerading as CUDA]
  void unsafe_set_device(Device device) {
    device_ = device;
  }
};
// NB: Device is NOT tested for here; a CUDA nullptr is as much a nullptr as a
// CPU nullptr
// A DataPtr compares equal to nullptr iff it holds no data pointer,
// delegating to operator bool above.
inline bool operator==(const DataPtr& dp, std::nullptr_t) noexcept {
  return !dp;
}
inline bool operator==(std::nullptr_t, const DataPtr& dp) noexcept {
  return !dp;
}
inline bool operator!=(const DataPtr& dp, std::nullptr_t) noexcept {
  return dp;
}
inline bool operator!=(std::nullptr_t, const DataPtr& dp) noexcept {
  return dp;
}
// Note [raw_allocate/raw_deallocate and Thrust]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Thrust's support for custom allocators requires us to write something
// like this:
//
// class ThrustAllocator {
// char* allocate(size_t);
// void deallocate(char*, size_t);
// };
//
// This is not good for our unique_ptr based allocator interface, as
// there is no way to get to the context when we free.
//
// However, in some cases the context is exactly the same as
// the data pointer. In this case, we can support the "raw"
// allocate and deallocate interface. This is what
// raw_deleter signifies. By default, it returns a nullptr, which means that
// the raw interface is not implemented. Be sure to implement it whenever
// possible, or the raw interface will incorrectly reported as unsupported,
// when it is actually possible.
struct C10_API Allocator {
  virtual ~Allocator() = default;
  // Allocates n bytes, returned as an owning DataPtr (deleter attached).
  virtual DataPtr allocate(size_t n) const = 0;
  // If this returns a non nullptr, it means that allocate()
  // is guaranteed to return a unique_ptr with this deleter attached;
  // it means the rawAllocate and rawDeallocate APIs are safe to use.
  // This function MUST always return the same BoundDeleter.
  virtual DeleterFnPtr raw_deleter() const {
    return nullptr;
  }
  // Raw-pointer allocation for APIs (e.g. Thrust; see the note above) that
  // cannot carry a deleter context. Only valid when the allocator's data
  // pointer and context coincide, which the assert checks.
  void* raw_allocate(size_t n) {
    auto dptr = allocate(n);
    AT_ASSERT(dptr.get() == dptr.get_context());
    return dptr.release_context();
  }
  // Frees a pointer obtained from raw_allocate using raw_deleter(); the
  // assert fires if the allocator does not support the raw interface.
  void raw_deallocate(void* ptr) {
    auto d = raw_deleter();
    AT_ASSERT(d);
    d(ptr);
  }
};
// This context is used to generate DataPtr which have arbitrary
// std::function deleters associated with them. In some user facing
// functions, we give a (user-friendly) interface for constructing
// tensors from external data which take an arbitrary std::function
// deleter. Grep for InefficientStdFunctionContext to find these
// occurrences.
//
// This context is inefficient because we have to do a dynamic
// allocation InefficientStdFunctionContext, on top of the dynamic
// allocation which is implied by std::function itself.
struct C10_API InefficientStdFunctionContext {
  std::unique_ptr<void, std::function<void(void*)>> ptr_;
  InefficientStdFunctionContext(
      std::unique_ptr<void, std::function<void(void*)>>&& ptr)
      : ptr_(std::move(ptr)) {}
  // Wraps (ptr, deleter) into a DataPtr; per the note above this requires
  // a dynamic allocation for the context holding the std::function.
  static DataPtr makeDataPtr(
      void* ptr,
      const std::function<void(void*)>& deleter,
      Device device);
};
/** Set the allocator for DeviceType `t`. The passed in allocator pointer is
* expected to have static lifetime; this function does NOT take ownership
* of the raw pointer. (The reason for this is to prevent existing pointers
* to an allocator of a particular device from being invalidated when
* SetAllocator is called.)
*
* Also note that this is not thread-safe, and we assume this function will
* only be called during initialization.
*
* The 'priority' flag is introduced when we want to overwrite the default
* allocator, since the allocators are set statically. The default priority
* is 0, which means the lowest. Only higher or equal priority can overwrite
* existing ones.
*/
C10_API void SetAllocator(DeviceType t, Allocator* alloc, uint8_t priority = 0);
C10_API Allocator* GetAllocator(const DeviceType& t);
// Helper whose construction registers `alloc` as the allocator for device
// type `t`; instantiate one static instance per device type (see the
// REGISTER_ALLOCATOR macro below for the intended usage pattern).
template <DeviceType t>
struct AllocatorRegisterer {
  explicit AllocatorRegisterer(Allocator* alloc) {
    SetAllocator(t, alloc);
  }
};
// Registers `f` as the allocator for device type `t` by constructing a
// static AllocatorRegisterer in an anonymous namespace at static-init time.
#define REGISTER_ALLOCATOR(t, f)                       \
  namespace {                                          \
  static c10::AllocatorRegisterer<t> g_allocator_d(f); \
  }
// An interface for reporting thread local memory usage
// per device
struct C10_API MemoryReportingInfoBase : public c10::DebugInfoBase {
  MemoryReportingInfoBase();
  ~MemoryReportingInfoBase() override = default;

  /**
   * Called on every allocation/deallocation event.
   *
   * alloc_size corresponds to the size of the ptr.
   *
   * total_allocated corresponds to total allocated memory.
   *
   * total_reserved corresponds to total size of memory pool, both used and
   * unused, if applicable.
   */
  virtual void reportMemoryUsage(
      void* ptr,
      int64_t alloc_size,
      size_t total_allocated,
      size_t total_reserved,
      Device device) = 0;

  // Called when an allocation of `alloc_size` fails. Unlike
  // reportMemoryUsage this is non-pure: a default implementation is
  // provided out of line.
  virtual void reportOutOfMemory(
      int64_t alloc_size,
      size_t total_allocated,
      size_t total_reserved,
      Device device);

  // Whether this listener wants memory events reported at all (queried by
  // the free function memoryProfilingEnabled() below).
  virtual bool memoryProfilingEnabled() const = 0;
};
C10_API bool memoryProfilingEnabled();
C10_API void reportMemoryUsageToProfiler(
void* ptr,
int64_t alloc_size,
size_t total_allocated,
size_t total_reserved,
Device device);
C10_API void reportOutOfMemoryToProfiler(
int64_t alloc_size,
size_t total_allocated,
size_t total_reserved,
Device device);
// used to hold traceback information in allocators
struct GatheredContext {
  // Virtual destructor so allocators can delete arbitrary derived context
  // objects (e.g. captured tracebacks) through a GatheredContext pointer.
  virtual ~GatheredContext() = default;
};
} // namespace c10
| 9,201
| 32.100719
| 80
|
h
|
null |
pytorch-main/c10/core/AutogradState.h
|
#pragma once
#include <c10/macros/Export.h>
namespace c10 {
// Structure used to pack all the thread local boolean
// flags used by autograd
struct C10_API AutogradState {
  // Accessors for the thread-local copy of this state.
  static AutogradState& get_tls_state();
  static void set_tls_state(AutogradState state);

  AutogradState(
      bool grad_mode,
      bool inference_mode,
      bool fw_grad_mode,
      bool multithreading_enabled)
      : grad_mode_(grad_mode),
        inference_mode_(inference_mode),
        fw_grad_mode_(fw_grad_mode),
        multithreading_enabled_(multithreading_enabled),
        view_replay_enabled_(false) {} // view replay is always off initially

  void set_grad_mode(bool enabled) {
    grad_mode_ = enabled;
  }

  void set_fw_grad_mode(bool enabled) {
    fw_grad_mode_ = enabled;
  }

  void set_inference_mode(bool enabled) {
    inference_mode_ = enabled;
  }

  void set_multithreading_enabled(bool multithreading_enabled) {
    multithreading_enabled_ = multithreading_enabled;
  }

  void set_view_replay_enabled(bool view_replay_enabled) {
    view_replay_enabled_ = view_replay_enabled;
  }

  bool get_grad_mode() const {
    return grad_mode_;
  }

  bool get_fw_grad_mode() const {
    return fw_grad_mode_;
  }

  bool get_inference_mode() const {
    return inference_mode_;
  }

  bool get_multithreading_enabled() const {
    return multithreading_enabled_;
  }

  bool get_view_replay_enabled() const {
    return view_replay_enabled_;
  }

 private:
  // Packed as single-bit bitfields so the whole flag set stays compact.
  bool grad_mode_ : 1;
  bool inference_mode_ : 1;
  bool fw_grad_mode_ : 1;
  bool multithreading_enabled_ : 1;
  bool view_replay_enabled_ : 1;
};
} // namespace c10
| 1,591
| 20.808219
| 64
|
h
|
null |
pytorch-main/c10/core/Backend.h
|
#pragma once
#include <c10/core/DeviceType.h>
#include <c10/core/DispatchKey.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/util/Exception.h>
#include <stdexcept>
namespace c10 {
/**
* This legacy enum class defines the set of backends supported by old school,
* code generated Type-based ATen. A "backend" in this sense roughly
* corresponds to the cartesian product of (device type, layout), but restricted
* only to combinations which we actually have kernels for. Backend does NOT
* include dtype.
*
* The reason we are sunsetting this enum class is because it doesn't allow for
* open registration; e.g., if you want to add SparseXLA, you'd have to
* edit this enum; you wouldn't be able to do it out of tree. DispatchKey is
* the replacement for Backend which supports open registration.
*
* NB: The concept of 'Backend' here disagrees with the notion of backend
* exposed to users in torch.backends. Backend here is something like "CPU"
* or "SparseCUDA"; backend in torch.backends is something like "MKL" or
* "CUDNN".
*/
enum class Backend {
CPU,
CUDA,
HIP,
VE,
FPGA,
IPU,
XPU,
SparseCPU,
SparseCUDA,
SparseCsrCPU,
SparseCsrCUDA,
SparseHIP,
SparseVE,
SparseXPU,
SparsePrivateUse1,
ORT,
XLA,
Vulkan,
Metal,
Meta,
QuantizedCPU,
QuantizedCUDA,
QuantizedXPU,
QuantizedPrivateUse1,
Undefined,
MkldnnCPU,
MPS,
HPU,
Lazy,
MTIA,
PrivateUse1,
NumOptions
};
// Maps a DispatchKey to its legacy Backend. Autograd keys map to the same
// Backend as their corresponding functionality key (e.g. AutogradCPU -> CPU).
// Throws (via TORCH_CHECK) for keys that have no Backend equivalent.
static inline Backend dispatchKeyToBackend(DispatchKey t) {
  switch (t) {
    case DispatchKey::CPU:
    case DispatchKey::AutogradCPU:
      return Backend::CPU;
    case DispatchKey::CUDA:
    case DispatchKey::AutogradCUDA:
      return Backend::CUDA;
    case DispatchKey::HIP:
      return Backend::HIP;
    case DispatchKey::VE:
      return Backend::VE;
    case DispatchKey::FPGA:
      return Backend::FPGA;
    case DispatchKey::ORT:
      return Backend::ORT;
    case DispatchKey::XLA:
    case DispatchKey::AutogradXLA:
      return Backend::XLA;
    case DispatchKey::Lazy:
    case DispatchKey::AutogradLazy:
      return Backend::Lazy;
    case DispatchKey::MPS:
    case DispatchKey::AutogradMPS:
      return Backend::MPS;
    case DispatchKey::Vulkan:
      return Backend::Vulkan;
    case DispatchKey::Metal:
      return Backend::Metal;
    case DispatchKey::Meta:
      return Backend::Meta;
    case DispatchKey::SparseCPU:
      return Backend::SparseCPU;
    case DispatchKey::SparseCUDA:
      return Backend::SparseCUDA;
    case DispatchKey::SparseHIP:
      return Backend::SparseHIP;
    case DispatchKey::SparseVE:
      return Backend::SparseVE;
    case DispatchKey::SparsePrivateUse1:
      return Backend::SparsePrivateUse1;
    case DispatchKey::SparseCsrCPU:
      return Backend::SparseCsrCPU;
    case DispatchKey::SparseCsrCUDA:
      return Backend::SparseCsrCUDA;
    case DispatchKey::MkldnnCPU:
      return Backend::MkldnnCPU;
    case DispatchKey::QuantizedCPU:
      return Backend::QuantizedCPU;
    case DispatchKey::QuantizedCUDA:
      return Backend::QuantizedCUDA;
    case DispatchKey::IPU:
    case DispatchKey::AutogradIPU:
      return Backend::IPU;
    case DispatchKey::XPU:
    case DispatchKey::AutogradXPU:
      return Backend::XPU;
    case DispatchKey::SparseXPU:
      return Backend::SparseXPU;
    case DispatchKey::QuantizedXPU:
      return Backend::QuantizedXPU;
    case DispatchKey::QuantizedPrivateUse1:
      return Backend::QuantizedPrivateUse1;
    case DispatchKey::HPU:
    case DispatchKey::AutogradHPU:
      return Backend::HPU;
    case DispatchKey::MTIA:
    case DispatchKey::AutogradMTIA:
      return Backend::MTIA;
    case DispatchKey::PrivateUse1:
    case DispatchKey::AutogradPrivateUse1:
      return Backend::PrivateUse1;
    case DispatchKey::Undefined:
      return Backend::Undefined;
    default:
      TORCH_CHECK(false, "Unrecognized tensor type ID: ", t);
  }
}
// Inverse of dispatchKeyToBackend: maps a legacy Backend back to its
// (non-autograd) functionality DispatchKey. Throws std::runtime_error for
// backends with no corresponding dispatch key.
static inline DispatchKey backendToDispatchKey(Backend b) {
  switch (b) {
    case Backend::CPU:
      return DispatchKey::CPU;
    case Backend::CUDA:
      return DispatchKey::CUDA;
    case Backend::HIP:
      return DispatchKey::HIP;
    case Backend::VE:
      return DispatchKey::VE;
    case Backend::FPGA:
      return DispatchKey::FPGA;
    case Backend::ORT:
      return DispatchKey::ORT;
    case Backend::XLA:
      return DispatchKey::XLA;
    case Backend::Lazy:
      return DispatchKey::Lazy;
    case Backend::IPU:
      return DispatchKey::IPU;
    case Backend::XPU:
      return DispatchKey::XPU;
    case Backend::SparseXPU:
      return DispatchKey::SparseXPU;
    case Backend::SparseCPU:
      return DispatchKey::SparseCPU;
    case Backend::SparseCUDA:
      return DispatchKey::SparseCUDA;
    case Backend::SparseHIP:
      return DispatchKey::SparseHIP;
    case Backend::SparseVE:
      return DispatchKey::SparseVE;
    case Backend::SparsePrivateUse1:
      return DispatchKey::SparsePrivateUse1;
    case Backend::SparseCsrCPU:
      return DispatchKey::SparseCsrCPU;
    case Backend::SparseCsrCUDA:
      return DispatchKey::SparseCsrCUDA;
    case Backend::MkldnnCPU:
      return DispatchKey::MkldnnCPU;
    case Backend::Vulkan:
      return DispatchKey::Vulkan;
    case Backend::Metal:
      return DispatchKey::Metal;
    case Backend::Meta:
      return DispatchKey::Meta;
    case Backend::QuantizedCPU:
      return DispatchKey::QuantizedCPU;
    case Backend::QuantizedCUDA:
      return DispatchKey::QuantizedCUDA;
    // Previously missing: dispatchKeyToBackend maps DispatchKey::QuantizedXPU
    // to Backend::QuantizedXPU, but the reverse mapping fell through to
    // "Unknown backend". Added so the round trip is consistent with
    // toString() and backendToDeviceType(), which both handle QuantizedXPU.
    case Backend::QuantizedXPU:
      return DispatchKey::QuantizedXPU;
    case Backend::QuantizedPrivateUse1:
      return DispatchKey::QuantizedPrivateUse1;
    case Backend::Undefined:
      return DispatchKey::Undefined;
    case Backend::MPS:
      return DispatchKey::MPS;
    case Backend::HPU:
      return DispatchKey::HPU;
    case Backend::MTIA:
      return DispatchKey::MTIA;
    case Backend::PrivateUse1:
      return DispatchKey::PrivateUse1;
    default:
      throw std::runtime_error("Unknown backend");
  }
}
// Maps a Backend to the DeviceType it runs on. Several backends share one
// device type (e.g. CPU, MkldnnCPU, SparseCPU, SparseCsrCPU and QuantizedCPU
// are all DeviceType::CPU). Throws (via TORCH_CHECK) on Undefined or
// unrecognized values.
static inline DeviceType backendToDeviceType(Backend b) {
  switch (b) {
    case Backend::CPU:
    case Backend::MkldnnCPU:
    case Backend::SparseCPU:
    case Backend::SparseCsrCPU:
    case Backend::QuantizedCPU:
      return DeviceType::CPU;
    case Backend::CUDA:
    case Backend::SparseCUDA:
    case Backend::QuantizedCUDA:
    case Backend::SparseCsrCUDA:
      return DeviceType::CUDA;
    case Backend::HIP:
      return DeviceType::HIP;
    case Backend::VE:
      return DeviceType::VE;
    case Backend::FPGA:
      return DeviceType::FPGA;
    case Backend::ORT:
      return DeviceType::ORT;
    case Backend::XLA:
      return DeviceType::XLA;
    case Backend::Lazy:
      return DeviceType::Lazy;
    case Backend::SparseHIP:
      return DeviceType::HIP;
    case Backend::SparseVE:
      return DeviceType::VE;
    case Backend::IPU:
      return DeviceType::IPU;
    case Backend::XPU:
    case Backend::SparseXPU:
    case Backend::QuantizedXPU:
      return DeviceType::XPU;
    case Backend::Vulkan:
      return DeviceType::Vulkan;
    case Backend::Metal:
      return DeviceType::Metal;
    case Backend::Meta:
      return DeviceType::Meta;
    case Backend::MPS:
      return DeviceType::MPS;
    case Backend::HPU:
      return DeviceType::HPU;
    case Backend::MTIA:
      return DeviceType::MTIA;
    case Backend::PrivateUse1:
    case Backend::SparsePrivateUse1:
    case Backend::QuantizedPrivateUse1:
      return DeviceType::PrivateUse1;
    case Backend::Undefined:
      TORCH_CHECK(false, "Undefined backend is not a valid device type");
    default:
      TORCH_CHECK(false, "Unknown backend");
  }
}
// TODO: This probably shouldn't actually be static inline
// Human-readable name of a Backend. Never throws: unrecognized values map to
// "UNKNOWN_BACKEND".
static inline const char* toString(Backend b) {
  switch (b) {
    case Backend::CPU:
      return "CPU";
    case Backend::CUDA:
      return "CUDA";
    case Backend::HIP:
      return "HIP";
    case Backend::VE:
      return "VE";
    case Backend::FPGA:
      return "FPGA";
    case Backend::XPU:
      return "XPU";
    case Backend::IPU:
      return "IPU";
    case Backend::ORT:
      return "ORT";
    case Backend::XLA:
      return "XLA";
    case Backend::Lazy:
      return "Lazy";
    case Backend::MPS:
      return "MPS";
    case Backend::SparseCPU:
      return "SparseCPU";
    case Backend::SparseCUDA:
      return "SparseCUDA";
    case Backend::SparseHIP:
      return "SparseHIP";
    case Backend::SparseVE:
      return "SparseVE";
    case Backend::SparseXPU:
      return "SparseXPU";
    case Backend::SparsePrivateUse1:
      return "SparsePrivateUse1";
    case Backend::SparseCsrCPU:
      return "SparseCsrCPU";
    case Backend::SparseCsrCUDA:
      return "SparseCsrCUDA";
    case Backend::MkldnnCPU:
      return "MkldnnCPU";
    case Backend::Vulkan:
      return "Vulkan";
    case Backend::Metal:
      return "Metal";
    case Backend::Meta:
      return "Meta";
    case Backend::QuantizedCPU:
      return "QuantizedCPU";
    case Backend::QuantizedCUDA:
      return "QuantizedCUDA";
    case Backend::QuantizedXPU:
      return "QuantizedXPU";
    case Backend::QuantizedPrivateUse1:
      return "QuantizedPrivateUse1";
    case Backend::HPU:
      return "HPU";
    case Backend::MTIA:
      return "MTIA";
    case Backend::PrivateUse1:
      // NB: spelled "PrivateUseOne" here, unlike the enumerator name.
      return "PrivateUseOne";
    default:
      return "UNKNOWN_BACKEND";
  }
}
// True for the Sparse* backends; the SparseCsr* backends are reported
// separately by isSparseCsr() below.
static inline bool isSparse(Backend b) {
  return b == Backend::SparseCPU || b == Backend::SparseCUDA ||
      b == Backend::SparseHIP || b == Backend::SparseVE ||
      b == Backend::SparseXPU || b == Backend::SparsePrivateUse1;
}
// True for the compressed-sparse (SparseCsr*) backends only.
static inline bool isSparseCsr(Backend b) {
  return b == Backend::SparseCsrCPU || b == Backend::SparseCsrCUDA;
}
} // namespace c10
| 10,056
| 27.652422
| 80
|
h
|
null |
pytorch-main/c10/core/CPUAllocator.h
|
#pragma once
#include <cstring>
#include <mutex>
#include <unordered_map>
#include <c10/core/Allocator.h>
#include <c10/util/Flags.h>
// TODO: rename to c10
C10_DECLARE_bool(caffe2_report_cpu_memory_usage);
namespace c10 {
using MemoryDeleter = void (*)(void*);
// A helper function that is basically doing nothing.
C10_API void NoDelete(void*);
// A simple struct that is used to report C10's memory allocation,
// deallocation status and out-of-memory events to the profiler
class C10_API ProfiledCPUMemoryReporter {
 public:
  ProfiledCPUMemoryReporter() = default;
  // Records an allocation of `nbytes` at `ptr`.
  void New(void* ptr, size_t nbytes);
  // Records a failed attempt to allocate `nbytes`.
  void OutOfMemory(size_t nbytes);
  // Records the deallocation of `ptr`.
  void Delete(void* ptr);

 private:
  // Presumably serializes access to the bookkeeping below — confirm in the
  // out-of-line definitions.
  std::mutex mutex_;
  // Maps each live allocation to its size so Delete can report it.
  std::unordered_map<void*, size_t> size_table_;
  size_t allocated_ = 0; // running total of outstanding bytes
  size_t log_cnt_ = 0; // count used to throttle/track log output
};
C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter();
// Get the CPU Allocator.
C10_API at::Allocator* GetCPUAllocator();
// Sets the CPU allocator to the given allocator: the caller gives away the
// ownership of the pointer.
C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0);
// Get the Default CPU Allocator
C10_API at::Allocator* GetDefaultCPUAllocator();
// Get the Default Mobile CPU Allocator
C10_API at::Allocator* GetDefaultMobileCPUAllocator();
// The CPUCachingAllocator is experimental and might disappear in the future.
// The only place that uses it is in StaticRuntime.
// Set the CPU Caching Allocator
C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0);
// Get the CPU Caching Allocator
C10_API Allocator* GetCPUCachingAllocator();
} // namespace c10
| 1,638
| 27.258621
| 77
|
h
|
null |
pytorch-main/c10/core/CompileTimeFunctionPointer.h
|
#pragma once
#include <c10/util/TypeTraits.h>
namespace c10 {
/**
* Represent a function pointer as a C++ type.
* This allows using the function pointer as a type
* in a template and calling it from inside the template
* allows the compiler to inline the call because it
* knows the function pointer at compile time.
*
* Example 1:
* int add(int a, int b) {return a + b;}
* using Add = TORCH_FN_TYPE(add);
* template<class Func> struct Executor {
* int execute(int a, int b) {
* return Func::func_ptr()(a, b);
* }
* };
* Executor<Add> executor;
* EXPECT_EQ(3, executor.execute(1, 2));
*
* Example 2:
* int add(int a, int b) {return a + b;}
* template<class Func> int execute(Func, int a, int b) {
* return Func::func_ptr()(a, b);
* }
* EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2));
*/
template <class FuncType_, FuncType_* func_ptr_>
struct CompileTimeFunctionPointer final {
  static_assert(
      guts::is_function_type<FuncType_>::value,
      "TORCH_FN can only wrap function types.");
  using FuncType = FuncType_;

  // Returns the wrapped pointer. Because it is constexpr and the pointer is
  // a template argument, the call target is known at compile time and calls
  // through it can be inlined.
  static constexpr FuncType* func_ptr() {
    return func_ptr_;
  }
};
// Trait: true iff T is an instantiation of CompileTimeFunctionPointer.
template <class T>
struct is_compile_time_function_pointer : std::false_type {};
template <class FuncType, FuncType* func_ptr>
struct is_compile_time_function_pointer<
    CompileTimeFunctionPointer<FuncType, func_ptr>> : std::true_type {};
} // namespace c10
#define TORCH_FN_TYPE(func) \
::c10::CompileTimeFunctionPointer< \
std::remove_pointer_t<std::remove_reference_t<decltype(func)>>, \
func>
#define TORCH_FN(func) TORCH_FN_TYPE(func)()
| 1,677
| 28.438596
| 72
|
h
|
null |
pytorch-main/c10/core/CopyBytes.h
|
#pragma once
#include <c10/core/Device.h>
namespace c10 {
using CopyBytesFunction = void (*)(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device);
// Registers a synchronous (and optionally an asynchronous) byte-copy routine
// for the (from, to) device-type pair. Instantiated as a static object by
// the REGISTER_COPY_BYTES_FUNCTION macro so registration happens during
// static initialization.
struct C10_API _CopyBytesFunctionRegisterer {
  _CopyBytesFunctionRegisterer(
      DeviceType from,
      DeviceType to,
      CopyBytesFunction func_sync,
      CopyBytesFunction func_async = nullptr);
};
#define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) \
namespace { \
static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( \
g_copy_function)(from, to, __VA_ARGS__); \
}
/*
* WARNING: Implementations for this function are currently registered from
* ATen and caffe2, not yet from c10. Don't use this if not either ATen
* or caffe2 is present as well.
* We can't move them yet, because the CUDA implementations aren't unified yet
* between ATen and caffe2.
* We're planning to move the implementations into c10/backend/xxx
* to make c10 self contained again.
*/
C10_API void CopyBytes(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device,
bool async);
} // namespace c10
| 1,229
| 26.333333
| 78
|
h
|
null |
pytorch-main/c10/core/DefaultTensorOptions.h
|
#pragma once
#include <c10/core/Backend.h>
#include <c10/core/Device.h>
#include <c10/core/Layout.h>
#include <c10/core/ScalarType.h>
namespace c10 {
struct TensorOptions;
/// Like TensorOptions, but all fields are guaranteed to be filled.
struct DefaultTensorOptions {
  DefaultTensorOptions() = default;

  // Accessors for the four always-populated fields. Defaults: float dtype,
  // CPU device, strided layout, requires_grad == false.
  caffe2::TypeMeta dtype() const noexcept {
    return dtype_;
  }
  Device device() const noexcept {
    return device_;
  }
  Layout layout() const noexcept {
    return layout_;
  }
  bool requires_grad() const noexcept {
    return requires_grad_;
  }

  // Overwrites fields with any that are explicitly set in `options`.
  // Defined in TensorOptions.h
  inline DefaultTensorOptions& merge(const TensorOptions& options);

 private:
  // Field sizes noted to document the struct's packing.
  caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 64-bit
  Device device_ = at::kCPU; // 32-bit
  Layout layout_ = at::kStrided; // 8-bit
  bool requires_grad_ = false; // 8-bit
};
// Returns the process-wide defaults (float dtype, CPU device, strided
// layout, requires_grad == false). The instance is constructed lazily on
// first call and lives for the remainder of the program.
inline const DefaultTensorOptions& getDefaultTensorOptions() {
  static const DefaultTensorOptions kDefaults{};
  return kDefaults;
}
} // namespace c10
| 1,032
| 21.955556
| 70
|
h
|
null |
pytorch-main/c10/core/Device.h
|
#pragma once
#include <c10/core/DeviceType.h>
#include <c10/macros/Export.h>
#include <c10/util/Exception.h>
#include <cstddef>
#include <functional>
#include <iosfwd>
#include <string>
namespace c10 {
/// An index representing a specific device; e.g., the 1 in GPU 1.
/// A DeviceIndex is not independently meaningful without knowing
/// the DeviceType it is associated; try to use Device rather than
/// DeviceIndex directly.
using DeviceIndex = int8_t;
/// Represents a compute device on which a tensor is located. A device is
/// uniquely identified by a type, which specifies the type of machine it is
/// (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the
/// specific compute device when there is more than one of a certain type. The
/// device index is optional, and in its defaulted state represents (abstractly)
/// "the current device". Further, there are two constraints on the value of the
/// device index, if one is explicitly stored:
/// 1. A negative index represents the current device, a non-negative index
/// represents a specific, concrete device,
/// 2. When the device type is CPU, the device index must be zero.
struct C10_API Device final {
  using Type = DeviceType;

  /// Constructs a new `Device` from a `DeviceType` and an optional device
  /// index.
  /* implicit */ Device(DeviceType type, DeviceIndex index = -1)
      : type_(type), index_(index) {
    validate();
  }

  /// Constructs a `Device` from a string description, for convenience.
  /// The string supplied must follow the following schema:
  /// `(cpu|cuda)[:<device-index>]`
  /// where `cpu` or `cuda` specifies the device type, and
  /// `:<device-index>` optionally specifies a device index.
  /* implicit */ Device(const std::string& device_string);

  /// Returns true if the type and index of this `Device` matches that of
  /// `other`.
  bool operator==(const Device& other) const noexcept {
    return this->type_ == other.type_ && this->index_ == other.index_;
  }

  /// Returns true if the type or index of this `Device` differs from that of
  /// `other`.
  bool operator!=(const Device& other) const noexcept {
    return !(*this == other);
  }

  /// Sets the device index.
  /// NOTE(review): bypasses validate(), so unlike the constructor this does
  /// not re-check the index invariants.
  void set_index(DeviceIndex index) {
    index_ = index;
  }

  /// Returns the type of device this is.
  DeviceType type() const noexcept {
    return type_;
  }

  /// Returns the optional index.
  DeviceIndex index() const noexcept {
    return index_;
  }

  /// Returns true if the device has a non-default index.
  bool has_index() const noexcept {
    return index_ != -1;
  }

  /// Return true if the device is of CUDA type.
  bool is_cuda() const noexcept {
    return type_ == DeviceType::CUDA;
  }

  /// Return true if the device is of PrivateUse1 type.
  /// NOTE(review): identical to is_privateuse1() below; both spellings are
  /// kept, presumably for caller compatibility.
  bool is_privateuseone() const noexcept {
    return type_ == DeviceType::PrivateUse1;
  }

  /// Return true if the device is of MPS type.
  bool is_mps() const noexcept {
    return type_ == DeviceType::MPS;
  }

  /// Return true if the device is of HIP type.
  bool is_hip() const noexcept {
    return type_ == DeviceType::HIP;
  }

  /// Return true if the device is of VE type.
  bool is_ve() const noexcept {
    return type_ == DeviceType::VE;
  }

  /// Return true if the device is of XPU type.
  bool is_xpu() const noexcept {
    return type_ == DeviceType::XPU;
  }

  /// Return true if the device is of IPU type.
  bool is_ipu() const noexcept {
    return type_ == DeviceType::IPU;
  }

  /// Return true if the device is of XLA type.
  bool is_xla() const noexcept {
    return type_ == DeviceType::XLA;
  }

  /// Return true if the device is of HPU type.
  bool is_hpu() const noexcept {
    return type_ == DeviceType::HPU;
  }

  /// Return true if the device is of Lazy type.
  bool is_lazy() const noexcept {
    return type_ == DeviceType::Lazy;
  }

  /// Return true if the device is of Vulkan type.
  bool is_vulkan() const noexcept {
    return type_ == DeviceType::Vulkan;
  }

  /// Return true if the device is of Metal type.
  bool is_metal() const noexcept {
    return type_ == DeviceType::Metal;
  }

  /// Return true if the device is of ORT type.
  bool is_ort() const noexcept {
    return type_ == DeviceType::ORT;
  }

  /// Return true if the device is of META type.
  bool is_meta() const noexcept {
    return type_ == DeviceType::Meta;
  }

  /// Return true if the device is of CPU type.
  bool is_cpu() const noexcept {
    return type_ == DeviceType::CPU;
  }

  /// Return true if the device is of PrivateUse1 type.
  bool is_privateuse1() const noexcept {
    return type_ == DeviceType::PrivateUse1;
  }

  /// Return true if the device supports arbitrary strides.
  /// (IPU, XLA, Lazy and MTIA are the exceptions.)
  bool supports_as_strided() const noexcept {
    return type_ != DeviceType::IPU && type_ != DeviceType::XLA &&
        type_ != DeviceType::Lazy && type_ != DeviceType::MTIA;
  }

  /// Same string as returned from operator<<.
  std::string str() const;

 private:
  DeviceType type_;
  DeviceIndex index_ = -1;

  // Checks the two class invariants: index is -1 or non-negative, and a CPU
  // device never has a positive index.
  void validate() {
    // Removing these checks in release builds noticeably improves
    // performance in micro-benchmarks.
    // This is safe to do, because backends that use the DeviceIndex
    // have a later check when we actually try to switch to that device.
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        index_ == -1 || index_ >= 0,
        "Device index must be -1 or non-negative, got ",
        (int)index_);
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        !is_cpu() || index_ <= 0,
        "CPU device index must be -1 or zero, got ",
        (int)index_);
  }
};
C10_API std::ostream& operator<<(std::ostream& stream, const Device& device);
} // namespace c10
namespace std {
template <>
struct hash<c10::Device> {
  size_t operator()(c10::Device d) const noexcept {
    // The bit-packing below relies on each field fitting in one byte; these
    // asserts keep it in sync with the c10 typedefs.
    static_assert(sizeof(c10::DeviceType) == 1, "DeviceType is not 8-bit");
    static_assert(sizeof(c10::DeviceIndex) == 1, "DeviceIndex is not 8-bit");
    // Note [Hazard when concatenating signed integers]
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // Route each (possibly negative, e.g. index == -1) field through its
    // same-width unsigned type before widening, so sign extension can never
    // smear 1-bits across the upper half of the packed value.
    const auto type_bits =
        static_cast<uint32_t>(static_cast<uint8_t>(d.type()));
    const auto index_bits =
        static_cast<uint32_t>(static_cast<uint8_t>(d.index()));
    const uint32_t packed = (type_bits << 16) | index_bits;
    return std::hash<uint32_t>{}(packed);
  }
};
} // namespace std
| 6,883
| 30.87037
| 80
|
h
|
null |
pytorch-main/c10/core/DeviceArray.h
|
#include <c10/core/Allocator.h>
namespace c10 {
template <typename T>
class DeviceArray {
public:
DeviceArray(c10::Allocator& allocator, size_t size)
: data_ptr_(allocator.allocate(size * sizeof(T))) {
static_assert(std::is_trivial<T>::value, "T must be a trivial type");
TORCH_INTERNAL_ASSERT(
0 == (reinterpret_cast<intptr_t>(data_ptr_.get()) % alignof(T)),
"c10::DeviceArray: Allocated memory is not aligned for this data type");
}
T* get() {
return static_cast<T*>(data_ptr_.get());
}
private:
c10::DataPtr data_ptr_;
};
} // namespace c10
| 595
| 22.84
| 80
|
h
|
null |
pytorch-main/c10/core/DeviceGuard.h
|
#pragma once
#include <c10/core/impl/InlineDeviceGuard.h>
namespace c10 {
/// RAII guard that sets a certain default device in its constructor, and
/// changes it back to the device that was originally active upon destruction.
///
/// The device is always reset to the one that was active at the time of
/// construction of the guard. Even if you `set_device` after construction, the
/// destructor will still reset the device to the one that was active at
/// construction time.
///
/// This device guard does NOT have an uninitialized state; it is guaranteed
/// to reset a device on exit. If you are in a situation where you *might*
/// want to setup a guard (i.e., are looking for the moral equivalent
/// of optional<DeviceGuard>), see OptionalDeviceGuard.
class DeviceGuard {
 public:
  /// No default constructor; see Note [Omitted default constructor from RAII]
  explicit DeviceGuard() = delete;

  /// Set the current device to the passed Device.
  explicit DeviceGuard(Device device) : guard_(device) {}

  /// This constructor is for testing only.
  explicit DeviceGuard(
      Device device,
      const impl::DeviceGuardImplInterface* impl)
      : guard_(device, impl) {}

  /// Copy is disallowed
  DeviceGuard(const DeviceGuard&) = delete;
  DeviceGuard& operator=(const DeviceGuard&) = delete;

  /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
  /// which is required for moves on types with nontrivial destructors.
  DeviceGuard(DeviceGuard&& other) = delete;
  DeviceGuard& operator=(DeviceGuard&& other) = delete;

  /// Sets the device to the given one. The specified device must be consistent
  /// with the device type originally specified during guard construction.
  ///
  /// TODO: The consistency check here is inconsistent with StreamGuard's
  /// behavior with set_stream, where a stream on a different device than
  /// the original one isn't an error; we just reset the stream and then
  /// switch devices.
  void reset_device(at::Device device) {
    guard_.reset_device(device);
  }

  /// This method is for testing only.
  void reset_device(
      at::Device device,
      const impl::DeviceGuardImplInterface* impl) {
    guard_.reset_device(device, impl);
  }

  /// Sets the device index to the given one. The device type is inferred
  /// from the original device type the guard was constructed with.
  void set_index(DeviceIndex index) {
    guard_.set_index(index);
  }

  /// Returns the device that was set at the time the guard was constructed.
  Device original_device() const {
    return guard_.original_device();
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device.
  Device current_device() const {
    return guard_.current_device();
  }

 private:
  // All work is delegated to the type-erased inline guard; this class only
  // fixes the API surface (see Note [Whither the DeviceGuard boilerplate]).
  impl::InlineDeviceGuard<impl::VirtualGuardImpl> guard_;
};
/**
* A OptionalDeviceGuard is an RAII class that sets a device to some value on
* initialization, and resets the device to its original value on destruction.
* Morally, a OptionalDeviceGuard is equivalent to optional<DeviceGuard>, but
* with extra constructors and methods as appropriate.
*
* Besides its obvious use (optionally applying a DeviceGuard),
* OptionalDeviceGuard is often also used for the following idiom:
*
* OptionalDeviceGuard g;
* for (const auto& t : tensors) {
* g.set_device(t.device());
* do_something_with(t);
* }
*
* This usage is marginally more efficient than constructing a DeviceGuard every
* iteration of the for loop, as it avoids an unnecessary device reset.
*
* Unlike DeviceGuard, a OptionalDeviceGuard may be uninitialized. This occurs
* when you use the nullary constructor, or pass a nullopt to the constructor.
* Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the
* original device was and they do not reset on destruction. This is why
* original_device() and current_device() return optional<Device> rather than
* Device (as they do in DeviceGuard), and also is why we didn't just
* provide OptionalDeviceGuard by default and hide DeviceGuard from users.
*
* The semantics of an OptionalDeviceGuard are exactly explained by thinking
* of it as an optional<DeviceGuard>. In particular, an initialized
* OptionalDeviceGuard doesn't restore device to its value at construction; it
* restores device to its value *at initialization*. So if you have the
* program:
*
* setDevice(1);
* OptionalDeviceGuard g;
* setDevice(2);
* g.reset_device(Device(DeviceType::CUDA, 3)); // initializes!
*
* On destruction, g will reset device to 2, rather than 1.
*
* An uninitialized OptionalDeviceGuard is distinct from a (initialized)
* DeviceGuard whose original_device_ and current_device_ match, since the
* DeviceGuard will still reset the device to original_device_.
*/
class OptionalDeviceGuard {
 public:
  /// Create an uninitialized guard. Set the guard later using reset_device.
  explicit OptionalDeviceGuard() = default;

  /// Initialize the guard, setting the current device to the passed Device.
  explicit OptionalDeviceGuard(Device device) : guard_(device) {}

  /// Initialize the guard if a Device is passed; otherwise leave the
  /// guard uninitialized.
  explicit OptionalDeviceGuard(optional<Device> device) : guard_(device) {}

  /// Constructor for testing only.
  explicit OptionalDeviceGuard(
      Device device,
      const impl::DeviceGuardImplInterface* impl)
      : guard_(device, impl) {}

  /// Copy is disallowed
  OptionalDeviceGuard(const OptionalDeviceGuard&) = delete;
  OptionalDeviceGuard& operator=(const OptionalDeviceGuard&) = delete;

  /// Move is disallowed
  /// See Note [Explicit initialization of optional fields]
  /// and // Note [Move construction for RAII guards is tricky]
  /// for rationale.
  OptionalDeviceGuard(OptionalDeviceGuard&& other) = delete;
  OptionalDeviceGuard& operator=(OptionalDeviceGuard&& other) = delete;

  /// Sets the device to the given one. The specified device must be consistent
  /// with the device type originally specified during guard construction.
  /// Initializes the guard if it was previously uninitialized.
  void reset_device(at::Device device) {
    guard_.reset_device(device);
  }

  /// For testing only
  void reset_device(
      at::Device device,
      const impl::DeviceGuardImplInterface* impl) {
    guard_.reset_device(device, impl);
  }

  /// Returns the device that was set at the time the guard was constructed.
  /// nullopt if the guard is uninitialized.
  optional<Device> original_device() const {
    return guard_.original_device();
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via reset_device.
  optional<Device> current_device() const {
    return guard_.current_device();
  }

 private:
  // Delegates to the type-erased inline optional guard; explicitly
  // brace-initialized per Note [Explicit initialization of optional fields].
  impl::InlineOptionalDeviceGuard<impl::VirtualGuardImpl> guard_{};
};
// Note [Whither the DeviceGuard boilerplate]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Design note: in principle, we could avoid these wrappers using:
//
// using DeviceGuard = impl::InlineDeviceGuard<impl::VirtualGuardImpl>;
// using OptionalDeviceGuard =
// impl::InlineOptionalDeviceGuard<impl::VirtualGuardImpl>;
//
// But the error messages are worse, and our users can't just look at the
// header file to find out what's going on. Furthermore, for specializations
// like CUDAStreamGuard, it can be profitable to replace some interfaces with
// refined types (e.g., return CUDAStream instead of Stream). So, we eat
// the boilerplate and write out the API explicitly.
} // namespace c10
| 7,554
| 37.545918
| 80
|
h
|
null |
pytorch-main/c10/core/DeviceType.h
|
#pragma once
// This is directly synchronized with caffe2/proto/caffe2.proto, but
// doesn't require me to figure out how to get Protobuf headers into
// ATen/core (which would require a lot more build system hacking.)
// If you modify me, keep me synchronized with that file.
#include <c10/macros/Export.h>
#include <functional>
#include <ostream>
namespace c10 {
// These contains all device types that also have a BackendComponent
// and therefore participate in per-backend functionality dispatch keys.
// This is most backends except PrivateUse2 and PrivateUse3
#define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) \
_(CPU, extra) \
_(CUDA, extra) \
_(HIP, extra) \
_(XLA, extra) \
_(MPS, extra) \
_(IPU, extra) \
_(XPU, extra) \
_(HPU, extra) \
_(VE, extra) \
_(Lazy, extra) \
_(Meta, extra) \
_(MTIA, extra) \
_(PrivateUse1, extra)
// Numeric values are part of the serialized format (synchronized with
// caffe2.proto, per the comment above) — never renumber existing entries.
enum class DeviceType : int8_t {
  CPU = 0, // CPU.
  CUDA = 1, // CUDA.
  MKLDNN = 2, // Reserved for explicit MKLDNN
  OPENGL = 3, // OpenGL
  OPENCL = 4, // OpenCL
  IDEEP = 5, // IDEEP.
  HIP = 6, // AMD HIP
  FPGA = 7, // FPGA
  ORT = 8, // ONNX Runtime / Microsoft
  XLA = 9, // XLA / TPU
  Vulkan = 10, // Vulkan
  Metal = 11, // Metal
  XPU = 12, // XPU
  MPS = 13, // MPS
  Meta = 14, // Meta (tensors with no data)
  HPU = 15, // HPU / HABANA
  VE = 16, // SX-Aurora / NEC
  Lazy = 17, // Lazy Tensors
  IPU = 18, // Graphcore IPU
  MTIA = 19, // Meta training and inference devices
  PrivateUse1 = 20, // PrivateUse1 device
  // NB: If you add more devices:
  //  - Change the implementations of DeviceTypeName and isValidDeviceType
  //    in DeviceType.cpp
  //  - Change the number below
  COMPILE_TIME_MAX_DEVICE_TYPES = 21,
};
// Short aliases for the enumerators above.
constexpr DeviceType kCPU = DeviceType::CPU;
constexpr DeviceType kCUDA = DeviceType::CUDA;
constexpr DeviceType kHIP = DeviceType::HIP;
constexpr DeviceType kFPGA = DeviceType::FPGA;
constexpr DeviceType kORT = DeviceType::ORT;
constexpr DeviceType kXLA = DeviceType::XLA;
constexpr DeviceType kMPS = DeviceType::MPS;
constexpr DeviceType kMeta = DeviceType::Meta;
constexpr DeviceType kVulkan = DeviceType::Vulkan;
constexpr DeviceType kMetal = DeviceType::Metal;
constexpr DeviceType kXPU = DeviceType::XPU;
constexpr DeviceType kHPU = DeviceType::HPU;
constexpr DeviceType kVE = DeviceType::VE;
constexpr DeviceType kLazy = DeviceType::Lazy;
constexpr DeviceType kIPU = DeviceType::IPU;
constexpr DeviceType kMTIA = DeviceType::MTIA;
constexpr DeviceType kPrivateUse1 = DeviceType::PrivateUse1;

// define explicit int constant
constexpr int COMPILE_TIME_MAX_DEVICE_TYPES =
    static_cast<int>(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES);

static_assert(
    COMPILE_TIME_MAX_DEVICE_TYPES <= 21,
    "Hey! You seem to be adding a lot of new DeviceTypes. The intent was "
    "for this constant to reflect the actual number of DeviceTypes we support "
    "in PyTorch; it's important that this number is not too large as we "
    "use this to allocate stack arrays in some places in our code. If you "
    "are indeed just adding the 20th device type, feel free to change "
    "the check to 32; but if you are adding some sort of extensible device "
    "types registration, please be aware that you are affecting code that "
    "this number is small.  Try auditing uses of this constant.");

// Human-readable name for a device type; `lower_case` selects the casing.
C10_API std::string DeviceTypeName(DeviceType d, bool lower_case = false);

C10_API bool isValidDeviceType(DeviceType d);

C10_API std::ostream& operator<<(std::ostream& stream, DeviceType type);

// Hooks for out-of-tree backends occupying the PrivateUse1 slot.
C10_API void register_privateuse1_backend(std::string backend_name);
C10_API std::string get_privateuse1_backend(bool lower_case = true);
} // namespace c10
namespace std {
template <>
struct hash<c10::DeviceType> {
  // Hash a DeviceType via its underlying integer representation, delegating
  // to std::hash<int> so values match the integral hash exactly.
  std::size_t operator()(c10::DeviceType k) const {
    const int underlying = static_cast<int>(k);
    return std::hash<int>{}(underlying);
  }
};
} // namespace std
namespace torch {
using c10::DeviceType;
}
| 4,297
| 35.423729
| 79
|
h
|
null |
pytorch-main/c10/core/DynamicCast.h
|
#pragma once
#include <c10/core/ScalarType.h>
#include <c10/macros/Macros.h>
#include <c10/util/Load.h>
#include <c10/util/TypeCast.h>
namespace c10 {
// Dynamic type casting utils:
// - fetch_and_cast
// - cast_and_store
//
// fetch_and_cast fetch a value with dynamic type specified by a ScalarType
// from a void pointer and cast it to a static type.
//
// cast_and_store casts a static typed value into dynamic type specified
// by a ScalarType, and store it into a void pointer.
//
// NOTE:
//
// Dynamic casting allows us to support type promotion without blowing up
// the combination space: For example, without dynamic cast, in order to
// implement `add_` with type promotion, we would need something like
//
// AT_DISPATCH_ALL_TYPES(output.dtype(),
// AT_DISPATCH_ALL_TYPES(input1.dtype(),
// AT_DISPATCH_ALL_TYPES(input2.dtype(),
// [](arg0_t a, arg1_t b) -> out_t { return a + b; }
// )
// )
// )
//
// If we support N dtypes, the above code would generate the a+b kernel for
// all the N * N * N different supported types, the compilation time and
// binary size would become horrible.
//
// Dynamic casting might sounds like a bad idea in terms of performance.
// Especially if you ever do it in a loop, you are going to do a billion tests.
// But in practice it is not as bad as it might look:
//
// - on CPU, this is a branch that always has the same outcome, therefore
// hopefully the branch predictor could do the job pretty well
// - on GPU, these branches will not diverge, so we could still have the same
// warp executing the same line of code
// - Most kernels, like `add`, are bandwidth bound, adding a few clock cycles to
// check an integer does not hurt the performance much because the ALUs would
// wait for load instructions anyway.
//
// For the discussion and benchmark, refer to:
// - https://github.com/pytorch/pytorch/pull/28343
// - https://github.com/pytorch/pytorch/pull/28344
// - https://github.com/pytorch/pytorch/pull/28345
//
#ifdef C10_HOST_DEVICE
#define ERROR_UNSUPPORTED_CAST CUDA_KERNEL_ASSERT(false);
#else
#define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type");
#endif
// Fetch a value with dynamic type src_type from ptr, and cast it to static type
// dest_t.
#define FETCH_AND_CAST_CASE(type, scalartype) \
case ScalarType::scalartype: \
return c10::convert<dest_t>(c10::load<type>(ptr));
// Fetches a value of dynamic type `src_type` from `ptr` and converts it to
// the static type `dest_t`. Unsupported source types trip
// ERROR_UNSUPPORTED_CAST (assert on device, TORCH_CHECK on host).
template <typename dest_t>
C10_HOST_DEVICE inline dest_t fetch_and_cast(
    const ScalarType src_type,
    const void* ptr) {
  switch (src_type) {
    // Expands to one `case` per scalar type (see FETCH_AND_CAST_CASE above);
    // each case loads the concrete type from `ptr` and converts it.
    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(FETCH_AND_CAST_CASE)
    default:
      ERROR_UNSUPPORTED_CAST
  }
  return dest_t(0); // just to avoid compiler warning
}
// Cast a value with static type src_t into dynamic dest_type, and store it to
// ptr.
#define CAST_AND_STORE_CASE(type, scalartype) \
case ScalarType::scalartype: \
*(type*)ptr = c10::convert<type>(value); \
return;
// Converts `value` (static type `src_t`) to the dynamic type `dest_type`
// and stores it at `ptr`.
template <typename src_t>
C10_HOST_DEVICE inline void cast_and_store(
    const ScalarType dest_type,
    void* ptr,
    src_t value) {
  switch (dest_type) {
    // Each generated case converts, stores, and returns
    // (see CAST_AND_STORE_CASE above).
    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(CAST_AND_STORE_CASE)
    default:;
  }
  // Only reached when `dest_type` matched no case above.
  ERROR_UNSUPPORTED_CAST
}
// Quantized int types cannot participate in dynamic casting: the
// specializations generated below assert that the dynamic type matches the
// static type exactly, then do a plain load/store with no conversion.
#define DEFINE_UNCASTABLE(T, scalartype_)                     \
  template <>                                                 \
  C10_HOST_DEVICE inline T fetch_and_cast<T>(                 \
      const ScalarType src_type, const void* ptr) {           \
    CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type);  \
    return c10::load<T>(ptr);                                 \
  }                                                           \
  template <>                                                 \
  C10_HOST_DEVICE inline void cast_and_store<T>(              \
      const ScalarType dest_type, void* ptr, T value) {       \
    CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \
    *(T*)ptr = value;                                         \
  }

AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE)

#undef FETCH_AND_CAST_CASE
#undef CAST_AND_STORE_CASE
#undef DEFINE_UNCASTABLE
#undef ERROR_UNSUPPORTED_CAST
} // namespace c10
| 4,192
| 33.941667
| 80
|
h
|
null |
pytorch-main/c10/core/Event.h
|
#pragma once
#include <c10/core/impl/InlineEvent.h>
#include <c10/core/impl/VirtualGuardImpl.h>
namespace c10 {
/**
* A backend-generic movable, not copyable, not thread-safe event.
*
* The design of this event follows that of CUDA and HIP events. These events
* are recorded and waited on by streams and can be rerecorded to,
* each rerecording essentially creating a new version of the event.
* For example, if (in CPU time), stream X is asked to record E,
* stream Y waits on E, and stream X is asked to record E again, then Y will
* wait for X to finish the first call to record and not the second, because
* it's waiting on the first version of event E, not the second.
* Querying an event only returns the status of its most recent version.
*
* Backend-generic events are implemented by this class and
* impl::InlineEvent. In addition to these events there are also
* some backend-specific events, like ATen's CUDAEvent. Each of these
* classes has its own use.
*
* impl::InlineEvent<...> or a backend-specific event should be
* preferred when the backend is known at compile time and known to
* be compiled. Backend-specific events may have additional functionality.
*
* This Event should be used if a particular backend may not be available,
* or the backend required is not known at compile time.
*
* These generic events are built on top of DeviceGuardImpls, analogous
* to DeviceGuard and InlineDeviceGuard. The name "DeviceGuardImpls,"
* is no longer entirely accurate, as these classes implement the
* backend-specific logic for a generic backend interface.
*
* See DeviceGuardImplInterface.h for a list of all supported flags.
*/
struct Event final {
  // Constructors
  Event() = delete;
  // Creates an (unrecorded) event for the given backend. `_flag` selects
  // backend-specific event behavior; see DeviceGuardImplInterface.h.
  Event(
      const DeviceType _device_type,
      const EventFlag _flag = EventFlag::PYTORCH_DEFAULT)
      : impl_{_device_type, _flag} {}

  // Copy constructor and copy assignment operator (deleted)
  Event(const Event&) = delete;
  Event& operator=(const Event&) = delete;

  // Move constructor and move assignment operator
  Event(Event&& other) noexcept : impl_{std::move(other.impl_)} {}
  Event& operator=(Event&& other) noexcept {
    impl_.swap(std::move(other.impl_));
    return *this;
  }

  // Destructor
  ~Event() = default;

  // Getters (all delegate to the backend-generic impl_)
  // Device (type + index) this event belongs to.
  Device device() const noexcept {
    return Device(device_type(), device_index());
  }
  DeviceType device_type() const noexcept {
    return impl_.device_type();
  }
  DeviceIndex device_index() const noexcept {
    return impl_.device_index();
  }
  // Flag the event was constructed with.
  EventFlag flag() const noexcept {
    return impl_.flag();
  }
  // Whether this event has been marked for recording at least once.
  bool was_marked_for_recording() const noexcept {
    return impl_.was_marked_for_recording();
  }

  /**
   * Calls record() if and only if record() has never been called for this
   * event. Note: because Event is not thread-safe recordOnce() may call
   * record() multiple times if called from multiple threads.
   */
  void recordOnce(const Stream& stream) {
    impl_.recordOnce(stream);
  }

  /**
   * Increments the event's version and enqueues a job with this version
   * in the stream's work queue. When the stream process that job
   * it notifies all streams waiting on / blocked by that version of the
   * event to continue and marks that version as recorded.
   * */
  void record(const Stream& stream) {
    impl_.record(stream);
  }

  /**
   * Does nothing if the event has not been scheduled to be recorded.
   * If the event was previously enqueued to be recorded, a command
   * to wait for the version of the event that exists at the time of this call
   * is inserted in the stream's work queue.
   * When the stream reaches this command it will stop processing
   * additional commands until that version of the event is marked as recorded.
   */
  void block(const Stream& stream) const {
    impl_.block(stream);
  }

  /**
   * Returns true if (and only if)
   *  (1) the event has never been scheduled to be recorded
   *  (2) the current version is marked as recorded.
   * Returns false otherwise.
   */
  bool query() const {
    return impl_.query();
  }

 private:
  // All behavior is implemented by the backend via VirtualGuardImpl.
  impl::InlineEvent<impl::VirtualGuardImpl> impl_;
};
} // namespace c10
| 4,187
| 32.504
| 79
|
h
|
null |
pytorch-main/c10/core/GeneratorImpl.h
|
#pragma once
#include <stdint.h>
#include <mutex>
#include <c10/core/Device.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/core/TensorImpl.h>
#include <c10/macros/Export.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/python_stub.h>
/**
* Note [Generator]
* ~~~~~~~~~~~~~~~~
* A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm
* to generate a seemingly random sequence of numbers, that may be later be used
* in creating a random distribution. Such an engine almost always maintains a
* state and requires a seed to start off the creation of random numbers. Often
* times, users have found it beneficial to be able to explicitly create,
* retain, and destroy PRNG states and also be able to have control over the
* seed value.
*
* A Generator in ATen gives users the ability to read, write and modify a PRNG
* engine. For instance, it does so by letting users seed a PRNG engine, fork
* the state of the engine, etc.
*
* By default, there is one generator per device, and a device's generator is
* lazily created. A user can use the torch.Generator() api to create their own
* generator. Currently torch.Generator() can only create a CPUGeneratorImpl.
*/
/**
* Note [Acquire lock when using random generators]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Generator and its derived classes are NOT thread-safe. Please note that most
* of the places where we have inserted locking for generators are historically
* based, and we haven't actually checked that everything is truly thread safe
* (and it probably isn't). Please use the public mutex_ when using any methods
* from these classes, except for the read-only methods. You can learn about the
* usage by looking into the unittests (aten/src/ATen/cpu_generator_test.cpp)
* and other places where we have used lock_guard.
*
* TODO: Look into changing the threading semantics of Generators in ATen (e.g.,
* making them non-thread safe and instead making the generator state
* splittable, to accommodate forks into other threads).
*/
namespace c10 {
// The default seed is selected to be a large number
// with good distribution of 0s and 1s in bit representation
constexpr uint64_t default_rng_seed_val = 67280421310721;
// Base class for per-backend PRNG engines; see Note [Generator] above.
// Not thread-safe: see Note [Acquire lock when using random generators].
struct C10_API GeneratorImpl : public c10::intrusive_ptr_target {
  // Constructors
  GeneratorImpl(Device device_in, DispatchKeySet key_set);

  // Delete all copy and move assignment in favor of clone()
  // method
  GeneratorImpl(const GeneratorImpl& other) = delete;
  GeneratorImpl(GeneratorImpl&& other) = delete;
  GeneratorImpl& operator=(const GeneratorImpl& other) = delete;

  ~GeneratorImpl() override = default;
  // Deep copy; dispatches to the derived class via clone_impl().
  c10::intrusive_ptr<GeneratorImpl> clone() const;

  // Common methods for all generators
  virtual void set_current_seed(uint64_t seed) = 0;
  virtual void set_offset(uint64_t offset) = 0;
  virtual uint64_t get_offset() const = 0;
  virtual uint64_t current_seed() const = 0;
  virtual uint64_t seed() = 0;
  virtual void set_state(const c10::TensorImpl& new_state) = 0;
  virtual c10::intrusive_ptr<c10::TensorImpl> get_state() const = 0;
  // Device this generator produces random numbers for.
  Device device() const;

  // See Note [Acquire lock when using random generators]
  std::mutex mutex_;

  DispatchKeySet key_set() const {
    return key_set_;
  }

  // Non-owning pointer to the Python wrapper of this generator, if any.
  inline void set_pyobj(PyObject* pyobj) noexcept {
    pyobj_ = pyobj;
  }

  inline PyObject* pyobj() const noexcept {
    return pyobj_;
  }

 protected:
  Device device_;
  DispatchKeySet key_set_;
  PyObject* pyobj_ = nullptr;

  // Implemented by subclasses to deep-copy the concrete generator;
  // called by clone().
  virtual GeneratorImpl* clone_impl() const = 0;
};
namespace detail {
C10_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
} // namespace detail
} // namespace c10
| 3,700
| 33.268519
| 80
|
h
|
null |
pytorch-main/c10/core/GradMode.h
|
#pragma once
#include <c10/core/AutogradState.h>
#include <c10/macros/Export.h>
namespace c10 {
// Thread-local switch controlling whether autograd records operations.
struct C10_API GradMode {
  // Returns the current thread-local grad-mode flag.
  static bool is_enabled();
  // Sets the thread-local grad-mode flag.
  static void set_enabled(bool enabled);
};
// A RAII, thread local (!) guard that enables or disables grad mode upon
// construction, and sets it back to the original value upon destruction.
struct C10_API AutoGradMode {
  // `enabled`: the grad-mode value installed for this guard's lifetime.
  AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) {
    GradMode::set_enabled(enabled);
  }
  ~AutoGradMode() {
    GradMode::set_enabled(prev_mode);
  }
  // Grad-mode value observed at construction; restored on destruction.
  bool prev_mode;
};
// A RAII, thread local (!) guard that stops future operations from building
// gradients. Equivalent to AutoGradMode(false).
struct C10_API NoGradGuard : public AutoGradMode {
  NoGradGuard() : AutoGradMode(/*enabled=*/false) {}
};
// A RAII, thread local (!) guard that enables or disables forward grad mode
// upon construction, and sets it back to the original value upon destruction.
struct C10_API AutoFwGradMode {
  AutoFwGradMode(bool enabled)
      : prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) {
    AutogradState::get_tls_state().set_fw_grad_mode(enabled);
  }
  ~AutoFwGradMode() {
    AutogradState::get_tls_state().set_fw_grad_mode(prev_mode);
  }
  // Forward-grad flag observed at construction; restored on destruction.
  bool prev_mode;
};
} // namespace c10
| 1,253
| 26.866667
| 78
|
h
|
null |
pytorch-main/c10/core/InferenceMode.h
|
#pragma once
#include <c10/core/AutogradState.h>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <c10/macros/Export.h>
namespace c10 {
// A RAII, thread local (!) guard that enables or disables inference mode upon
// construction, and sets it back to the original value upon destruction.
struct C10_API InferenceMode {
  // Note [Expected TLS state in InferenceMode]:
  //   InferenceMode: ADInplaceOrView not in
  //   raw_local_dispatch_key_set.included(),
  //                  Autograd in raw_local_dispatch_key_set.excluded()
  //                  GradMode is disabled.
  //   NormalMode: ADInplaceOrView in raw_local_dispatch_key_set.included(),
  //               Autograd not in raw_local_dispatch_key_set.excluded()
  //               GradMode is enabled by default unless toggled manually
  //               through other APIs, e.g. NoGradGuard.
  //
  // Invariant:
  // - ADInplaceOrView is never in the excluded set
  // - Autograd is never in the included set
  // - Setting InferenceMode will set GradMode accordingly, but not vice versa.
  //
  //  1. Why do we put ADInplaceOrView in included set outside InferenceMode?
  //
  //     Inplace update to inference tensor outside InferenceMode is not
  //     allowed. See Note [Inplace update inference tensor] for more details.
  //     Without going through ADInplaceOrView kernel, we cannot throw error
  //     for `inference_tensor.add_(1)` case.
  //
  // 2. Why not put ADInplaceOrView in the excluded set inside InferenceMode?
  //
  //    For example:
  //    torch::Tensor a = torch::ones({1, 2, 3}).set_requires_grad(true);
  //    torch::Tensor k = a + 2;
  //    {
  //      c10::InferenceMode guard(true);
  //      k.add_(2);
  //    }
  //    `k.add_(2)` still need to go through ADInplaceOrView kernel so that it's
  //    prepared for future autograd.
  //
  // 3. Why does setting InferenceMode also set GradMode?
  //
  //    This is required since InferenceMode is a faster and more restrictive
  //    version of NoGradGuard. All runtime checks using GradMode::is_enabled()
  //    are applicable to InferenceMode as well, e.g.
  //    `tensorTypeInCurrentExecutionContext` in interpreter.cpp.
  InferenceMode(bool enabled = true)
      : prev_mode(AutogradState::get_tls_state()),
        prev_keyset(c10::impl::tls_local_dispatch_key_set()) {
    // Enabling inference mode means disabling grad modes
    // And disabling inference mode means enabling grad modes
    AutogradState::set_tls_state(AutogradState(
        /* grad_mode */ !enabled,
        /* inference_mode */ enabled,
        /* fw_grad_mode */ !enabled,
        /* multithreading_enabled*/ !enabled));
    // Toggle ADInplaceOrView / Autograd keys per the invariants above.
    DispatchKeySet included = enabled
        ? prev_keyset.included_.remove(c10::DispatchKey::ADInplaceOrView)
        : prev_keyset.included_.add(c10::DispatchKey::ADInplaceOrView);
    DispatchKeySet excluded = enabled
        ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset)
        : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset);
    c10::impl::PODLocalDispatchKeySet cur_keyset{};
    cur_keyset.set_included(included);
    cur_keyset.set_excluded(excluded);
    c10::impl::_force_tls_local_dispatch_key_set(cur_keyset);
  }
  // Restores both the autograd TLS and the dispatch-key TLS captured at
  // construction.
  ~InferenceMode() {
    AutogradState::set_tls_state(prev_mode);
    c10::impl::_force_tls_local_dispatch_key_set(prev_keyset);
  }
  // Whether the calling thread is currently inside an InferenceMode guard.
  static bool is_enabled();

 private:
  // TLS snapshots taken at construction; restored by the destructor.
  AutogradState prev_mode;
  c10::impl::LocalDispatchKeySet prev_keyset;
};
} // namespace c10
| 3,487
| 40.035294
| 80
|
h
|
null |
pytorch-main/c10/core/LargeNegativeIntSymNodeImpl.h
|
#include <c10/core/SymNodeImpl.h>
namespace c10 {
// Represents an otherwise unrepresentable large negative integer constant.
// Unlike other SymNodeImpl, this cannot be "dispatched" conventionally,
// as it typically needs to defer to another SymNodeImpl
class C10_API LargeNegativeIntSymNodeImpl : public SymNodeImpl {
 public:
  // Wraps the constant; `val` is the (large negative) integer represented.
  LargeNegativeIntSymNodeImpl(int64_t val) : val_(val) {}

  // This node always represents an integer constant.
  bool is_int() override {
    return true;
  }
  bool is_bool() override {
    return false;
  }
  bool is_float() override {
    return false;
  }
  // Guarding on the int is trivial: the constant is statically known.
  int64_t guard_int(const char* file, int64_t line) override {
    return val_;
  }
  bool guard_bool(const char* file, int64_t line) override {
    TORCH_CHECK(false, "not a bool");
  }
  double guard_float(const char* file, int64_t line) override {
    TORCH_CHECK(false, "not a float");
  }
  // BUGFIX: previously returned `true` (i.e. 1); must return the wrapped
  // value, consistent with guard_int()/str()/large_negative_int().
  int64_t int_() override {
    return val_;
  }
  bool bool_() override {
    return false;
  }
  // A constant always has a concrete hint.
  bool has_hint() override {
    return true;
  }
  std::string str() override {
    return std::to_string(val_);
  }
  int64_t large_negative_int() override {
    return val_;
  }

 private:
  int64_t val_;
};
} // namespace c10
| 1,172
| 22
| 75
|
h
|
null |
pytorch-main/c10/core/Layout.h
|
#pragma once
#include <c10/core/Backend.h>
#include <c10/util/Exception.h>
#include <ostream>
namespace c10 {
enum class Layout : int8_t {
  Strided,
  Sparse,
  SparseCsr,
  Mkldnn,
  SparseCsc,
  SparseBsr,
  SparseBsc,
  // Sentinel: count of valid options, not a real layout.
  NumOptions
};

// Short aliases mirroring the enumerators above.
constexpr auto kStrided = Layout::Strided;
constexpr auto kSparse = Layout::Sparse;
constexpr auto kSparseCsr = Layout::SparseCsr;
constexpr auto kMkldnn = Layout::Mkldnn;
constexpr auto kSparseCsc = Layout::SparseCsc;
constexpr auto kSparseBsr = Layout::SparseBsr;
constexpr auto kSparseBsc = Layout::SparseBsc;
// Maps a Backend to its canonical Layout. Sparse CSR backends are rejected
// because they cover several compressed layouts (Csr/Csc/Bsr/Bsc) and thus
// have no unique mapping; everything unlisted defaults to Strided.
inline Layout layout_from_backend(Backend backend) {
  switch (backend) {
    case Backend::SparseCPU:
    case Backend::SparseCUDA:
    case Backend::SparseHIP:
    case Backend::SparseVE:
    case Backend::SparseXPU:
      return Layout::Sparse;
    case Backend::MkldnnCPU:
      return Layout::Mkldnn;
    case Backend::SparseCsrCPU:
    case Backend::SparseCsrCUDA:
      TORCH_CHECK(
          false,
          "Cannot map Backend SparseCsrCPU|SparseCsrCUDA to a unique layout.");
    default:
      return Layout::Strided;
  }
}
// Streams the human-readable name of a layout; unknown values are an error.
inline std::ostream& operator<<(std::ostream& stream, at::Layout layout) {
  // Resolve the display name first, then do a single stream insertion.
  const char* name = nullptr;
  switch (layout) {
    case at::kStrided:
      name = "Strided";
      break;
    case at::kSparse:
      name = "Sparse";
      break;
    case at::kSparseCsr:
      name = "SparseCsr";
      break;
    case at::kSparseCsc:
      name = "SparseCsc";
      break;
    case at::kSparseBsr:
      name = "SparseBsr";
      break;
    case at::kSparseBsc:
      name = "SparseBsc";
      break;
    case at::kMkldnn:
      name = "Mkldnn";
      break;
    default:
      TORCH_CHECK(false, "Unknown layout");
  }
  return stream << name;
}
} // namespace c10
| 1,682
| 23.042857
| 79
|
h
|
null |
pytorch-main/c10/core/MemoryFormat.h
|
#pragma once
#include <c10/core/Backend.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <ostream>
// Memory format is not the property of a Tensor. It is the way to tell an
// operator how the result should be organized in memory and nothing more. That
// means memory format should never be used as return value for any tensor state
// interrogation functions (internally and externally).
//
// Possible options are:
// Preserve:
// If any of the input tensors is in channels_last format, operator output
// should be in channels_last format
//
// Contiguous:
// Regardless of input tensors format, the output should be contiguous
// Tensor.
//
// ChannelsLast:
// Regardless of input tensors format, the output should be in channels_last
// format.
namespace c10 {
enum class MemoryFormat : int8_t {
  Contiguous,
  Preserve,
  ChannelsLast,
  ChannelsLast3d,
  // Sentinel: count of valid options, not a real memory format.
  NumOptions
};
// If you are seeing this, it means that this call site was not checked if
// the memory format could be preserved, and it was switched to old default
// behaviour of contiguous
#define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format()
// Default memory format used by legacy call sites
// (see LEGACY_CONTIGUOUS_MEMORY_FORMAT above).
inline MemoryFormat get_contiguous_memory_format() {
  return MemoryFormat::Contiguous;
}
// Streams the human-readable name of a memory format; unknown values error.
inline std::ostream& operator<<(
    std::ostream& stream,
    at::MemoryFormat memory_format) {
  // Resolve the display name first, then do a single stream insertion.
  const char* name = nullptr;
  switch (memory_format) {
    case MemoryFormat::Preserve:
      name = "Preserve";
      break;
    case MemoryFormat::Contiguous:
      name = "Contiguous";
      break;
    case MemoryFormat::ChannelsLast:
      name = "ChannelsLast";
      break;
    case MemoryFormat::ChannelsLast3d:
      name = "ChannelsLast3d";
      break;
    default:
      TORCH_CHECK(false, "Unknown memory format ", memory_format);
  }
  return stream << name;
}
// Note: Hardcoded the channel last stride indices here to get better
// performance
// Computes channels-last strides for a contiguous tensor with the given
// sizes: for 4-d input the channel dim (index 1) gets stride 1 and the
// remaining dims are laid out W, H, N outward; 3-d input puts dim 0
// innermost. Any other rank is an internal error.
template <typename T>
inline std::vector<T> get_channels_last_strides_2d(ArrayRef<T> sizes) {
  std::vector<T> strides(sizes.size());
  switch (sizes.size()) {
    case 4:
      // Channels innermost, then W, H, N.
      strides[1] = 1;
      strides[3] = sizes[1];
      strides[2] = strides[3] * sizes[3];
      strides[0] = strides[2] * sizes[2];
      return strides;
    case 3:
      strides[0] = 1;
      strides[2] = sizes[0];
      strides[1] = strides[2] * sizes[2];
      return strides;
    default:
      TORCH_INTERNAL_ASSERT(
          false, "ChannelsLast2d doesn't support size ", sizes.size());
  }
}
// int64_t convenience overload of the template above.
inline std::vector<int64_t> get_channels_last_strides_2d(IntArrayRef sizes) {
  return get_channels_last_strides_2d<int64_t>(sizes);
}
// 3-d (volumetric) analogue of get_channels_last_strides_2d: 5-d input puts
// the channel dim (index 1) innermost, then dims 4, 3, 2, 0 outward; 4-d
// input puts dim 0 innermost. Any other rank is an internal error.
template <typename T>
std::vector<T> get_channels_last_strides_3d(ArrayRef<T> sizes) {
  std::vector<T> strides(sizes.size());
  switch (sizes.size()) {
    case 5:
      strides[1] = 1;
      strides[4] = sizes[1];
      strides[3] = strides[4] * sizes[4];
      strides[2] = strides[3] * sizes[3];
      strides[0] = strides[2] * sizes[2];
      return strides;
    case 4:
      strides[0] = 1;
      strides[3] = sizes[0];
      strides[2] = strides[3] * sizes[3];
      strides[1] = strides[2] * sizes[2];
      return strides;
    default:
      TORCH_INTERNAL_ASSERT(
          false, "ChannelsLast3d doesn't support size ", sizes.size());
  }
}
// int64_t convenience overload of the template above.
inline std::vector<int64_t> get_channels_last_strides_3d(IntArrayRef sizes) {
  return get_channels_last_strides_3d<int64_t>(sizes);
}
// NOTE:
// Below are Helper functions for is_channels_last_strides_xd.
// 1. Please do not combine these helper functions, each helper function handles
// exactly one case of sizes + memory_format, by doing this, the strides indices
// will be a constant array and we can access it using constant index number,
// the compiler will fully unroll the loop on strides indices to gain a better
// performance.
// 2. No error check in helper function, caller ensures the correctness of the
// input
// 3. All helper functions have similar comments, only 1st helper function is
// commented here.
// Checks whether `strides` describe a channels-last layout for 4-d `sizes`.
// Walks the dims innermost-to-outermost in channels-last order, maintaining
// in `min` a running lower bound that the next (outer) stride must meet.
// Returns false for ambiguous cases — see the in-loop comments and
// Note [Ambiguous is_channels_last_strides_xd] below.
template <typename T>
inline bool is_channels_last_strides_2d_s4(
    const ArrayRef<T> sizes,
    const ArrayRef<T> strides) {
  T min = 0;
  // special case for trivial C dimension. default to NCHW
  if (strides[1] == 0) {
    return false;
  }
  // loop strides indices
  for (auto& d : {1, 3, 2, 0}) {
    if (sizes[d] == 0) {
      return false;
    }
    if (strides[d] < min) {
      return false;
    }
    // Fallback to NCHW as default layout for ambiguous cases
    // This is the flaw of implicit memory_format from strides.
    // N111 tensor with identical strides for size 1 dimension;
    // Two cases could lead us here:
    // a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1])
    // b. N11W contiguous Tensor sliced on the W-dimension.
    // ([N,1,1,1]@[W,W,W,W])
    if (d == 0 && min == strides[1]) {
      return false;
    }
    // This is necessary to:
    // 1. distinguish the memory_format of N1H1;
    //     [H, 1, 1, 1] channels_last stride
    //     [H, H, 1, 1] contiguous stride
    // 2. permutation of 1C1W:
    //     [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3)
    //     [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as channels_last
    min = strides[d];
    if (sizes[d] > 1) {
      min *= sizes[d];
    }
  }
  return true;
}
// 5-d analogue of is_channels_last_strides_2d_s4; identical structure and
// the same ambiguous-case caveats — see the comments on that function.
template <typename T>
inline bool is_channels_last_strides_3d_s5(
    const ArrayRef<T> sizes,
    const ArrayRef<T> strides) {
  T min = 0;
  // Trivial C dimension: fall back to contiguous.
  if (strides[1] == 0) {
    return false;
  }
  // Walk dims innermost-to-outermost in channels-last-3d order.
  for (auto& d : {1, 4, 3, 2, 0}) {
    if (sizes[d] == 0) {
      return false;
    }
    if (strides[d] < min) {
      return false;
    }
    // Reject the ambiguous N1111-style case (see the 2d helper).
    if (d == 0 && min == strides[1]) {
      return false;
    }
    min = strides[d];
    if (sizes[d] > 1) {
      min *= sizes[d];
    }
  }
  return true;
}
// Note [Ambiguous is_channels_last_strides_xd]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// The flaw of carrying memory_format implicitly through strides is very hard
// to WAR properly. issue #24090
// Without the history of permutation, we can't infer the memory_format of a
// tensor from the snapshot of its size & stride
// e.g.
//
// 1. We can NOT specify the memory_format of N111 tensor through strides in a
// meaningful way;
//
// 2. Two path that ended up with identical size/stride
// N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W]
// NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C]
// So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer
// the memory_format of the original tensor.
//
// Due to the limitations, our temporary WAR `is_channels_last_strides` does the
// best effort to infer whether the original memory_format of a tensor is
// at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered
// by their importance):
// 1. Ensure that normal shape manipulation does not accidentally change the
// MemoryFormat of an existing tensor.
// 2. Allows user to mark MemoryFormat::ChannelsLast to tensors;
//
// The function does so via checking strides of the tensor, including strides of
// size-1 dimensions. Although conventionally PyTorch implies no restriction on
// trivial stride (stride for size-1 dimension).
//
// Note that this approach is a compromise. We did not solve the problem
// completely. Many cases we will not be able to infer the correct memory
// format.
// The implementation of `is_channels_last_strides` is to serve the objectives:
// MemoryFormat::ChannelsLast has to be explicitly opted-in (no accidental
// conversion); Best effort to maintain the ChannelsLast flag.
//
// Due to the fact that this is not a bulletproof solution, through testing
// (aten/src/ATen/test/memory_format_test.cpp)
// a. we ensure that the common tasks are supported;
// a. we identify corner cases where the implementation compromises on.
//
// By the time accumulated permutation is enabled to replace implicit
// memory_format through strides, we should be updating our tests and fix the
// issues in our tests.
//
// We use Channels Last 2d as an example above.
// This is a general problem for all the is_channels_last_strides_xd
// implementation. Please check the helper functions
// (is_channels_last_strides_*d_s*) for more details.
// Dispatches to the rank-specific checker. Only the 4-d case is currently
// recognized; all other ranks report "not channels-last".
template <typename T>
inline bool is_channels_last_strides_2d(
    const ArrayRef<T> sizes,
    const ArrayRef<T> strides) {
  if (sizes.size() == 4) {
    return is_channels_last_strides_2d_s4(sizes, strides);
  }
  // TODO dim == 3 case will be enabled once it is fully tested
  return false;
}
// Dispatches to the rank-specific checker. Only the 5-d case is currently
// recognized; all other ranks report "not channels-last".
template <typename T>
inline bool is_channels_last_strides_3d(
    const ArrayRef<T> sizes,
    const ArrayRef<T> strides) {
  if (sizes.size() == 5) {
    return is_channels_last_strides_3d_s5(sizes, strides);
  }
  // TODO dim == 4 case will be enabled once it is fully tested
  return false;
}
// int64_t convenience overload of the template above.
inline bool is_channels_last_strides_2d(
    const IntArrayRef sizes,
    const IntArrayRef strides) {
  return is_channels_last_strides_2d<int64_t>(sizes, strides);
}
// int64_t convenience overload of the template above.
inline bool is_channels_last_strides_3d(
    const IntArrayRef sizes,
    const IntArrayRef strides) {
  return is_channels_last_strides_3d<int64_t>(sizes, strides);
}
} // namespace c10
| 9,300
| 31.295139
| 80
|
h
|
null |
pytorch-main/c10/core/PyHandleCache.h
|
#pragma once
#include <c10/core/impl/PyInterpreter.h>
#include <c10/macros/Macros.h>
#include <c10/util/python_stub.h>
#include <atomic>
namespace c10 {
// A PyHandleCache represents a cached pointer from a C++ object to
// a Python object that represents that object analogously in Python.
// Upon a cache hit, the relevant object can be retrieved after a test
// and then a memory load. Two conditions must hold to be able to use this
// class:
//
// - This must truly be a cache; e.g., the caller must be able to produce
// the object some other way if the cache hit misses.
//
// - This must truly be a handle; e.g., the Python object referenced by
// this class must have static lifetime. This means we don't have to
// maintain strong ownership or deallocate the object when the C++ object
// dies. Static lifetime is a good idea in conjunction with the cache,
// since if you are producing a fresh object on miss you won't be
// maintaining object identity. If you need bidirectional ownership,
// you will want to factor out the pattern in TensorImpl with
// resurrection.
//
// This cache is expected to not improve perf under torchdeploy, as one
// interpreter will fill up the cache, and all the interpreters will be
// unable to use the slot. A potential improvement is to have multiple
// slots (one per interpreter), which will work in deployment scenarios
// where there a stable, fixed number of interpreters. You can also store
// the relevant state in the Python library, rather than in the non-Python
// library (although in many cases, this is not convenient, as there may
// not be a way to conveniently index based on the object.)
class PyHandleCache {
 public:
  PyHandleCache() : pyinterpreter_(nullptr), data_(nullptr) {}

  // Attempt to fetch the pointer from the cache, if the PyInterpreter
  // matches.  If it doesn't exist, or the cache entry is not valid,
  // use slow_accessor to get the real pointer value and return that
  // (possibly writing it to the cache, if the cache entry is
  // available.)
  template <typename F>
  PyObject* ptr_or(impl::PyInterpreter* self_interpreter, F slow_accessor)
      const {
    // Note [Memory ordering on Python interpreter tag]
    // The acquire load pairs with the release half of the acq_rel
    // compare-exchange below, so a thread that observes our interpreter tag
    // also observes the data_ written before the tag was published.
    impl::PyInterpreter* interpreter =
        pyinterpreter_.load(std::memory_order_acquire);
    if (C10_LIKELY(interpreter == self_interpreter)) {
      // Cache hit: the slot was claimed by this interpreter, data_ is valid.
      return data_;
    } else if (interpreter == nullptr) {
      // Slot unclaimed: compute the value, then try to claim the slot.
      auto* r = slow_accessor();
      impl::PyInterpreter* expected = nullptr;
      // attempt to claim this cache entry with the specified interpreter tag
      if (pyinterpreter_.compare_exchange_strong(
              expected, self_interpreter, std::memory_order_acq_rel)) {
        data_ = r;
      }
      // This shouldn't be possible, as you should be GIL protected
      // (i.e. no other thread running this same interpreter could have
      // claimed the slot concurrently).
      TORCH_INTERNAL_ASSERT(expected != self_interpreter);
      return r;
    } else {
      // Slot claimed by a *different* interpreter (e.g. torchdeploy):
      // fall back to the slow path every time; the cache stays theirs.
      return slow_accessor();
    }
  }

 private:
  // Interpreter that owns the cached handle; nullptr means "unclaimed".
  mutable std::atomic<impl::PyInterpreter*> pyinterpreter_;
  // Cached handle; only meaningful once pyinterpreter_ is non-null.
  // Plain (non-atomic) pointer: publication safety relies on the
  // acquire/release pairing on pyinterpreter_ above.
  mutable PyObject* data_;
};
} // namespace c10
| 3,076
| 39.486842
| 77
|
h
|
null |
pytorch-main/c10/core/QEngine.h
|
#pragma once
#include <c10/core/DeviceType.h>
#include <c10/core/DispatchKey.h>
#include <c10/util/Exception.h>
namespace c10 {
/**
* QEngine is an enum that is used to select the engine to run quantized ops.
* Keep this enum in sync with get_qengine_id() in
* torch/backends/quantized/__init__.py
*/
enum class QEngine : uint8_t {
  NoQEngine = 0, // no quantized backend selected
  FBGEMM = 1,
  QNNPACK = 2,
  ONEDNN = 3,
  X86 = 4,
};

// Short aliases used throughout the codebase (kFoo naming convention).
constexpr auto kNoQEngine = QEngine::NoQEngine;
constexpr auto kFBGEMM = QEngine::FBGEMM;
constexpr auto kQNNPACK = QEngine::QNNPACK;
constexpr auto kONEDNN = QEngine::ONEDNN;
constexpr auto kX86 = QEngine::X86;
// Translate the engine tag into its canonical, human-readable name.
// An out-of-range value is a hard error rather than a silent fallback.
inline std::string toString(QEngine qengine) {
  if (qengine == kNoQEngine) {
    return "NoQEngine";
  } else if (qengine == kFBGEMM) {
    return "FBGEMM";
  } else if (qengine == kQNNPACK) {
    return "QNNPACK";
  } else if (qengine == kONEDNN) {
    return "ONEDNN";
  } else if (qengine == kX86) {
    return "X86";
  }
  TORCH_CHECK(
      false, "Unrecognized Quantized Engine: ", static_cast<int>(qengine));
}
} // namespace c10
| 1,040
| 21.148936
| 79
|
h
|
null |
pytorch-main/c10/core/QScheme.h
|
#pragma once
#include <c10/core/DeviceType.h>
#include <c10/util/Exception.h>
namespace c10 {
/**
* QScheme is an enum that specifies the type of quantization. This has a one
* to one correspondence with Quantizer
* Please refer to ATen/quantized/Quantizer.h to see the Quantizers classes.
* Keep this file in sync with torch/nn/_qscheme.py
*/
enum class QScheme : uint8_t {
  PER_TENSOR_AFFINE = 0,
  PER_CHANNEL_AFFINE = 1,
  PER_TENSOR_SYMMETRIC = 2,
  PER_CHANNEL_SYMMETRIC = 3,
  PER_CHANNEL_AFFINE_FLOAT_QPARAMS = 4,
  // Sentinel: count of real schemes above, not a scheme itself.
  COMPILE_TIME_NUM_QSCHEMES = 5,
};

// Short aliases used throughout the codebase (kFoo naming convention).
constexpr auto kPerTensorAffine = QScheme::PER_TENSOR_AFFINE;
constexpr auto kPerChannelAffine = QScheme::PER_CHANNEL_AFFINE;
constexpr auto kPerTensorSymmetric = QScheme::PER_TENSOR_SYMMETRIC;
constexpr auto kPerChannelSymmetric = QScheme::PER_CHANNEL_SYMMETRIC;
constexpr auto kPerChannelAffineFloatQParams =
    QScheme::PER_CHANNEL_AFFINE_FLOAT_QPARAMS;

// Number of quantization schemes, usable in array sizing / loops.
constexpr int COMPILE_TIME_NUM_QSCHEMES =
    static_cast<int>(QScheme::COMPILE_TIME_NUM_QSCHEMES);
// Translate the quantization-scheme tag into its canonical snake_case name
// (matching torch/nn/_qscheme.py). Unknown values are a hard error.
inline std::string toString(QScheme qscheme) {
  if (qscheme == kPerTensorAffine) {
    return "per_tensor_affine";
  } else if (qscheme == kPerChannelAffine) {
    return "per_channel_affine";
  } else if (qscheme == kPerTensorSymmetric) {
    return "per_tensor_symmetric";
  } else if (qscheme == kPerChannelSymmetric) {
    return "per_channel_symmetric";
  } else if (qscheme == kPerChannelAffineFloatQParams) {
    return "per_channel_affine_float_qparams";
  }
  TORCH_CHECK(false, "Unrecognized qscheme: ", static_cast<int>(qscheme));
}
} // namespace c10
| 1,562
| 30.26
| 78
|
h
|
null |
pytorch-main/c10/core/SafePyObject.h
|
#pragma once
#include <c10/core/impl/PyInterpreter.h>
#include <c10/macros/Export.h>
#include <c10/util/python_stub.h>
namespace c10 {
// This is an safe owning holder for a PyObject, akin to pybind11's
// py::object, with two major differences:
//
// - It is in c10/core; i.e., you can use this type in contexts where
// you do not have a libpython dependency
//
// - It is multi-interpreter safe (ala torchdeploy); when you fetch
// the underlying PyObject* you are required to specify what the current
// interpreter context is and we will check that you match it.
//
// It is INVALID to store a reference to a Tensor object in this way;
// you should just use TensorImpl directly in that case!
struct C10_API SafePyObject {
  // Steals a reference to data (the caller's reference is taken over; the
  // destructor decrefs it through the owning interpreter).
  SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
      : data_(data), pyinterpreter_(pyinterpreter) {}
  // Move: the source is left empty (data_ == nullptr) so only one holder
  // ever decrefs. Note the interpreter pointer is copied, not cleared;
  // that is fine because a null data_ skips the decref entirely.
  SafePyObject(SafePyObject&& other)
      : data_(std::exchange(other.data_, nullptr)),
        pyinterpreter_(other.pyinterpreter_) {}

  // In principle this could be copyable if we add an incref to PyInterpreter
  // but for now it's easier to just disallow it.
  SafePyObject(SafePyObject const&) = delete;
  SafePyObject& operator=(SafePyObject const&) = delete;

  ~SafePyObject() {
    if (data_ != nullptr) {
      // Decref must go through the interpreter that owns the object.
      (*pyinterpreter_)->decref(data_, /*is_tensor*/ false);
    }
  }

  c10::impl::PyInterpreter& pyinterpreter() const {
    return *pyinterpreter_;
  }
  // Returns the raw handle after verifying the caller's interpreter matches.
  PyObject* ptr(const c10::impl::PyInterpreter*) const;

  // stop tracking the current object, and return it
  // (ownership of the reference transfers to the caller; this holder will
  // no longer decref it).
  PyObject* release() {
    auto rv = data_;
    data_ = nullptr;
    return rv;
  }

 private:
  PyObject* data_;
  c10::impl::PyInterpreter* pyinterpreter_;
};
// Like SafePyObject, but non-owning. Good for references to global PyObjects
// that will be leaked on interpreter exit. You get a copy constructor/assign
// this way.
struct C10_API SafePyHandle {
SafePyHandle() : data_(nullptr), pyinterpreter_(nullptr) {}
SafePyHandle(PyObject* data, c10::impl::PyInterpreter* pyinterpreter)
: data_(data), pyinterpreter_(pyinterpreter) {}
c10::impl::PyInterpreter& pyinterpreter() const {
return *pyinterpreter_;
}
PyObject* ptr(const c10::impl::PyInterpreter*) const;
void reset() {
data_ = nullptr;
pyinterpreter_ = nullptr;
}
operator bool() {
return data_;
}
private:
PyObject* data_;
c10::impl::PyInterpreter* pyinterpreter_;
};
} // namespace c10
| 2,488
| 28.987952
| 78
|
h
|
null |
pytorch-main/c10/core/Scalar.h
|
#pragma once
#include <stdint.h>
#include <stdexcept>
#include <type_traits>
#include <utility>
#include <c10/core/OptionalRef.h>
#include <c10/core/ScalarType.h>
#include <c10/core/SymFloat.h>
#include <c10/core/SymInt.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Half.h>
#include <c10/util/TypeCast.h>
#include <c10/util/intrusive_ptr.h>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
#endif
namespace c10 {
/**
* Scalar represents a 0-dimensional tensor which contains a single element.
* Unlike a tensor, numeric literals (in C++) are implicitly convertible to
* Scalar (which is why, for example, we provide both add(Tensor) and
* add(Scalar) overloads for many operations). It may also be used in
* circumstances where you statically know a tensor is 0-dim and single size,
* but don't know its type.
*/
class C10_API Scalar {
 public:
  // Default Scalar is integral zero.
  Scalar() : Scalar(int64_t(0)) {}

  // Drops the reference held on the SymNodeImpl when the payload is symbolic
  // (SymInt/SymFloat/SymBool); plain numeric payloads need no cleanup.
  void destroy() {
    if (Tag::HAS_si == tag || Tag::HAS_sd == tag || Tag::HAS_sb == tag) {
      raw::intrusive_ptr::decref(v.p);
      v.p = nullptr;
    }
  }

  ~Scalar() {
    destroy();
  }

#define DEFINE_IMPLICIT_CTOR(type, name) \
  Scalar(type vv) : Scalar(vv, true) {}

  AT_FORALL_SCALAR_TYPES_AND3(Half, BFloat16, ComplexHalf, DEFINE_IMPLICIT_CTOR)
  AT_FORALL_COMPLEX_TYPES(DEFINE_IMPLICIT_CTOR)

#undef DEFINE_IMPLICIT_CTOR

  // Value* is both implicitly convertible to SymbolicVariable and bool which
  // causes ambiguity error. Specialized constructor for bool resolves this
  // problem.
  //
  // NB: boolean payloads are stored in v.i (see DEFINE_ACCESSOR below, which
  // reads v.i for Tag::HAS_b).
  template <
      typename T,
      typename std::enable_if<std::is_same<T, bool>::value, bool>::type* =
          nullptr>
  Scalar(T vv) : tag(Tag::HAS_b) {
    v.i = convert<int64_t, bool>(vv);
  }

  // NOTE(review): this template sets Tag::HAS_sb but stores into v.i rather
  // than taking ownership of a SymNodeImpl in v.p. For an exact c10::SymBool
  // argument the non-template Scalar(c10::SymBool) below is preferred by
  // overload resolution, so this path looks unreachable in practice --
  // confirm before relying on it.
  template <
      typename T,
      typename std::enable_if<std::is_same<T, c10::SymBool>::value, bool>::
          type* = nullptr>
  Scalar(T vv) : tag(Tag::HAS_sb) {
    v.i = convert<int64_t, c10::SymBool>(vv);
  }

#define DEFINE_ACCESSOR(type, name)                                   \
  type to##name() const {                                             \
    if (Tag::HAS_d == tag) {                                          \
      return checked_convert<type, double>(v.d, #type);               \
    } else if (Tag::HAS_z == tag) {                                   \
      return checked_convert<type, c10::complex<double>>(v.z, #type); \
    }                                                                 \
    if (Tag::HAS_b == tag) {                                          \
      return checked_convert<type, bool>(v.i, #type);                 \
    } else if (Tag::HAS_i == tag) {                                   \
      return checked_convert<type, int64_t>(v.i, #type);              \
    } else if (Tag::HAS_si == tag) {                                  \
      TORCH_CHECK(false, "tried to get " #name " out of SymInt")      \
    } else if (Tag::HAS_sd == tag) {                                  \
      TORCH_CHECK(false, "tried to get " #name " out of SymFloat")    \
    } else if (Tag::HAS_sb == tag) {                                  \
      TORCH_CHECK(false, "tried to get " #name " out of SymBool")     \
    }                                                                 \
    TORCH_CHECK(false)                                                \
  }

  // TODO: Support ComplexHalf accessor
  AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ACCESSOR)

#undef DEFINE_ACCESSOR

  // Returns the payload as a SymInt; a concrete integer payload is wrapped
  // into a non-symbolic SymInt via toLong().
  SymInt toSymInt() const {
    if (Tag::HAS_si == tag) {
      return c10::SymInt(intrusive_ptr<SymNodeImpl>::reclaim_copy(
          static_cast<SymNodeImpl*>(v.p)));
    } else {
      return toLong();
    }
  }

  SymFloat toSymFloat() const {
    if (Tag::HAS_sd == tag) {
      return c10::SymFloat(intrusive_ptr<SymNodeImpl>::reclaim_copy(
          static_cast<SymNodeImpl*>(v.p)));
    } else {
      return toDouble();
    }
  }

  SymBool toSymBool() const {
    if (Tag::HAS_sb == tag) {
      return c10::SymBool(intrusive_ptr<SymNodeImpl>::reclaim_copy(
          static_cast<SymNodeImpl*>(v.p)));
    } else {
      return toBool();
    }
  }

  // also support scalar.to<int64_t>();
  // Deleted for unsupported types, but specialized below for supported types
  template <typename T>
  T to() const = delete;

  // audit uses of data_ptr
  const void* data_ptr() const {
    TORCH_INTERNAL_ASSERT(!isSymbolic());
    return static_cast<const void*>(&v);
  }

  bool isFloatingPoint() const {
    return Tag::HAS_d == tag || Tag::HAS_sd == tag;
  }

  C10_DEPRECATED_MESSAGE(
      "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.")
  bool isIntegral() const {
    return Tag::HAS_i == tag || Tag::HAS_si == tag;
  }
  bool isIntegral(bool includeBool) const {
    return Tag::HAS_i == tag || Tag::HAS_si == tag ||
        (includeBool && isBoolean());
  }

  bool isComplex() const {
    return Tag::HAS_z == tag;
  }
  bool isBoolean() const {
    return Tag::HAS_b == tag || Tag::HAS_sb == tag;
  }

  // you probably don't actually want these; they're mostly for testing
  bool isSymInt() const {
    return Tag::HAS_si == tag;
  }
  bool isSymFloat() const {
    return Tag::HAS_sd == tag;
  }
  bool isSymBool() const {
    return Tag::HAS_sb == tag;
  }

  bool isSymbolic() const {
    return Tag::HAS_si == tag || Tag::HAS_sd == tag || Tag::HAS_sb == tag;
  }

  C10_ALWAYS_INLINE Scalar& operator=(Scalar&& other) noexcept {
    if (&other == this) {
      return *this;
    }
    destroy();
    moveFrom(std::move(other));
    return *this;
  }

  C10_ALWAYS_INLINE Scalar& operator=(const Scalar& other) {
    if (&other == this) {
      return *this;
    }
    // Copy-construct then move-assign; the copy ctor handles the incref.
    *this = Scalar(other);
    return *this;
  }

  Scalar operator-() const;
  Scalar conj() const;
  Scalar log() const;

  // Exact-value comparison against a concrete (non-complex) number; symbolic
  // payloads are not yet supported (TORCH_CHECK).
  template <
      typename T,
      typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
  bool equal(T num) const {
    if (isComplex()) {
      TORCH_INTERNAL_ASSERT(!isSymbolic());
      auto val = v.z;
      return (val.real() == num) && (val.imag() == T());
    } else if (isFloatingPoint()) {
      TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality");
      return v.d == num;
    } else if (isIntegral(/*includeBool=*/false)) {
      TORCH_CHECK(!isSymbolic(), "NYI SymInt equality");
      return v.i == num;
    } else if (isBoolean()) {
      // boolean scalar does not equal to a non boolean value
      TORCH_INTERNAL_ASSERT(!isSymbolic());
      return false;
    } else {
      TORCH_INTERNAL_ASSERT(false);
    }
  }

  template <
      typename T,
      typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
  bool equal(T num) const {
    if (isComplex()) {
      TORCH_INTERNAL_ASSERT(!isSymbolic());
      return v.z == num;
    } else if (isFloatingPoint()) {
      TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality");
      return (v.d == num.real()) && (num.imag() == T());
    } else if (isIntegral(/*includeBool=*/false)) {
      TORCH_CHECK(!isSymbolic(), "NYI SymInt equality");
      return (v.i == num.real()) && (num.imag() == T());
    } else if (isBoolean()) {
      // boolean scalar does not equal to a non boolean value
      TORCH_INTERNAL_ASSERT(!isSymbolic());
      return false;
    } else {
      TORCH_INTERNAL_ASSERT(false);
    }
  }

  bool equal(bool num) const {
    if (isBoolean()) {
      TORCH_INTERNAL_ASSERT(!isSymbolic());
      return static_cast<bool>(v.i) == num;
    } else {
      return false;
    }
  }

  // Widest dtype of the payload's category (Long/Double/ComplexDouble/Bool).
  ScalarType type() const {
    if (isComplex()) {
      return ScalarType::ComplexDouble;
    } else if (isFloatingPoint()) {
      return ScalarType::Double;
    } else if (isIntegral(/*includeBool=*/false)) {
      return ScalarType::Long;
    } else if (isBoolean()) {
      return ScalarType::Bool;
    } else {
      throw std::runtime_error("Unknown scalar type.");
    }
  }

  Scalar(Scalar&& rhs) noexcept : tag(rhs.tag) {
    moveFrom(std::move(rhs));
  }

  Scalar(const Scalar& rhs) : tag(rhs.tag), v(rhs.v) {
    if (isSymbolic()) {
      // Both copies now reference the same SymNodeImpl; bump its refcount.
      c10::raw::intrusive_ptr::incref(v.p);
    }
  }

  Scalar(c10::SymInt si) {
    if (auto m = si.maybe_as_int()) {
      tag = Tag::HAS_i;
      v.i = *m;
    } else {
      tag = Tag::HAS_si;
      v.p = std::move(si).release();
    }
  }

  Scalar(c10::SymFloat sd) {
    if (sd.is_symbolic()) {
      tag = Tag::HAS_sd;
      v.p = std::move(sd).release();
    } else {
      tag = Tag::HAS_d;
      v.d = sd.as_float_unchecked();
    }
  }

  Scalar(c10::SymBool sb) {
    if (sb.is_symbolic()) {
      tag = Tag::HAS_sb;
      v.p = std::move(sb).release();
    } else {
      tag = Tag::HAS_b;
      // BUGFIX: boolean payloads live in v.i -- the bool constructor writes
      // v.i and every Tag::HAS_b reader (DEFINE_ACCESSOR, equal(bool)) reads
      // v.i. The previous code wrote v.d, stashing the value in the wrong
      // union member.
      v.i = sb.as_bool_unchecked();
    }
  }

  // We can't set v in the initializer list using the
  // syntax v{ .member = ... } because it doesn't work on MSVC

 private:
  enum class Tag { HAS_d, HAS_i, HAS_z, HAS_b, HAS_sd, HAS_si, HAS_sb };

  // NB: assumes that self has already been cleared
  // Steals rhs's payload; rhs is reset to a harmless integral zero so its
  // destructor will not decref a pointer it no longer owns.
  C10_ALWAYS_INLINE void moveFrom(Scalar&& rhs) noexcept {
    v = rhs.v;
    tag = rhs.tag;
    if (rhs.tag == Tag::HAS_si || rhs.tag == Tag::HAS_sd ||
        rhs.tag == Tag::HAS_sb) {
      // Move out of scalar
      rhs.tag = Tag::HAS_i;
      rhs.v.i = 0;
    }
  }

  Tag tag;

  union v_t {
    double d{};
    int64_t i;
    c10::complex<double> z;
    c10::intrusive_ptr_target* p;
    v_t() {} // default constructor
  } v;

  // Tag-dispatching private constructors used by DEFINE_IMPLICIT_CTOR above;
  // the unused bool parameter only disambiguates them from the public ones.
  template <
      typename T,
      typename std::enable_if<
          std::is_integral<T>::value && !std::is_same<T, bool>::value,
          bool>::type* = nullptr>
  Scalar(T vv, bool) : tag(Tag::HAS_i) {
    v.i = convert<decltype(v.i), T>(vv);
  }

  template <
      typename T,
      typename std::enable_if<
          !std::is_integral<T>::value && !c10::is_complex<T>::value,
          bool>::type* = nullptr>
  Scalar(T vv, bool) : tag(Tag::HAS_d) {
    v.d = convert<decltype(v.d), T>(vv);
  }

  template <
      typename T,
      typename std::enable_if<c10::is_complex<T>::value, bool>::type* = nullptr>
  Scalar(T vv, bool) : tag(Tag::HAS_z) {
    v.z = convert<decltype(v.z), T>(vv);
  }
};
// Lightweight non-owning "maybe a Scalar" reference (see OptionalRef).
using OptionalScalarRef = c10::OptionalRef<Scalar>;
// define the scalar.to<int64_t>() specializations
#define DEFINE_TO(T, name) \
template <> \
inline T Scalar::to<T>() const { \
return to##name(); \
}
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_TO)
#undef DEFINE_TO
} // namespace c10
C10_CLANG_DIAGNOSTIC_POP()
| 10,570
| 27.340483
| 96
|
h
|
null |
pytorch-main/c10/core/ScalarType.h
|
#pragma once
#include <c10/util/BFloat16.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Exception.h>
#include <c10/util/Half.h>
#include <c10/util/bits.h>
#include <c10/util/complex.h>
#include <c10/util/qint32.h>
#include <c10/util/qint8.h>
#include <c10/util/quint2x4.h>
#include <c10/util/quint4x2.h>
#include <c10/util/quint8.h>
#include <complex>
#include <cstdint>
#include <ostream>
namespace c10 {
// For the macros below:
// NB: If you want to macro some code for all non-QInt scalar types (i.e. types
// with complete information, you probably want one of the
// AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND
// macros below, which are designed to behave similarly to the Dispatch macros
// with the same name.
// NB: Order matters for this macro; it is relied upon in
// _promoteTypesLookup and the serialization format.
#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) \
_(uint8_t, Byte) /* 0 */ \
_(int8_t, Char) /* 1 */ \
_(int16_t, Short) /* 2 */ \
_(int, Int) /* 3 */ \
_(int64_t, Long) /* 4 */ \
_(at::Half, Half) /* 5 */ \
_(float, Float) /* 6 */ \
_(double, Double) /* 7 */ \
_(c10::complex<c10::Half>, ComplexHalf) /* 8 */ \
_(c10::complex<float>, ComplexFloat) /* 9 */ \
_(c10::complex<double>, ComplexDouble) /* 10 */ \
_(bool, Bool) /* 11 */ \
_(c10::qint8, QInt8) /* 12 */ \
_(c10::quint8, QUInt8) /* 13 */ \
_(c10::qint32, QInt32) /* 14 */ \
_(at::BFloat16, BFloat16) /* 15 */ \
_(c10::quint4x2, QUInt4x2) /* 16 */ \
_(c10::quint2x4, QUInt2x4) /* 17 */ \
_(c10::bits1x8, Bits1x8) /* 18 */ \
_(c10::bits2x4, Bits2x4) /* 19 */ \
_(c10::bits4x2, Bits4x2) /* 20 */ \
_(c10::bits8, Bits8) /* 21 */ \
_(c10::bits16, Bits16) /* 22 */
// If you want to support ComplexHalf for real, add ComplexHalf
// into this macro (and change the name). But beware: convert()
// doesn't work for all the conversions you need...
#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF(_) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long) \
_(at::Half, Half) \
_(float, Float) \
_(double, Double) \
_(c10::complex<float>, ComplexFloat) \
_(c10::complex<double>, ComplexDouble) \
_(bool, Bool) \
_(at::BFloat16, BFloat16)
#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long) \
_(at::Half, Half) \
_(float, Float) \
_(double, Double) \
_(c10::complex<c10::Half>, ComplexHalf) \
_(c10::complex<float>, ComplexFloat) \
_(c10::complex<double>, ComplexDouble) \
_(bool, Bool) \
_(at::BFloat16, BFloat16)
// ScalarType enumerates every dtype c10 knows about. Enumerator values are
// generated from AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS, so that
// macro's ordering is load-bearing (see the NB above it: _promoteTypesLookup
// and the serialization format rely on it).
enum class ScalarType : int8_t {
#define DEFINE_ENUM(_1, n) n,
  AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_ENUM)
#undef DEFINE_ENUM
      Undefined,
  NumOptions
};

// Total number of ScalarType values, including Undefined.
constexpr uint16_t NumScalarTypes =
    static_cast<uint16_t>(ScalarType::NumOptions);
namespace impl {
// These are used to map ScalarTypes to C++ types.
template <c10::ScalarType N>
struct ScalarTypeToCPPType;
#define SPECIALIZE_ScalarTypeToCPPType(cpp_type, scalar_type) \
template <> \
struct ScalarTypeToCPPType<c10::ScalarType::scalar_type> { \
using type = cpp_type; \
\
/* This is a workaround for the CUDA bug which prevents */ \
/* ::detail::ScalarTypeToCType<T>::type being used directly due to */ \
/* ambiguous reference which can't to be resolved. For some reason it */ \
/* can't pick between at::detail and at::cuda::detail. */ \
/* For repro example, please see: */ \
/* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \
/* TODO: remove once the bug is fixed. */ \
static type t; \
};
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_ScalarTypeToCPPType)
#undef SPECIALIZE_ScalarTypeToCPPType
template <c10::ScalarType N>
using ScalarTypeToCPPTypeT = typename ScalarTypeToCPPType<N>::type;
} // namespace impl
template <typename T>
struct CppTypeToScalarType;
#define SPECIALIZE_CppTypeToScalarType(cpp_type, scalar_type) \
template <> \
struct CppTypeToScalarType<cpp_type> \
: std:: \
integral_constant<c10::ScalarType, c10::ScalarType::scalar_type> { \
};
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType)
#undef SPECIALIZE_CppTypeToScalarType
#define AT_FORALL_INT_TYPES(_) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long)
#define AT_FORALL_SCALAR_TYPES(_) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long) \
_(float, Float) \
_(double, Double)
#define AT_FORALL_SCALAR_TYPES_AND(SCALARTYPE, _) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long) \
_(float, Float) \
_(double, Double) \
_(decltype(::c10::impl::ScalarTypeToCPPType< \
::c10::ScalarType::SCALARTYPE>::t), \
SCALARTYPE)
#define AT_FORALL_SCALAR_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, _) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long) \
_(float, Float) \
_(double, Double) \
_(decltype(::c10::impl::ScalarTypeToCPPType< \
::c10::ScalarType::SCALARTYPE1>::t), \
SCALARTYPE1) \
_(decltype(::c10::impl::ScalarTypeToCPPType< \
::c10::ScalarType::SCALARTYPE2>::t), \
SCALARTYPE2)
#define AT_FORALL_SCALAR_TYPES_AND3(SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, _) \
_(uint8_t, Byte) \
_(int8_t, Char) \
_(int16_t, Short) \
_(int, Int) \
_(int64_t, Long) \
_(float, Float) \
_(double, Double) \
_(decltype(::c10::impl::ScalarTypeToCPPType< \
::c10::ScalarType::SCALARTYPE1>::t), \
SCALARTYPE1) \
_(decltype(::c10::impl::ScalarTypeToCPPType< \
::c10::ScalarType::SCALARTYPE2>::t), \
SCALARTYPE2) \
_(decltype(::c10::impl::ScalarTypeToCPPType< \
::c10::ScalarType::SCALARTYPE3>::t), \
SCALARTYPE3)
#define AT_FORALL_QINT_TYPES(_) \
_(c10::qint8, QInt8) \
_(c10::quint8, QUInt8) \
_(c10::qint32, QInt32) \
_(c10::quint4x2, QUInt4x2) \
_(c10::quint2x4, QUInt2x4)
#define AT_FORALL_COMPLEX_TYPES(_) \
_(c10::complex<float>, ComplexFloat) \
_(c10::complex<double>, ComplexDouble)
#define DEFINE_CONSTANT(_, name) \
constexpr ScalarType k##name = ScalarType::name;
AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT)
#undef DEFINE_CONSTANT
// Returns the enumerator's name (e.g. "Float") as a static string;
// out-of-range values yield "UNKNOWN_SCALAR" rather than erroring.
static inline const char* toString(ScalarType t) {
#define DEFINE_CASE(_, name) \
  case ScalarType::name:     \
    return #name;

  switch (t) {
    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CASE)
    default:
      return "UNKNOWN_SCALAR";
  }
#undef DEFINE_CASE
}
// Returns sizeof() the C++ type backing dtype `t`; Undefined or an
// out-of-range value is a hard error.
static inline size_t elementSize(ScalarType t) {
#define CASE_ELEMENTSIZE_CASE(ctype, name) \
  case ScalarType::name:                   \
    return sizeof(ctype);

  switch (t) {
    AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE)
    default:
      TORCH_CHECK(false, "Unknown ScalarType");
  }
#undef CASE_ELEMENTSIZE_CASE
}
// True for the five dense integer dtypes; Bool counts only when
// `includeBool` is set. Quantized integer types are NOT integral here.
static inline bool isIntegralType(ScalarType t, bool includeBool) {
  switch (t) {
    case ScalarType::Byte:
    case ScalarType::Char:
    case ScalarType::Short:
    case ScalarType::Int:
    case ScalarType::Long:
      return true;
    case ScalarType::Bool:
      return includeBool;
    default:
      return false;
  }
}
// Deprecated single-argument form; behaves as if includeBool were false.
C10_DEPRECATED_MESSAGE(
    "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.")
static inline bool isIntegralType(ScalarType t) {
  return isIntegralType(t, /*includeBool=*/false);
}
// True for the four dense floating-point dtypes (complex excluded).
static inline bool isFloatingType(ScalarType t) {
  switch (t) {
    case ScalarType::Double:
    case ScalarType::Float:
    case ScalarType::Half:
    case ScalarType::BFloat16:
      return true;
    default:
      return false;
  }
}
// True for the 16-bit floating-point dtypes only.
static inline bool isReducedFloatingType(ScalarType t) {
  switch (t) {
    case ScalarType::Half:
    case ScalarType::BFloat16:
      return true;
    default:
      return false;
  }
}
// True for any of the three complex dtypes.
static inline bool isComplexType(ScalarType t) {
  switch (t) {
    case ScalarType::ComplexHalf:
    case ScalarType::ComplexFloat:
    case ScalarType::ComplexDouble:
      return true;
    default:
      return false;
  }
}
// True for any quantized integer dtype.
// Don't forget to extend this when adding new QInt types.
static inline bool isQIntType(ScalarType t) {
  switch (t) {
    case ScalarType::QInt8:
    case ScalarType::QUInt8:
    case ScalarType::QInt32:
    case ScalarType::QUInt4x2:
    case ScalarType::QUInt2x4:
      return true;
    default:
      return false;
  }
}
// True for any of the opaque bits dtypes.
static inline bool isBitsType(ScalarType t) {
  switch (t) {
    case ScalarType::Bits1x8:
    case ScalarType::Bits2x4:
    case ScalarType::Bits4x2:
    case ScalarType::Bits8:
    case ScalarType::Bits16:
      return true;
    default:
      return false;
  }
}
// Maps a dense integer dtype to its quantized counterpart; any dtype
// without a quantized counterpart is returned unchanged.
static inline ScalarType toQIntType(ScalarType t) {
  switch (t) {
    case ScalarType::Byte:
      return ScalarType::QUInt8;
    case ScalarType::Char:
      return ScalarType::QInt8;
    case ScalarType::Int:
      return ScalarType::QInt32;
    default:
      return t;
  }
}
// Maps a quantized dtype to the dense dtype of its underlying storage;
// non-quantized dtypes pass through unchanged. Note the sub-byte packed
// types (QUInt4x2, QUInt2x4) are stored in bytes, like QUInt8.
static inline ScalarType toUnderlying(ScalarType t) {
  switch (t) {
    case ScalarType::QUInt8:
    case ScalarType::QUInt4x2:
    case ScalarType::QUInt2x4:
      return ScalarType::Byte;
    case ScalarType::QInt8:
      return ScalarType::Char;
    case ScalarType::QInt32:
      return ScalarType::Int;
    default:
      return t;
  }
}
// True if the C++ representation of `t` is a signed arithmetic type.
// Complex dtypes are unconditionally treated as signed; quantized and bits
// dtypes are rejected with an error rather than answered.
static inline bool isSignedType(ScalarType t) {
  TORCH_CHECK(!isQIntType(t), "isSignedType not supported for quantized types");
#define CASE_SIGNED(ctype, name) \
  case ScalarType::name:         \
    return std::numeric_limits<ctype>::is_signed;

  switch (t) {
    case ScalarType::Bits1x8:
    case ScalarType::Bits2x4:
    case ScalarType::Bits4x2:
    case ScalarType::Bits8:
    case ScalarType::Bits16:
      TORCH_CHECK(false, "Bits types are undefined");
    case ScalarType::ComplexHalf:
    case ScalarType::ComplexFloat:
    case ScalarType::ComplexDouble:
      return true;
      // All remaining dtypes defer to std::numeric_limits of their C++ type.
      AT_FORALL_SCALAR_TYPES_AND3(Half, Bool, BFloat16, CASE_SIGNED)
    default:
      TORCH_CHECK(false, "Unknown ScalarType");
  }
#undef CASE_SIGNED
}
// True if `type` is the dense storage dtype underlying quantized `qtype`.
static inline bool isUnderlying(ScalarType type, ScalarType qtype) {
  return type == toUnderlying(qtype);
}
// Maps a complex dtype to its real-component dtype (e.g. ComplexFloat ->
// Float); non-complex dtypes pass through unchanged.
static inline ScalarType toRealValueType(ScalarType t) {
  switch (t) {
    case ScalarType::ComplexHalf:
      return ScalarType::Half;
    case ScalarType::ComplexFloat:
      return ScalarType::Float;
    case ScalarType::ComplexDouble:
      return ScalarType::Double;
    default:
      return t;
  }
}
// Maps a floating/complex dtype to the complex dtype with matching
// component precision; already-complex dtypes pass through unchanged and
// anything else is a hard error.
static inline ScalarType toComplexType(ScalarType t) {
  switch (t) {
    case ScalarType::BFloat16:
      // BFloat16 has range equivalent to Float,
      // so we map it to ComplexFloat.
      return ScalarType::ComplexFloat;
    case ScalarType::Half:
      return ScalarType::ComplexHalf;
    case ScalarType::Float:
      return ScalarType::ComplexFloat;
    case ScalarType::Double:
      return ScalarType::ComplexDouble;
    case ScalarType::ComplexHalf:
      return ScalarType::ComplexHalf;
    case ScalarType::ComplexFloat:
      return ScalarType::ComplexFloat;
    case ScalarType::ComplexDouble:
      return ScalarType::ComplexDouble;
    default:
      TORCH_CHECK(false, "Unknown Complex ScalarType for ", t);
  }
}
// see tensor_attributes.rst for detailed explanation and examples
// of casting rules.
// Implicit-cast legality, e.g. for in-place ops: may a value of dtype `from`
// be written into a tensor of dtype `to`? Three category drops are rejected
// (complex -> non-complex, float -> integral, non-bool -> bool); everything
// else is allowed.
static inline bool canCast(const ScalarType from, const ScalarType to) {
  // We disallow complex -> non complex, e.g., float_tensor *= complex is
  // disallowed.
  if (isComplexType(from) && !isComplexType(to)) {
    return false;
  }
  // We disallow float -> integral, e.g., int_tensor *= float is disallowed.
  if (isFloatingType(from) && isIntegralType(to, false)) {
    return false;
  }

  // Treat bool as a distinct "category," to be consistent with type promotion
  // rules (e.g. `bool_tensor + 5 -> int64_tensor`). If `5` was in the same
  // category as `bool_tensor`, we would not promote. Differing categories
  // implies `bool_tensor += 5` is disallowed.
  //
  // NB: numpy distinguishes "unsigned" as a category to get the desired
  // `bool_tensor + 5 -> int64_tensor` behavior. We don't, because:
  // * We don't want the performance hit of checking the runtime sign of
  //   Scalars.
  // * `uint8_tensor + 5 -> int64_tensor` would be undesirable.
  if (from != ScalarType::Bool && to == ScalarType::Bool) {
    return false;
  }
  return true;
}
// Binary type promotion a la NumPy's promote_types: returns the common dtype
// the pair (a, b) promotes to, or Undefined when the pair does not promote
// (bits types, mismatched quantized types, anything involving Undefined).
// Implemented as a table lookup indexed by the enum values, which is why the
// ScalarType enumerator order is load-bearing.
static inline ScalarType promoteTypes(ScalarType a, ScalarType b) {
  // This is generated according to NumPy's promote_types
  constexpr auto u1 = ScalarType::Byte;
  constexpr auto i1 = ScalarType::Char;
  constexpr auto i2 = ScalarType::Short;
  constexpr auto i4 = ScalarType::Int;
  constexpr auto i8 = ScalarType::Long;
  constexpr auto f2 = ScalarType::Half;
  constexpr auto f4 = ScalarType::Float;
  constexpr auto f8 = ScalarType::Double;
  constexpr auto c2 = ScalarType::ComplexHalf;
  constexpr auto c4 = ScalarType::ComplexFloat;
  constexpr auto c8 = ScalarType::ComplexDouble;
  constexpr auto b1 = ScalarType::Bool;
  constexpr auto bf = ScalarType::BFloat16;
  constexpr auto ud = ScalarType::Undefined;
  if (a == ud || b == ud) {
    return ScalarType::Undefined;
  }

  // For QInt types, we only allow exact match
  if (isQIntType(a) && a == b) {
    return a;
  }

  if (isQIntType(a) || isQIntType(b)) {
    TORCH_CHECK(
        false,
        "promoteTypes with quantized numbers is not handled yet; figure out what the correct rules should be, offending types: ",
        toString(a),
        " ",
        toString(b));
  }

  if (isBitsType(a) && a == b) {
    return a;
  } else if (isBitsType(a) || isBitsType(b)) {
    return ScalarType::Undefined;
  }

  // Ignore the 5 bits types, since they are handled by the if statement
  // above and do not participate in type promotion. The `5` value has to
  // be consistent with the number of the unique `c10::bits*` types that
  // exist.
  const int NUM_PROMOTE_TYPES = static_cast<int>(ScalarType::NumOptions) - 5;

  // this matrix has to be consistent with
  // AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS undefined is used where we
  // are not sure about the correct value for type promotion.
  // clang-format off
  static constexpr ScalarType _promoteTypesLookup[
      NUM_PROMOTE_TYPES][NUM_PROMOTE_TYPES] = {
      /*        u1  i1  i2  i4  i8  f2  f4  f8  c2  c4  c8  b1  q1  q2  q3  bf*/
      /* u1 */ {u1, i2, i2, i4, i8, f2, f4, f8, c2, c4, c8, u1, ud, ud, ud, bf},
      /* i1 */ {i2, i1, i2, i4, i8, f2, f4, f8, c2, c4, c8, i1, ud, ud, ud, bf},
      /* i2 */ {i2, i2, i2, i4, i8, f2, f4, f8, c2, c4, c8, i2, ud, ud, ud, bf},
      /* i4 */ {i4, i4, i4, i4, i8, f2, f4, f8, c2, c4, c8, i4, ud, ud, ud, bf},
      /* i8 */ {i8, i8, i8, i8, i8, f2, f4, f8, c2, c4, c8, i8, ud, ud, ud, bf},
      /* f2 */ {f2, f2, f2, f2, f2, f2, f4, f8, c2, c4, c8, f2, ud, ud, ud, f4},
      /* f4 */ {f4, f4, f4, f4, f4, f4, f4, f8, c4, c4, c8, f4, ud, ud, ud, f4},
      /* f8 */ {f8, f8, f8, f8, f8, f8, f8, f8, c8, c8, c8, f8, ud, ud, ud, f8},
      /* c2 */ {c2, c2, c2, c2, c2, c2, c4, c8, c2, c4, c8, c2, ud, ud, ud, c4},
      /* c4 */ {c4, c4, c4, c4, c4, c4, c4, c8, c4, c4, c8, c4, ud, ud, ud, c4},
      /* c8 */ {c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, c8, ud, ud, ud, c8},
      /* b1 */ {u1, i1, i2, i4, i8, f2, f4, f8, c2, c4, c8, b1, ud, ud, ud, bf},
      /* q1 */ {ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud},
      /* q2 */ {ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud},
      /* q3 */ {ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud, ud},
      /* bf */ {bf, bf, bf, bf, bf, f4, f4, f8, c4, c4, c8, bf, ud, ud, ud, bf},
  };
  // clang-format on
  return _promoteTypesLookup[static_cast<int>(a)][static_cast<int>(b)];
}
// Stream insertion prints the dtype's enumerator name (e.g. "Float").
inline std::ostream& operator<<(
    std::ostream& stream,
    at::ScalarType scalar_type) {
  return stream << toString(scalar_type);
}
#define AT_FORAUTOCAST_SCALAR_TYPES(_) \
_(half, Half) /* 0 */ \
_(bfloat16, BFloat16) /* 1 */
} // namespace c10
| 19,365
| 37.88755
| 129
|
h
|
null |
pytorch-main/c10/core/ScalarTypeToTypeMeta.h
|
#pragma once
#include <c10/core/ScalarType.h>
#include <c10/util/Optional.h>
#include <c10/util/typeid.h>
// these just expose TypeMeta/ScalarType bridge functions in c10
// TODO move to typeid.h (or codemod away) when TypeMeta et al
// are moved from caffe2 to c10 (see note at top of typeid.h)
namespace c10 {
/**
 * convert ScalarType enum values to TypeMeta handles
 */
static inline caffe2::TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type) {
  return caffe2::TypeMeta::fromScalarType(scalar_type);
}
/**
 * convert TypeMeta handles to ScalarType enum values
 * (inverse of scalarTypeToTypeMeta above)
 */
static inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) {
  return dtype.toScalarType();
}
/**
 * typeMetaToScalarType(), lifted to optional: nullopt maps to nullopt,
 * a present TypeMeta maps to its ScalarType.
 */
static inline optional<at::ScalarType> optTypeMetaToScalarType(
    optional<caffe2::TypeMeta> type_meta) {
  if (type_meta.has_value()) {
    return type_meta->toScalarType();
  }
  return c10::nullopt;
}
/**
 * convenience: equality across TypeMeta/ScalarType conversion
 */
// All four operand orders are provided; != is derived from == so the two
// can never disagree.
static inline bool operator==(ScalarType t, caffe2::TypeMeta m) {
  return m.isScalarType(t);
}

static inline bool operator==(caffe2::TypeMeta m, ScalarType t) {
  return t == m;
}

static inline bool operator!=(ScalarType t, caffe2::TypeMeta m) {
  return !(t == m);
}

static inline bool operator!=(caffe2::TypeMeta m, ScalarType t) {
  return !(t == m);
}
} // namespace c10
| 1,396
| 23.086207
| 77
|
h
|
null |
pytorch-main/c10/core/Storage.h
|
#pragma once
#include <c10/core/StorageImpl.h>
namespace c10 {
struct C10_API Storage {
public:
  // Tag type selecting the size-in-bytes constructors below.
  struct use_byte_size_t {};

  // Default Storage holds no StorageImpl (null handle).
  Storage() = default;
  // Wraps an existing StorageImpl.
  Storage(c10::intrusive_ptr<StorageImpl> ptr)
      : storage_impl_(std::move(ptr)) {}

  // Allocates memory buffer using given allocator and creates a storage with it
  Storage(
      use_byte_size_t /*use_byte_size*/,
      SymInt size_bytes,
      Allocator* allocator = nullptr,
      bool resizable = false)
      : storage_impl_(c10::make_intrusive<StorageImpl>(
            StorageImpl::use_byte_size_t(),
            std::move(size_bytes),
            allocator,
            resizable)) {}

  // Creates storage with pre-allocated memory buffer. Allocator is given for
  // potential future reallocations, however it can be nullptr if the storage
  // is non-resizable
  Storage(
      use_byte_size_t /*use_byte_size*/,
      size_t size_bytes,
      at::DataPtr data_ptr,
      at::Allocator* allocator = nullptr,
      bool resizable = false)
      : storage_impl_(c10::make_intrusive<StorageImpl>(
            StorageImpl::use_byte_size_t(),
            size_bytes,
            std::move(data_ptr),
            allocator,
            resizable)) {}

  // Legacy constructor for partially initialized (dtype or memory) storages
  // that can be temporarily created with Caffe2 APIs. See the note on top of
  // TensorImpl.h for details.
  static Storage create_legacy(at::Device device) {
    auto allocator = GetAllocator(device.type());
    return Storage(c10::make_intrusive<StorageImpl>(
        StorageImpl::use_byte_size_t(),
        0,
        allocator->allocate(0), // materialize a non-default Device.
        allocator,
        true));
  }

  // Mimic create_legacy, but without requiring a newly-created StorageImpl.
  // Requires the storage to be resizable and to have an allocator.
  void reset_legacy() {
    TORCH_CHECK(resizable() && allocator());
    set_nbytes(0);
    set_data_ptr_noswap(allocator()->allocate(0));
  }
// TODO: remove later
void set_nbytes(size_t size_bytes) const {
storage_impl_.get()->set_nbytes(size_bytes);
}
void set_nbytes(c10::SymInt size_bytes) const {
storage_impl_.get()->set_nbytes(std::move(size_bytes));
}
bool resizable() const {
return storage_impl_->resizable();
}
size_t nbytes() const {
return storage_impl_->nbytes();
}
SymInt sym_nbytes() const {
return storage_impl_->sym_nbytes();
}
// get() use here is to get const-correctness
const void* data() const {
return storage_impl_->data();
}
void* mutable_data() const {
return storage_impl_->mutable_data();
}
at::DataPtr& mutable_data_ptr() {
return storage_impl_->mutable_data_ptr();
}
const at::DataPtr& data_ptr() const {
return storage_impl_->data_ptr();
}
// Returns the previous data_ptr
at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) const {
return storage_impl_.get()->set_data_ptr(std::move(data_ptr));
}
void set_data_ptr_noswap(at::DataPtr&& data_ptr) const {
return storage_impl_.get()->set_data_ptr_noswap(std::move(data_ptr));
}
DeviceType device_type() const {
return storage_impl_->device_type();
}
at::Allocator* allocator() const {
return storage_impl_.get()->allocator();
}
at::Device device() const {
return storage_impl_->device();
}
StorageImpl* unsafeReleaseStorageImpl() {
return storage_impl_.release();
}
StorageImpl* unsafeGetStorageImpl() const noexcept {
return storage_impl_.get();
}
c10::weak_intrusive_ptr<StorageImpl> getWeakStorageImpl() const {
return c10::weak_intrusive_ptr<StorageImpl>(storage_impl_);
}
operator bool() const {
return storage_impl_;
}
size_t use_count() const {
return storage_impl_.use_count();
}
inline bool unique() const {
return storage_impl_.unique();
}
bool is_alias_of(const Storage& other) const {
return storage_impl_ == other.storage_impl_;
}
void UniqueStorageShareExternalPointer(
void* src,
size_t capacity,
DeleterFnPtr d = nullptr) {
if (!storage_impl_.unique()) {
TORCH_CHECK(
false,
"UniqueStorageShareExternalPointer can only be called when use_count == 1");
}
storage_impl_->UniqueStorageShareExternalPointer(src, capacity, d);
}
void UniqueStorageShareExternalPointer(
at::DataPtr&& data_ptr,
size_t capacity) {
if (!storage_impl_.unique()) {
TORCH_CHECK(
false,
"UniqueStorageShareExternalPointer can only be called when use_count == 1");
}
storage_impl_->UniqueStorageShareExternalPointer(
std::move(data_ptr), capacity);
}
protected:
c10::intrusive_ptr<StorageImpl> storage_impl_;
};
} // namespace c10
| 4,715
| 25.346369
| 86
|
h
|
null |
pytorch-main/c10/core/StorageImpl.h
|
#pragma once
#include <c10/core/Allocator.h>
#include <c10/core/SymInt.h>
#include <c10/core/impl/PyObjectSlot.h>
#include <c10/util/intrusive_ptr.h>
namespace c10 {
// A storage represents the underlying backing data buffer for a
// tensor. This concept was inherited from the original Torch7
// codebase; we'd kind of like to get rid of the concept
// (see https://github.com/pytorch/pytorch/issues/14797) but
// it's hard work and no one has gotten around to doing it.
//
// NB: storage is supposed to uniquely own a data pointer; e.g.,
// two non-null data pointers alias if and only if they are from
// the same storage. Technically you can violate this invariant
// (e.g., you can create a non-owning StorageImpl with at::from_blob)
// but a lot of things won't work correctly, including:
//
// - An ordinary deleter on such a storage is wrong, because normal deleters
// assume unique ownership, but if you have two storages at the same data,
// that implies there is some sort of shared ownership. So your deleter would
// have to actually be internally doing some sort of refcount thing
// - Deepcopy in Python side relies on storage equality and not data pointer
// equality; so if there are two separate storages pointing to the same data,
// the data will actually get duplicated in that case (one data ptr before,
// two data ptrs after)
// - Version counts won't work correctly, because we do all VC tracking at the
// level of storages (unless you explicitly disconnect the VC with detach);
// mutation because data pointers are the same are totally untracked
struct C10_API StorageImpl : public c10::intrusive_ptr_target {
 public:
  // Tag type to select the "size in bytes" constructors below.
  struct use_byte_size_t {};

  StorageImpl(
      use_byte_size_t /*use_byte_size*/,
      SymInt size_bytes,
      at::DataPtr data_ptr,
      at::Allocator* allocator,
      bool resizable)
      : data_ptr_(std::move(data_ptr)),
        size_bytes_(std::move(size_bytes)),
        size_bytes_is_heap_allocated_(size_bytes_.is_heap_allocated()),
        resizable_(resizable),
        received_cuda_(false),
        allocator_(allocator) {
    if (resizable) {
      TORCH_INTERNAL_ASSERT(
          allocator_, "For resizable storage, allocator must be provided");
    }
  }

  // Allocates the buffer itself via `allocator`. If the size is symbolic
  // (heap-allocated SymInt), a zero-byte buffer is allocated instead.
  StorageImpl(
      use_byte_size_t /*use_byte_size*/,
      SymInt size_bytes,
      at::Allocator* allocator,
      bool resizable)
      : StorageImpl(
            use_byte_size_t(),
            size_bytes,
            size_bytes.is_heap_allocated()
                ? allocator->allocate(0)
                : allocator->allocate(size_bytes.as_int_unchecked()),
            allocator,
            resizable) {}

  // Not copyable or movable: aliasing is defined by StorageImpl identity
  // (see the note above), and instances are managed via intrusive_ptr.
  StorageImpl& operator=(StorageImpl&& other) = delete;
  StorageImpl& operator=(const StorageImpl&) = delete;
  StorageImpl() = delete;
  StorageImpl(StorageImpl&& other) = delete;
  StorageImpl(const StorageImpl&) = delete;
  ~StorageImpl() override = default;

  // Drops the data and marks the storage as zero-sized.
  void reset() {
    data_ptr_.clear();
    size_bytes_ = 0;
    size_bytes_is_heap_allocated_ = false;
  }

  // Destructor doesn't call release_resources because it's
  // unnecessary; don't forget to change that if needed!
  void release_resources() override {
    data_ptr_.clear();
  }

  size_t nbytes() const {
    // OK to do this instead of maybe_as_int as nbytes is guaranteed positive
    TORCH_CHECK(!size_bytes_is_heap_allocated_);
    return size_bytes_.as_int_unchecked();
  }

  SymInt sym_nbytes() const {
    return size_bytes_;
  }

  // TODO: remove later
  void set_nbytes(size_t size_bytes) {
    size_bytes_ = size_bytes;
    size_bytes_is_heap_allocated_ = false;
  }

  void set_nbytes(c10::SymInt size_bytes) {
    size_bytes_ = std::move(size_bytes);
  }

  bool resizable() const {
    return resizable_;
  }

  at::DataPtr& mutable_data_ptr() {
    return data_ptr_;
  }

  const at::DataPtr& data_ptr() const {
    return data_ptr_;
  }

  // Returns the previous data_ptr
  at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) {
    at::DataPtr old_data_ptr(std::move(data_ptr_));
    data_ptr_ = std::move(data_ptr);
    return old_data_ptr;
  }

  // Replaces the data_ptr; the previous one is destroyed, not returned.
  void set_data_ptr_noswap(at::DataPtr&& data_ptr) {
    data_ptr_ = std::move(data_ptr);
  }

  const void* data() const {
    return data_ptr_.get();
  }

  void* mutable_data() {
    return data_ptr_.mutable_get();
  }

  at::DeviceType device_type() const {
    return data_ptr_.device().type();
  }

  at::Allocator* allocator() {
    return allocator_;
  }

  const at::Allocator* allocator() const {
    return allocator_;
  }

  // You generally shouldn't use this method, but it is occasionally
  // useful if you want to override how a tensor will be reallocated,
  // after it was already allocated (and its initial allocator was
  // set)
  void set_allocator(at::Allocator* allocator) {
    allocator_ = allocator;
  }

  Device device() const {
    return data_ptr_.device();
  }

  void set_resizable(bool resizable) {
    if (resizable) {
      // We need an allocator to be resizable
      AT_ASSERT(allocator_);
    }
    resizable_ = resizable;
  }

  /**
   * Can only be called when use_count is 1
   */
  void UniqueStorageShareExternalPointer(
      void* src,
      size_t size_bytes,
      DeleterFnPtr d = nullptr) {
    UniqueStorageShareExternalPointer(
        at::DataPtr(src, src, d, data_ptr_.device()), size_bytes);
  }

  /**
   * Can only be called when use_count is 1
   */
  void UniqueStorageShareExternalPointer(
      at::DataPtr&& data_ptr,
      size_t size_bytes) {
    data_ptr_ = std::move(data_ptr);
    size_bytes_ = size_bytes;
    size_bytes_is_heap_allocated_ = false;
    // External memory: we no longer own allocation, so disable resizing.
    allocator_ = nullptr;
    resizable_ = false;
  }

  // This method can be used only after storage construction and cannot be used
  // to modify storage status
  void set_received_cuda(bool received_cuda) {
    received_cuda_ = received_cuda;
  }

  bool received_cuda() {
    return received_cuda_;
  }

 private:
  DataPtr data_ptr_;
  SymInt size_bytes_;
  // Cached size_bytes_.is_heap_allocated(); kept in sync by the setters above.
  bool size_bytes_is_heap_allocated_;
  bool resizable_;
  // Identifies that Storage was received from another process and doesn't have
  // local to process cuda memory allocation
  bool received_cuda_;
  Allocator* allocator_;
  impl::PyObjectSlot pyobj_slot_;
};
// Declare StorageImpl create function pointer types.
using StorageImplCreateHelper = intrusive_ptr<StorageImpl> (*)(
StorageImpl::use_byte_size_t,
SymInt size_bytes,
Allocator* allocator,
bool resizable);
C10_API void SetStorageImplCreate(DeviceType t, StorageImplCreateHelper fptr);
C10_API StorageImplCreateHelper GetStorageImplCreate(DeviceType t);
} // namespace c10
| 6,645
| 27.895652
| 79
|
h
|
null |
pytorch-main/c10/core/Stream.h
|
#pragma once
#include <c10/core/Device.h>
namespace c10 {
/// An index representing a specific stream. A StreamId is not independently
/// meaningful without knowing the Device it is associated with; try to
/// use Stream rather than StreamId directly.
///
/// StreamIds are opaque; they are assigned by some DeviceType-specific
/// numbering system which is not visible to the user. HOWEVER, we
/// guarantee that StreamId 0 is always a valid stream, and corresponds
/// to some sort of "default" stream.
using StreamId = int64_t;
struct C10_API StreamData3 {
StreamId stream_id;
DeviceIndex device_index;
DeviceType device_type;
};
// NB: I decided not to call the above StreamIndex to avoid confusion with
// DeviceIndex. This way, you access device index with index(), and stream id
// with id()
/**
* A stream is a software mechanism used to synchronize launched kernels
* without requiring explicit synchronizations between kernels. The basic
* model is that every kernel launch is associated with a stream: every
* kernel on the same stream is implicitly synchronized so that if I launch
* kernels A and B on the same stream, A is guaranteed to finish before B
* launches. If I want B to run concurrently with A, I must schedule
* it on a different stream.
*
* The Stream class is a backend agnostic value class representing a stream
* which I may schedule a kernel on. Every stream is associated with a device,
* which is recorded in stream, which is used to avoid confusion about which
* device a stream refers to.
*
* Streams are explicitly thread-safe, in the sense that it is OK to pass
* a Stream from one thread to another, and kernels queued from two different
* threads will still get serialized appropriately. (Of course, the
* time when the kernels get queued is undetermined unless you synchronize
* host side ;)
*
* Stream does NOT have a default constructor. Streams are for expert
* users; if you want to use Streams, we're going to assume you know
* how to deal with C++ template error messages if you try to
* resize() a vector of Streams.
*
* Known instances of streams in backends:
*
* - cudaStream_t (CUDA)
* - hipStream_t (HIP)
* - cl_command_queue (OpenCL) (NB: Caffe2's existing OpenCL integration
* does NOT support command queues.)
*
* Because this class is device agnostic, it cannot provide backend-specific
* functionality (e.g., get the cudaStream_t of a CUDA stream.) There are
* wrapper classes which provide this functionality, e.g., CUDAStream.
*/
class C10_API Stream final {
 private:
  // The device this stream is associated with.
  Device device_;
  // Backend-assigned opaque id; id 0 is always the "default" stream
  // (see the StreamId comment above).
  StreamId id_;

 public:
  // Tag values disambiguating the two constructors below.
  enum Unsafe { UNSAFE };
  enum Default { DEFAULT };

  /// Unsafely construct a stream from a Device and a StreamId. In
  /// general, only specific implementations of streams for a
  /// backend should manufacture Stream directly in this way; other users
  /// should use the provided APIs to get a stream. In particular,
  /// we don't require backends to give any guarantees about non-zero
  /// StreamIds; they are welcome to allocate in whatever way they like.
  explicit Stream(Unsafe, Device device, StreamId id)
      : device_(device), id_(id) {}

  /// Construct the default stream of a Device. The default stream is
  /// NOT the same as the current stream; default stream is a fixed stream
  /// that never changes, whereas the current stream may be changed by
  /// StreamGuard.
  explicit Stream(Default, Device device) : device_(device), id_(0) {}

  bool operator==(const Stream& other) const noexcept {
    return this->device_ == other.device_ && this->id_ == other.id_;
  }
  bool operator!=(const Stream& other) const noexcept {
    return !(*this == other);
  }

  Device device() const noexcept {
    return device_;
  }
  DeviceType device_type() const noexcept {
    return device_.type();
  }
  DeviceIndex device_index() const noexcept {
    return device_.index();
  }
  StreamId id() const noexcept {
    return id_;
  }

  // Enqueues a wait instruction in the stream's work queue.
  // This instruction is a no-op unless the event is marked
  // for recording. In that case the stream stops processing
  // until the event is recorded.
  template <typename T>
  void wait(const T& event) const {
    event.block(*this);
  }

  // Return whether all asynchronous work previously enqueued on this stream
  // has completed running on the device.
  bool query() const;

  // Wait (by blocking the calling thread) until all asynchronous work enqueued
  // on this stream has completed running on the device.
  void synchronize() const;

  // The purpose of this function is to more conveniently permit binding
  // of Stream to and from Python. Without packing, I have to setup a whole
  // class with two fields (device and stream id); with packing I can just
  // store a single uint64_t.
  //
  // The particular way we pack streams into a uint64_t is considered an
  // implementation detail and should not be relied upon.
  uint64_t hash() const noexcept {
    // Concat these together into a 64-bit integer
    uint64_t bits = static_cast<uint64_t>(device_type()) << 56 |
        static_cast<uint64_t>(device_index()) << 48 |
        // Remove the sign extension part of the 64-bit address because
        // the id might be used to hold a pointer.
        (static_cast<uint64_t>(id()) & ((1ull << 48) - 1));
    return bits;
  }

  // Decompose the stream into its (id, device index, device type) triple;
  // the inverse of unpack3.
  struct StreamData3 pack3() const {
    return {id(), device_index(), device_type()};
  }

  // Reconstruct a Stream from the triple produced by pack3.
  static Stream unpack3(
      StreamId stream_id,
      DeviceIndex device_index,
      DeviceType device_type) {
    TORCH_CHECK(isValidDeviceType(device_type));
    return Stream(UNSAFE, Device(device_type, device_index), stream_id);
  }

  // I decided NOT to provide setters on this class, because really,
  // why would you change the device of a stream? Just construct
  // it correctly from the beginning dude.
};
C10_API std::ostream& operator<<(std::ostream& stream, const Stream& s);
} // namespace c10
namespace std {
// Hash support so c10::Stream can be used as a key in std::unordered_map /
// std::unordered_set; delegates to Stream::hash(), which packs the stream
// into a single uint64_t.
template <>
struct hash<c10::Stream> {
  size_t operator()(c10::Stream s) const noexcept {
    std::hash<uint64_t> uint64_hasher;
    return uint64_hasher(s.hash());
  }
};
} // namespace std
| 6,190
| 35.417647
| 79
|
h
|
null |
pytorch-main/c10/core/StreamGuard.h
|
#pragma once
#include <c10/core/impl/InlineStreamGuard.h>
namespace c10 {
/**
* A StreamGuard is an RAII class that changes the current device
* to the device corresponding to some stream, and changes the
* default stream on that device to be this stream.
*
* Use of StreamGuard is HIGHLY discouraged in operator definitions. In
* a single operator, you probably don't know enough about the global
* state of the world to profitably decide how to set streams. Let
* the caller handle this appropriately, and just use the current stream
* in your operator code.
*
* This StreamGuard does NOT have an uninitialized state; it is guaranteed
* to reset the stream and device on exit. If you are in a situation
* where you *might* want to setup a stream guard, see OptionalStreamGuard.
*/
struct StreamGuard {
  /// No default constructor, see Note [Omitted default constructor from RAII]
  explicit StreamGuard() = delete;

  /// Set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  explicit StreamGuard(Stream stream) : guard_(stream) {}

  /// Copy is disallowed
  StreamGuard(const StreamGuard&) = delete;
  StreamGuard& operator=(const StreamGuard&) = delete;

  /// Move is disallowed, as StreamGuard does not have an uninitialized state,
  /// which is required for moves on types with nontrivial destructors.
  StreamGuard(StreamGuard&& other) = delete;
  StreamGuard& operator=(StreamGuard&& other) = delete;

  /// Resets the currently set stream to the original stream and
  /// the currently set device to the original device. Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  ///
  /// NOTE: this implementation may skip some stream/device setting if
  /// it can prove that it is unnecessary.
  ///
  /// WARNING: reset_stream does NOT preserve previously set streams on
  /// different devices. If you need to set streams on multiple devices
  /// on , use MultiStreamGuard instead.
  void reset_stream(Stream stream) {
    guard_.reset_stream(stream);
  }

  /// Returns the stream that was set at the time the guard was constructed.
  Stream original_stream() const {
    return guard_.original_stream();
  }

  /// Returns the most recent stream that was set using this device guard,
  /// either from construction, or via set_stream.
  Stream current_stream() const {
    return guard_.current_stream();
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device/reset_device/set_index.
  Device current_device() const {
    return guard_.current_device();
  }

  /// Returns the device that was set at the most recent reset_stream(),
  /// or otherwise the device at construction time.
  Device original_device() const {
    return guard_.original_device();
  }

 private:
  /// The inline guard (dispatching through VirtualGuardImpl) that performs
  /// the actual save/restore of the device and stream.
  c10::impl::InlineStreamGuard<impl::VirtualGuardImpl> guard_;
};
/**
* An OptionalStreamGuard is an RAII class that sets a device to some value on
* initialization, and resets the device to its original value on destruction.
* See OptionalDeviceGuard for more guidance on how to use this class.
*/
struct OptionalStreamGuard {
  /// Create an uninitialized guard.
  explicit OptionalStreamGuard() = default;

  /// Set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  explicit OptionalStreamGuard(Stream stream) : guard_(stream) {}

  /// Set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream,
  /// if the passed stream is not nullopt.
  explicit OptionalStreamGuard(optional<Stream> stream_opt)
      : guard_(stream_opt) {}

  /// Copy is disallowed
  OptionalStreamGuard(const OptionalStreamGuard&) = delete;
  OptionalStreamGuard& operator=(const OptionalStreamGuard&) = delete;

  // See Note [Move construction for RAII guards is tricky]
  OptionalStreamGuard(OptionalStreamGuard&& other) = delete;

  // See Note [Move assignment for RAII guards is tricky]
  OptionalStreamGuard& operator=(OptionalStreamGuard&& other) = delete;

  /// Resets the currently set stream to the original stream and
  /// the currently set device to the original device. Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  /// Initializes the guard if it was not previously initialized.
  void reset_stream(Stream stream) {
    guard_.reset_stream(stream);
  }

  /// Returns the stream that was set at the time the guard was most recently
  /// initialized, or nullopt if the guard is uninitialized.
  optional<Stream> original_stream() const {
    return guard_.original_stream();
  }

  /// Returns the most recent stream that was set using this stream guard,
  /// either from construction, or via reset_stream, if the guard is
  /// initialized, or nullopt if the guard is uninitialized.
  optional<Stream> current_stream() const {
    return guard_.current_stream();
  }

  /// Restore the original device and stream, resetting this guard to
  /// uninitialized state.
  void reset() {
    guard_.reset();
  }

 private:
  /// Inline optional guard (dispatching through VirtualGuardImpl); empty
  /// when the guard is uninitialized.
  c10::impl::InlineOptionalStreamGuard<impl::VirtualGuardImpl> guard_{};
};
/**
* A MultiStreamGuard is an RAII class that sets the current streams of a set of
* devices all at once, and resets them to their original values on destruction.
*/
struct MultiStreamGuard {
  /// Set the current streams to the passed streams on each of their respective
  /// devices.
  explicit MultiStreamGuard(ArrayRef<Stream> streams) : guard_(streams) {}

  /// Copy is disallowed
  MultiStreamGuard(const MultiStreamGuard&) = delete;
  MultiStreamGuard& operator=(const MultiStreamGuard&) = delete;

  // See Note [Move construction for RAII guards is tricky]
  MultiStreamGuard(MultiStreamGuard&& other) = delete;

  // See Note [Move assignment for RAII guards is tricky]
  MultiStreamGuard& operator=(MultiStreamGuard&& other) = delete;

 private:
  /// Inline guard that records and restores the current stream of every
  /// device touched by the streams passed at construction.
  c10::impl::InlineMultiStreamGuard<impl::VirtualGuardImpl> guard_;
};
} // namespace c10
| 6,314
| 37.042169
| 80
|
h
|
null |
pytorch-main/c10/core/SymBool.h
|
#pragma once
#include <c10/core/SymNodeImpl.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
namespace c10 {
// SymBool represents either a concrete bool or a symbolic boolean expression
// (a type-erased SymNode for which is_bool() holds). The concrete value lives
// in data_, the symbolic one in ptr_; exactly one is meaningful at a time.
//
// Fix: removed the stray semicolons after the constructor bodies (`{};`) —
// they are empty declarations flagged by -Wextra-semi / clang-tidy.
class C10_API SymBool {
 public:
  /*implicit*/ SymBool(bool b) : data_(b) {}
  SymBool(SymNode ptr) : data_(false), ptr_(std::move(ptr)) {
    TORCH_CHECK(ptr_->is_bool());
  }
  SymBool() : data_(false) {}

  // Borrow the underlying SymNodeImpl (null when not symbolic).
  SymNodeImpl* toSymNodeImplUnowned() const {
    return ptr_.get();
  }

  // Transfer ownership of the SymNodeImpl out of this (rvalue-only).
  SymNodeImpl* release() && {
    return std::move(ptr_).release();
  }

  // Only valid if is_symbolic()
  SymNode toSymNodeImpl() const;

  // Guaranteed to return a SymNode, wrapping using base if necessary
  SymNode wrap_node(const SymNode& base) const;

  // Returns the concrete value; errors if the bool is symbolic.
  bool expect_bool() const {
    TORCH_CHECK(!is_symbolic());
    return data_;
  }

  SymBool sym_and(const SymBool&) const;
  SymBool sym_or(const SymBool&) const;
  SymBool sym_not() const;

  // NB: these overload the *bitwise* operators (&, |, ~), which, unlike
  // && and ||, evaluate both operands eagerly.
  SymBool operator&(const SymBool& other) const {
    return sym_and(other);
  }
  SymBool operator|(const SymBool& other) const {
    return sym_or(other);
  }
  SymBool operator~() const {
    return sym_not();
  }

  // Insert a guard for the bool to be its concrete value, and then return
  // that value. Note that C++ comparison operations default to returning
  // bool, so it's not so common to have to call this
  bool guard_bool(const char* file, int64_t line) const;

  bool has_hint() const;

  C10_ALWAYS_INLINE bool is_symbolic() const {
    return ptr_;
  }

  // Returns data_ without checking is_symbolic(); only meaningful when the
  // bool is not symbolic.
  bool as_bool_unchecked() const {
    return data_;
  }

 private:
  // TODO: optimize to union
  bool data_;
  SymNode ptr_;
};
C10_API std::ostream& operator<<(std::ostream& os, const SymBool& s);
} // namespace c10
| 1,697
| 21.945946
| 74
|
h
|
null |
pytorch-main/c10/core/SymFloat.h
|
#pragma once
#include <c10/core/SymBool.h>
#include <c10/core/SymNodeImpl.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
#include <limits>
namespace c10 {
// NB: this is actually double precision; we're using the Python naming here
// SymFloat represents either a concrete double or a symbolic float expression
// (a type-erased SymNode for which is_float() holds). The concrete value
// lives in data_ (NaN when symbolic), the symbolic one in ptr_.
//
// Fix: removed the stray semicolons after the constructor bodies (`{};`) —
// they are empty declarations flagged by -Wextra-semi / clang-tidy.
class C10_API SymFloat {
 public:
  /*implicit*/ SymFloat(double d) : data_(d) {}
  SymFloat(SymNode ptr)
      : data_(std::numeric_limits<double>::quiet_NaN()), ptr_(std::move(ptr)) {
    TORCH_CHECK(ptr_->is_float());
  }
  SymFloat() : data_(0.0) {}

  // Borrow the underlying SymNodeImpl (null when not symbolic).
  SymNodeImpl* toSymNodeImplUnowned() const {
    return ptr_.get();
  }

  // Transfer ownership of the SymNodeImpl out of this (rvalue-only).
  SymNodeImpl* release() && {
    return std::move(ptr_).release();
  }

  // Only valid if is_symbolic()
  SymNode toSymNodeImpl() const;

  // Guaranteed to return a SymNode, wrapping using base if necessary
  SymNode wrap_node(const SymNode& base) const;

  // Returns the concrete value; errors if the float is symbolic.
  double expect_float() const {
    TORCH_CHECK(!is_symbolic());
    return data_;
  }

  SymFloat operator+(const SymFloat&) const;
  SymFloat operator-(const SymFloat&) const;
  SymFloat operator*(const SymFloat&) const;
  SymFloat operator/(const SymFloat&) const;

  SymBool sym_eq(const SymFloat&) const;
  SymBool sym_ne(const SymFloat&) const;
  SymBool sym_lt(const SymFloat&) const;
  SymBool sym_le(const SymFloat&) const;
  SymBool sym_gt(const SymFloat&) const;
  SymBool sym_ge(const SymFloat&) const;

  // The bool-returning comparisons guard on the symbolic result
  // (see guard_float below for the guarding semantics).
  bool operator==(const SymFloat& o) const {
    return sym_eq(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator!=(const SymFloat& o) const {
    return sym_ne(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator<(const SymFloat& o) const {
    return sym_lt(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator<=(const SymFloat& o) const {
    return sym_le(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator>(const SymFloat& o) const {
    return sym_gt(o).guard_bool(__FILE__, __LINE__);
  }
  bool operator>=(const SymFloat& o) const {
    return sym_ge(o).guard_bool(__FILE__, __LINE__);
  }

  SymFloat min(const SymFloat& sci) const;
  SymFloat max(const SymFloat& sci) const;

  // Need guidance on where to put this code
  SymFloat sqrt() const;

  // Insert a guard for the float to be its concrete value, and then return
  // that value. This operation always works, even if the float is symbolic,
  // so long as we know what the underlying value is. Don't blindly put this
  // everywhere; you can cause overspecialization of PyTorch programs with
  // this method.
  //
  // It should be called as guard_float(__FILE__, __LINE__). The file and line
  // number can be used to diagnose overspecialization.
  double guard_float(const char* file, int64_t line) const;

  bool has_hint() const;

  // N.B. It's important to keep this definition in the header
  // as we expect if checks to be folded for mobile builds
  // where `is_symbolic` is always false
  C10_ALWAYS_INLINE bool is_symbolic() const {
    return ptr_;
  }

  // Returns data_ without checking is_symbolic(); only meaningful when the
  // float is not symbolic (data_ is NaN otherwise).
  double as_float_unchecked() const {
    return data_;
  }

 private:
  // TODO: optimize to union
  double data_;
  SymNode ptr_;
};
C10_API std::ostream& operator<<(std::ostream& os, const SymFloat& s);
} // namespace c10
| 3,175
| 27.872727
| 79
|
h
|
null |
pytorch-main/c10/core/SymInt.h
|
#pragma once
#include <c10/core/SymBool.h>
#include <c10/core/SymNodeImpl.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <numeric>
#include <type_traits>
namespace c10 {
class SymFloat;
// SymInt represents either a regular int64_t, or a symbolic integer
// (represented in a type erased way as SymNode). The intention is for SymInt
// to represent symbolic sizes that arise when doing shape computation in
// operator kernels. This allows for tracing through programs without baking in
// concrete sizes into kernel calls.
//
// SymInt has an API equivalent to int64_t. In particular, it is a value type.
// Internally, SymInt is represented in a clever packed way, so that it only
// occupies one word of space; but morally, it is a union between an int64_t
// and an intrusive pointer to SymNodeImpl.
//
// Invariant: the referenced SymNodeImpl is guaranteed to be a SymNode where
// is_int() returns true
class C10_API SymInt {
public:
enum Unchecked {
UNCHECKED,
};
/*implicit*/ SymInt(int64_t d) : data_(d) {
if (is_heap_allocated()) {
// Large negative number, heap allocate it
promote_to_negative();
}
};
SymInt() : data_(0) {}
SymInt(SymNode n);
// unchecked c-tor accepting raw `data_`
// One appropriate use for this is when you are constructing a symint
// in a situation where you know it is non-negative (or, if it is negative,
// the negative value is -1; i.e., not user controlled)
SymInt(Unchecked, int64_t d) : data_(d) {}
// TODO: these implementations are not optimal because they allocate a
// temporary and then use the move constructor/assignment
SymInt(const SymInt& s) : data_(0) {
if (s.is_heap_allocated()) {
*this = SymInt(s.toSymNode());
} else {
data_ = s.data_;
}
}
SymInt(SymInt&& s) noexcept : data_(s.data_) {
s.data_ = 0;
}
SymInt& operator=(const SymInt& s) {
if (this != &s) {
if (s.is_heap_allocated()) {
*this = SymInt(s.toSymNode());
} else {
data_ = s.data_;
}
}
return *this;
}
SymInt& operator=(SymInt&& s) noexcept {
if (this != &s) {
release_(); // release the current SymNode if any
data_ = s.data_;
if (s.is_heap_allocated())
s.data_ = 0;
};
return *this;
}
SymNodeImpl* toSymNodeImplUnowned() const {
TORCH_INTERNAL_ASSERT_DEBUG_ONLY(is_heap_allocated());
uint64_t unextended_bits = static_cast<uint64_t>(data_) & ~MASK;
uint64_t sign_bit_mask = 1ULL << (62 - 1);
// https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c
uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask;
return static_cast<SymNodeImpl*>(
reinterpret_cast<void*>(static_cast<uintptr_t>(extended_bits)));
}
void release_() {
if (is_heap_allocated()) {
SymNode::reclaim(toSymNodeImplUnowned()); // steal
}
}
SymNodeImpl* release() && {
#ifndef C10_MOBILE
TORCH_INTERNAL_ASSERT(is_heap_allocated());
auto* r = toSymNodeImplUnowned();
data_ = 0; // transfer ownership
return r;
#else
TORCH_INTERNAL_ASSERT(false);
#endif
}
// Only valid if is_heap_allocated()
SymNode toSymNode() const;
// Guaranteed to return a SymNode, wrapping using base if necessary
SymNode wrap_node(const SymNode& base) const;
~SymInt() {
release_();
}
// Require the int to be non-symbolic, and if it is symbolic raise an
// error. This is safe to use for C++ code that doesn't work for symbolic
// shapes, and you don't have time to fix it immediately, as if we
// try to trigger the path in C++ you'll appropriately get an error
int64_t expect_int() const {
if (auto r = maybe_as_int()) {
return *r;
}
TORCH_CHECK(false, "expected int but got ", *this);
}
// Test if we have a hint for this int (e.g., guard_int would work).
// Most of the time this is true; it is only false when you have
// an unbacked SymInt.
bool has_hint() const;
// Insert a guard for the int to be its concrete value, and then return
// that value. This operation always works, even if the int is symbolic,
// so long as we know what the underlying value is (e.g., this won't work
// if you call it on the size of nonzero output). Don't blindly put this
// everywhere; you can cause overspecialization of PyTorch programs with
// this method.
//
// It should be called as guard_int(__FILE__, __LINE__). The file and line
// number can be used to diagnose overspecialization.
int64_t guard_int(const char* file, int64_t line) const;
// N.B. It's important to keep this definition in the header
// as we expect if checks to be folded for mobile builds
// where `is_heap_allocated` is always false and optimize dead code paths
C10_ALWAYS_INLINE bool is_heap_allocated() const {
#ifdef C10_MOBILE
return false;
#else
return !check_range(data_);
#endif
}
SymInt operator+(const SymInt& sci) const;
SymInt operator-(const SymInt& sci) const;
SymInt operator*(const SymInt& sci) const;
SymInt operator/(const SymInt& sci) const;
SymInt operator%(const SymInt& sci) const;
void operator*=(const SymInt& sci);
void operator+=(const SymInt& sci);
void operator/=(const SymInt& sci);
SymInt clone() const;
SymBool sym_eq(const SymInt&) const;
SymBool sym_ne(const SymInt&) const;
SymBool sym_lt(const SymInt&) const;
SymBool sym_le(const SymInt&) const;
SymBool sym_gt(const SymInt&) const;
SymBool sym_ge(const SymInt&) const;
// Guarding comparisons: each forwards to the corresponding sym_* form and
// immediately guards on the result, yielding a concrete bool.
bool operator==(const SymInt& o) const {
  return sym_eq(o).guard_bool(__FILE__, __LINE__);
}
bool operator!=(const SymInt& o) const {
  return sym_ne(o).guard_bool(__FILE__, __LINE__);
}
bool operator<(const SymInt& o) const {
  return sym_lt(o).guard_bool(__FILE__, __LINE__);
}
bool operator<=(const SymInt& o) const {
  return sym_le(o).guard_bool(__FILE__, __LINE__);
}
bool operator>(const SymInt& o) const {
  return sym_gt(o).guard_bool(__FILE__, __LINE__);
}
bool operator>=(const SymInt& o) const {
  return sym_ge(o).guard_bool(__FILE__, __LINE__);
}
SymInt min(const SymInt& sci) const;
SymInt max(const SymInt& sci) const;
// Conversion to the floating-point symbolic counterpart.
operator SymFloat() const;
// Don't use this. Prefer maybe_as_int instead
// Returns the raw inline payload; only valid when the SymInt is not
// heap-allocated (debug-asserted below).
int64_t as_int_unchecked() const {
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_heap_allocated());
  return data_;
}
// Returns the concrete integer value if one is known without guarding:
// either the inline payload, the node's cached large-negative value, or
// whatever the node itself can report; c10::nullopt otherwise.
c10::optional<int64_t> maybe_as_int() const {
  if (!is_heap_allocated()) {
    return c10::make_optional(data_);
  }
  auto* node = toSymNodeImplUnowned();
  if (int64_t neg = node->large_negative_int()) {
    // Nonzero means the node is actually a large negative constant.
    return c10::make_optional(neg);
  }
  return node->maybe_as_int();
}
// Return whether the integer is directly coercible to a SymInt
// without requiring heap allocation. You don't need to use this
// to check if you can pass an integer to SymInt; this is guaranteed
// to work (it just might heap allocate!)
static bool check_range(int64_t i) {
  return i > MAX_UNREPRESENTABLE_INT;
}
// Return the min representable integer as a SymInt without
// heap allocation. For quantities that count bytes (or larger),
// this is still much larger than you need, so you may consider
// using this as a more efficient version of MIN_INT
static constexpr int64_t min_representable_int() {
  return MAX_UNREPRESENTABLE_INT + 1;
}
private:
// Re-tags data_ as a heap-allocated node when an inline negative value
// cannot be represented (declared here, defined out of line).
void promote_to_negative();
// Constraints on the internal representation:
//
// - Should represent positive and small negative ints
// - No conversion necessary for operations on ints
// - Must represent valid 64-bit pointers
// - Is symbolic test should be FAST (two arithmetic instructions is too
// much).
// This code being a hotpath is based on Strobelight profiles of
// is_heap_allocated(). FB only: https://fburl.com/strobelight/5l50ncxd
// (you will need to change the time window).
//
// So, the scheme is to reserve large negative numbers (assuming
// two's complement):
//
// - 0b0.... means we are a positive int
// - 0b11... means we are a small negative int
// - 0b10... means we are a pointer. This means that
// [-2^63, -2^62-1] are not representable as ints.
// We don't actually need all of this space as on x86_64
// as the top 16bits aren't used for anything
static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62 | 1ULL << 61;
static constexpr uint64_t IS_SYM = 1ULL << 63 | 1ULL << 61;
// We must manually translate the bit pattern test into a greater
// than test because compiler doesn't figure it out:
// https://godbolt.org/z/356aferaW
static constexpr int64_t MAX_UNREPRESENTABLE_INT =
    -1LL & static_cast<int64_t>(~(1ULL << 62));
int64_t data_;
};
/// Product of a list of SymInt; accumulates into a c10::SymInt expression,
/// folding left-to-right starting from SymInt(1). (Each `*` goes through
/// SymInt::operator*, so symbolic operands stay symbolic.)
template <
    typename C,
    typename std::enable_if<
        std::is_same<typename C::value_type, c10::SymInt>::value,
        int>::type = 0>
inline c10::SymInt multiply_integers(const C& container) {
  return std::accumulate(
      container.begin(),
      container.end(),
      c10::SymInt(1),
      [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; });
}
// Iterator-pair overload: product of all SymInts in [begin, end),
// folding left-to-right starting from SymInt(1) — the same order
// std::accumulate would use.
template <
    typename Iter,
    typename = std::enable_if_t<std::is_same<
        typename std::iterator_traits<Iter>::value_type,
        c10::SymInt>::value>>
inline c10::SymInt multiply_integers(Iter begin, Iter end) {
  c10::SymInt prod(1);
  for (auto it = begin; it != end; ++it) {
    prod = prod * (*it);
  }
  return prod;
}
// Heterogeneous operators between SymInt and raw scalars, declared for both
// argument orders. _INTONLY additionally declares operator% (modulo is not
// declared for floating scalars).
#define DECLARE_SYMINT_OP_INTONLY(scalar_t, RetTy)      \
  C10_API RetTy operator%(const SymInt& a, scalar_t b); \
  C10_API RetTy operator%(scalar_t a, const SymInt& b);
#define DECLARE_SYMINT_OP(scalar_t, RetTy)              \
  C10_API RetTy operator+(const SymInt& a, scalar_t b); \
  C10_API RetTy operator-(const SymInt& a, scalar_t b); \
  C10_API RetTy operator*(const SymInt& a, scalar_t b); \
  C10_API RetTy operator/(const SymInt& a, scalar_t b); \
  C10_API RetTy operator+(scalar_t a, const SymInt& b); \
  C10_API RetTy operator-(scalar_t a, const SymInt& b); \
  C10_API RetTy operator*(scalar_t a, const SymInt& b); \
  C10_API RetTy operator/(scalar_t a, const SymInt& b); \
  C10_API bool operator==(const SymInt& a, scalar_t b); \
  C10_API bool operator!=(const SymInt& a, scalar_t b); \
  C10_API bool operator<(const SymInt& a, scalar_t b);  \
  C10_API bool operator<=(const SymInt& a, scalar_t b); \
  C10_API bool operator>(const SymInt& a, scalar_t b);  \
  C10_API bool operator>=(const SymInt& a, scalar_t b); \
  C10_API bool operator==(scalar_t a, const SymInt& b); \
  C10_API bool operator!=(scalar_t a, const SymInt& b); \
  C10_API bool operator<(scalar_t a, const SymInt& b);  \
  C10_API bool operator<=(scalar_t a, const SymInt& b); \
  C10_API bool operator>(scalar_t a, const SymInt& b);  \
  C10_API bool operator>=(scalar_t a, const SymInt& b);
DECLARE_SYMINT_OP_INTONLY(int64_t, SymInt)
DECLARE_SYMINT_OP_INTONLY(int32_t, SymInt)
DECLARE_SYMINT_OP_INTONLY(uint64_t, SymInt)
DECLARE_SYMINT_OP_INTONLY(uint32_t, SymInt)
DECLARE_SYMINT_OP(int64_t, SymInt)
DECLARE_SYMINT_OP(int32_t, SymInt) // make sure constants work
DECLARE_SYMINT_OP(uint64_t, SymInt)
DECLARE_SYMINT_OP(uint32_t, SymInt)
DECLARE_SYMINT_OP(double, SymFloat)
DECLARE_SYMINT_OP(float, SymFloat) // just for completeness
// On OSX size_t is different than uint64_t so we have to
// define it separately
#if defined(__APPLE__)
DECLARE_SYMINT_OP_INTONLY(size_t, SymInt)
DECLARE_SYMINT_OP(size_t, SymInt)
#endif
// NOTE(review): only DECLARE_SYMINT_OP is #undef'd here;
// DECLARE_SYMINT_OP_INTONLY is left defined — confirm intentional.
#undef DECLARE_SYMINT_OP
C10_API std::ostream& operator<<(std::ostream& os, const SymInt& s);
// Unary negation.
C10_API SymInt operator-(const SymInt& s);
} // namespace c10
| 11,961
| 33.373563
| 95
|
h
|
null |
pytorch-main/c10/core/SymIntArrayRef.h
|
#pragma once
#include <c10/core/SymInt.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
namespace c10 {
using SymIntArrayRef = ArrayRef<SymInt>;
// Reinterpret a SymIntArrayRef as an IntArrayRef with no validation.
// Callers must guarantee that no element is heap-allocated (symbolic);
// use asIntArrayRefSlow/asIntArrayRefSlowOpt for the checked variants.
inline at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar) {
  const auto* raw = reinterpret_cast<const int64_t*>(ar.data());
  return IntArrayRef(raw, ar.size());
}
// TODO: a SymIntArrayRef containing a heap allocated large negative integer
// can actually technically be converted to an IntArrayRef... but not with
// the non-owning API we have here. We can't reinterpet cast; we have to
// allocate another buffer and write the integers into it. If you need it,
// we can do it. But I don't think you need it.
// Checked conversion: returns c10::nullopt if any element is symbolic
// (heap-allocated); otherwise reinterprets the buffer as concrete ints.
inline c10::optional<at::IntArrayRef> asIntArrayRefSlowOpt(
    c10::SymIntArrayRef ar) {
  for (const c10::SymInt& s : ar) {
    if (s.is_heap_allocated()) {
      return c10::nullopt;
    }
  }
  return c10::make_optional(asIntArrayRefUnchecked(ar));
}
// Throwing variant of the checked conversion: raises (tagged with the
// caller's file:line, via the C10_AS_INTARRAYREF_SLOW macro below) if any
// element is symbolic, then reinterprets the buffer as concrete ints.
inline at::IntArrayRef asIntArrayRefSlow(
    c10::SymIntArrayRef ar,
    const char* file,
    int64_t line) {
  for (const c10::SymInt& s : ar) {
    TORCH_CHECK(
        !s.is_heap_allocated(),
        file,
        ":",
        line,
        ": SymIntArrayRef expected to contain only concrete integers");
  }
  return asIntArrayRefUnchecked(ar);
}
#define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__)
// Prefer using a more semantic constructor, like
// fromIntArrayRefKnownNonNegative
// Reinterprets an IntArrayRef as SymInts without range-checking; valid only
// when every element fits SymInt's inline (non-heap) encoding.
inline SymIntArrayRef fromIntArrayRefUnchecked(IntArrayRef array_ref) {
  return SymIntArrayRef(
      reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
}
// Non-negative ints always fit the inline encoding, so no check is needed.
inline SymIntArrayRef fromIntArrayRefKnownNonNegative(IntArrayRef array_ref) {
  return fromIntArrayRefUnchecked(array_ref);
}
// Checked conversion: verifies every element is representable inside
// SymInt's inline (non-heap) encoding, then reinterprets the buffer.
inline SymIntArrayRef fromIntArrayRefSlow(IntArrayRef array_ref) {
  // NB: iterate as int64_t, not `long`: on LLP64 platforms (e.g. Windows)
  // `long` is 32 bits, which would silently truncate elements before the
  // range check ever runs.
  for (int64_t i : array_ref) {
    TORCH_CHECK(
        SymInt::check_range(i),
        "IntArrayRef contains an int that cannot be represented as a SymInt: ",
        i);
  }
  return SymIntArrayRef(
      reinterpret_cast<const SymInt*>(array_ref.data()), array_ref.size());
}
} // namespace c10
| 2,137
| 28.694444
| 80
|
h
|
null |
pytorch-main/c10/core/SymNodeImpl.h
|
#pragma once
#include <c10/macros/Export.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <c10/util/intrusive_ptr.h>
namespace c10 {
class SymNodeImpl;
using SymNode = c10::intrusive_ptr<SymNodeImpl>;
// When you add a method, you also need to edit
// torch/csrc/jit/python/init.cpp
// torch/csrc/utils/python_symnode.h
// c10/core/ConstantSymNodeImpl.h
class C10_API SymNodeImpl : public c10::intrusive_ptr_target {
 public:
  ~SymNodeImpl() override = default;

  template <typename T>
  c10::intrusive_ptr<T> dyn_cast() const {
    // NOTE(review): if the dynamic_cast fails, this reclaims a null
    // pointer — presumably reclaim_copy tolerates nullptr; confirm.
    return c10::intrusive_ptr<T>::reclaim_copy(dynamic_cast<T*>(this));
  }

  // these could be pure virtual when we implement LTC versions
  // (every stub below throws "NYI" rather than being abstract;
  // note: stray `;` after the function bodies removed — they were
  // harmless but trip -Wextra-semi).
  virtual bool is_int() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool is_bool() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool is_float() {
    TORCH_CHECK(false, "NYI");
  }
  // Binary arithmetic on symbolic nodes.
  virtual SymNode add(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sub(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode mul(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode truediv(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode pow(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode floordiv(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode mod(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  // Comparisons (return symbolic bool nodes).
  virtual SymNode eq(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode ne(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode gt(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode lt(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode le(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode ge(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode ceil() {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode floor() {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode neg() {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_min(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_max(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_or(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_and(const SymNode& other) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_not() {
    TORCH_CHECK(false, "NYI");
  }
  // NB: self is ignored here, only the arguments are used
  virtual SymNode is_contiguous(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_channels_last_contiguous_2d(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_channels_last_contiguous_3d(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_channels_last_strides_2d(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_channels_last_strides_3d(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode is_non_overlapping_and_dense(
      ArrayRef<SymNode> sizes,
      ArrayRef<SymNode> strides) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode clone() {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode sym_float() {
    TORCH_CHECK(false, "NYI");
  }
  // Wrap a concrete scalar into a node of the same backend as `this`.
  virtual SymNode wrap_int(int64_t num) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode wrap_float(double num) {
    TORCH_CHECK(false, "NYI");
  }
  virtual SymNode wrap_bool(bool num) {
    TORCH_CHECK(false, "NYI");
  }
  // Guarding extraction: specialize to a concrete value, recording the
  // caller's file:line for diagnosing overspecialization.
  virtual int64_t guard_int(const char* file, int64_t line) {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool guard_bool(const char* file, int64_t line) {
    TORCH_CHECK(false, "NYI");
  }
  virtual double guard_float(const char* file, int64_t line) {
    TORCH_CHECK(false, "NYI");
  }
  virtual int64_t int_() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool bool_() {
    TORCH_CHECK(false, "NYI");
  }
  virtual bool has_hint() {
    TORCH_CHECK(false, "NYI");
  }
  virtual std::string str() {
    TORCH_CHECK(false, "NYI");
  }
  virtual int64_t large_negative_int() {
    return 0; // not a large negative int!
  }
  virtual c10::optional<int64_t> maybe_as_int() {
    return c10::nullopt;
  }
  std::ostream& operator<<(std::ostream& os) {
    os << str();
    return os;
  }
};
} // namespace c10
| 4,720
| 25.227778
| 71
|
h
|
null |
pytorch-main/c10/core/UndefinedTensorImpl.h
|
#pragma once
#include <c10/core/TensorImpl.h>
namespace c10 {
// TensorImpl used for undefined tensors; a single shared instance is
// exposed through singleton().
struct C10_API UndefinedTensorImpl final : public TensorImpl {
 public:
  // Without this, we get:
  //  error: identifier "at::UndefinedTensorImpl::_singleton" is undefined in
  //  device code
  // (ostensibly because the constexpr tricks MSVC into trying to compile this
  // function for device as well).
#ifdef _WIN32
  static inline TensorImpl* singleton() {
#else
  static constexpr inline TensorImpl* singleton() {
#endif
    return &_singleton;
  }
#ifdef DEBUG
  bool has_storage() const override;
#endif
  void set_storage_offset(int64_t offset) override;

 protected:
  bool is_contiguous_custom(MemoryFormat format) const override;
  IntArrayRef strides_custom() const override;
  SymIntArrayRef sym_strides_custom() const override;

 private:
  UndefinedTensorImpl();
  // The one and only instance handed out by singleton().
  static UndefinedTensorImpl _singleton;
  const char* tensorimpl_type_name() const override;
};
} // namespace c10
| 964
| 24.394737
| 78
|
h
|
null |
pytorch-main/c10/core/WrapDimMinimal.h
|
#pragma once
#include <c10/core/SymInt.h>
namespace c10 {
namespace detail {
// This template can only be specialized at int64_t and c10::SymInt;
// you'll get linker errors otherwise
template <typename T>
C10_API T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar);
} // namespace detail
// Wrap a possibly-negative dimension index `dim` into [0, dim_post_expr):
// negative dims count from the end (dim + dim_post_expr). Out-of-range
// values (and the scalar case) are handled out of line by
// maybe_wrap_dim_slow.
template <typename T>
T _maybe_wrap_dim(T dim, T dim_post_expr, bool wrap_scalar = true) {
  // Inline the fast paths
  if (C10_LIKELY(dim_post_expr * -1 <= dim && dim < dim_post_expr)) {
    // For SymInts, we want an explicit control flow to trigger a guard, so we
    // may as well branch too.
    if (dim < 0) {
      return dim + dim_post_expr;
    }
    return dim;
  }
  // Check edge-cases out-of-line (wrapping scalars and out-of-bounds errors)
  return c10::detail::maybe_wrap_dim_slow<T>(
      std::move(dim), std::move(dim_post_expr), wrap_scalar);
}
// Concrete-int entry point.
inline int64_t maybe_wrap_dim(
    int64_t dim,
    int64_t dim_post_expr,
    bool wrap_scalar = true) {
  return _maybe_wrap_dim(dim, dim_post_expr, wrap_scalar)
</document>

Wait — I introduced an error. Correcting:
} // namespace c10
| 1,260
| 27.022222
| 80
|
h
|
null |
pytorch-main/c10/core/thread_pool.h
|
#pragma once
#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <c10/util/numa.h>
#include <c10/util/thread_name.h>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
#endif
namespace c10 {
// TODO: move this to C10 and make it C10_API
// Abstract interface for task thread pools.
class C10_API TaskThreadPoolBase {
 public:
  // Enqueue `func` for asynchronous execution on some pool thread.
  virtual void run(std::function<void()> func) = 0;

  // Total number of threads in the pool.
  virtual size_t size() const = 0;

  /**
   * The number of available (i.e. idle) threads in this thread pool.
   */
  virtual size_t numAvailable() const = 0;

  /**
   * Check if the current thread is from the thread pool.
   */
  virtual bool inThreadPool() const = 0;

  virtual ~TaskThreadPoolBase() noexcept = default;

  // Default pool size: hardware concurrency, halved on x86-64
  // (presumably to ignore hyperthread siblings — heuristic).
  static size_t defaultNumThreads() {
    auto num_threads = std::thread::hardware_concurrency();
#if defined(_M_X64) || defined(__x86_64__)
    num_threads /= 2;
#endif
    return num_threads;
  }
};
// Fixed-size worker pool draining a FIFO task queue.
class C10_API ThreadPool : public c10::TaskThreadPoolBase {
 protected:
  // Tagged task holder: exactly one of no_id / with_id is set,
  // selected by run_with_id.
  struct task_element_t {
    bool run_with_id;
    const std::function<void()> no_id;
    const std::function<void(std::size_t)> with_id;

    explicit task_element_t(std::function<void()> f)
        : run_with_id(false), no_id(std::move(f)), with_id(nullptr) {}
    explicit task_element_t(std::function<void(std::size_t)> f)
        : run_with_id(true), no_id(nullptr), with_id(std::move(f)) {}
  };

  std::queue<task_element_t> tasks_; // pending work, guarded by mutex_
  std::vector<std::thread> threads_;
  mutable std::mutex mutex_;
  std::condition_variable condition_; // signals workers: new task available
  std::condition_variable completed_; // signals waiters: queue drained
  std::atomic_bool running_;
  bool complete_;
  std::size_t available_; // idle worker count
  std::size_t total_;
  int numa_node_id_;

 public:
  ThreadPool() = delete;

  explicit ThreadPool(
      int pool_size,
      int numa_node_id = -1,
      std::function<void()> init_thread = nullptr);

  ~ThreadPool() override;

  size_t size() const override;

  size_t numAvailable() const override;

  bool inThreadPool() const override;

  void run(std::function<void()> func) override;

  // Enqueue a task that receives the executing worker's index.
  template <typename Task>
  void runTaskWithID(Task task) {
    std::unique_lock<std::mutex> lock(mutex_);

    // Set task and signal condition variable so that a worker thread will
    // wake up and use the task.
    tasks_.emplace(static_cast<std::function<void(std::size_t)>>(task));
    complete_ = false;
    condition_.notify_one();
  }

  /// @brief Wait for queue to be empty
  void waitWorkComplete();

 private:
  // @brief Entry point for pool threads.
  void main_loop(std::size_t index);
};
// ThreadPool whose workers name themselves "CaffeTaskThread" and bind to
// the requested NUMA node on startup.
class C10_API TaskThreadPool : public c10::ThreadPool {
 public:
  explicit TaskThreadPool(std::size_t pool_size, int numa_node_id = -1)
      : ThreadPool(pool_size, numa_node_id, [numa_node_id]() {
          setThreadName("CaffeTaskThread");
          NUMABind(numa_node_id);
        }) {}
};
C10_DECLARE_SHARED_REGISTRY(
ThreadPoolRegistry,
TaskThreadPoolBase,
int,
int,
bool);
} // namespace c10
C10_CLANG_DIAGNOSTIC_POP()
| 3,122
| 23.209302
| 74
|
h
|
null |
pytorch-main/c10/core/impl/DeviceGuardImplInterface.h
|
#pragma once
#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/Stream.h>
#include <c10/util/Exception.h>
// Just for C10_ANONYMOUS_VARIABLE
#include <c10/util/Registry.h>
#include <atomic>
namespace c10 {
// Forward declaration
class DataPtr;
/**
* Flags defining the behavior of events.
*
* PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The
* BACKEND_DEFAULT is what a particular backend would select if no
* flags were given. PYTORCH_DEFAULT is the PyTorch's framework default
* choice for events on that backend, which may not be the same. For example,
* when PyTorch creates a CUDA event it sets the flag
* CUDA_EVENT_DISABLING_TIMING by default to improve performance.
*
* The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each
* backend implementation. Backend-specific flags, like CUDA_EVENT_DEFAULT,
* should map one-to-one with actual event flags for those backends.
*/
enum class EventFlag {
  // Valid for all backends (see block comment above for semantics).
  PYTORCH_DEFAULT,
  BACKEND_DEFAULT,
  // CUDA flags
  CUDA_EVENT_DEFAULT,
  CUDA_EVENT_DISABLE_TIMING, // PyTorch-default for CUDA
  // HIP flags
  HIP_EVENT_DEFAULT,
  HIP_EVENT_DISABLE_TIMING, // PyTorch-default for HIP
  // FOR TESTING ONLY
  INVALID
};
namespace impl {
/**
* DeviceGuardImplInterface represents the virtual interface which provides
* functionality to provide an RAII class for device and stream switching,
* via DeviceGuard. Every distinct device type, e.g., CUDA and HIP, is
* expected to implement and register an implementation of this interface.
* All classes which inherit from DeviceGuardImplInterface should be declared
* 'final'.
*
* This class exists because we provide a unified interface for performing
* device guards via DeviceGuard, but we cannot assume that we have actually
* compiled against the, e.g., CUDA library, which actually implements
* this guard functionality. In this case, a dynamic dispatch is required
* to cross the library boundary.
*
* If possible, you should directly use implementations of this interface;
* those uses will be devirtualized.
*/
struct C10_API DeviceGuardImplInterface {
  /**
   * Return the type of device managed by this guard implementation.
   */
  virtual DeviceType type() const = 0;

  /**
   * Set the current device to Device, and return the previous Device.
   */
  virtual Device exchangeDevice(Device) const = 0;
  // NB: Implementations of exchangeDevice can be a bit boilerplatey. You might
  // consider replacing exchangeDevice with a non-virtual function with a baked
  // in implementation; however, note that this will triple the number of
  // virtual calls (when you implement exchangeDevice in a final subclass,
  // the compiler gets to devirtualize everything; it won't do that if you don't
  // define it in the subclass!) A common way to solve this problem is to use
  // some sort of CRTP; however, we can't template DeviceGuardImplInterface
  // since we really *do* need it to be virtual. A little boilerplate seems
  // easiest to explain. (Another way around this problem is to provide inline
  // functions that provide the default implementations, but this seems a little
  // hard to explain. In any case, we're only going to have on order of ten
  // implementations of this anyway.)

  /**
   * Get the current device.
   */
  virtual Device getDevice() const = 0;

  /**
   * Set the current device to Device.
   */
  virtual void setDevice(Device) const = 0;

  /**
   * Set the current device to Device, without checking for errors
   * (so, e.g., this can be called from a destructor).
   */
  virtual void uncheckedSetDevice(Device) const noexcept = 0;

  /**
   * Get the current stream for a given device.
   */
  virtual Stream getStream(Device) const noexcept = 0;

  /**
   * Get the default stream for a given device.
   */
  virtual Stream getDefaultStream(Device) const {
    TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.")
  }

  /**
   * Get a stream from the global pool for a given device.
   */
  virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false)
      const {
    (void)isHighPriority; // Suppress unused variable warning
    TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
  }

  /**
   * Set a stream to be the thread local current stream for its device.
   * Return the previous stream for that device. You are NOT required
   * to set the current device to match the device of this stream.
   */
  virtual Stream exchangeStream(Stream) const noexcept = 0;

  /**
   * Destroys the given event.
   */
  virtual void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
      const noexcept {}

  /**
   * Increments the event's version and enqueues a job with this version
   * in the stream's work queue. When the stream process that job
   * it notifies all streams waiting on / blocked by that version of the
   * event to continue and marks that version as recorded.
   * */
  virtual void record(
      void** /*event*/,
      const Stream& /*stream*/,
      const DeviceIndex /*device_index*/,
      const c10::EventFlag /*flag*/) const {
    TORCH_CHECK(false, "Backend doesn't support events.");
  }

  /**
   * Does nothing if the event has not been scheduled to be recorded.
   * If the event was previously enqueued to be recorded, a command
   * to wait for the version of the event that exists at the time of this call
   * is inserted in the stream's work queue.
   * When the stream reaches this command it will stop processing
   * additional commands until that version of the event is marked as recorded.
   */
  virtual void block(void* /*event*/, const Stream& /*stream*/) const {
    TORCH_CHECK(false, "Backend doesn't support events.");
  }

  /**
   * Returns true if (and only if)
   *  (1) the event has never been scheduled to be recorded
   *  (2) the current version is marked as recorded.
   * Returns false otherwise.
   */
  virtual bool queryEvent(void* /*event*/) const {
    TORCH_CHECK(false, "Backend doesn't support events.");
  }

  /**
   * Get the number of devices. WARNING: This is REQUIRED to not raise
   * an exception. If there is some sort of problem, e.g., driver error,
   * you should report that there are zero available devices.
   */
  virtual DeviceIndex deviceCount() const noexcept = 0;

  /**
   * Return true if all the work previously enqueued on the stream for
   * asynchronous execution has completed running on the device.
   */
  virtual bool queryStream(const Stream& /*stream*/) const {
    TORCH_CHECK(false, "Backend doesn't support querying streams.");
  }

  /**
   * Wait (by blocking the calling thread) until all the work previously
   * enqueued on the stream has completed running on the device.
   */
  virtual void synchronizeStream(const Stream& /*stream*/) const {
    TORCH_CHECK(false, "Backend doesn't support synchronizing streams.");
  }

  /**
   * Ensure the caching allocator (if any) is aware that the given DataPtr is
   * being used on the given stream, and that it should thus avoid recycling the
   * DataPtr until all work on that stream is done.
   */
  virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
  }

  /**
   * Intended use of this class is to leak the DeviceGuardImpl at program end.
   * So you better not call the destructor, buster!
   */
  // NOTE(review): implementations are intended to be leaked (see above), but
  // the destructor is still virtual+default so deletion stays well-formed.
  virtual ~DeviceGuardImplInterface() = default;
};
// A no-op device guard impl that doesn't do anything interesting.  Useful
// for devices that don't actually have a concept of device index.  Prominent
// examples are CPU and Meta.
template <DeviceType D>
struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface {
  NoOpDeviceGuardImpl() = default;
  DeviceType type() const override {
    return D;
  }
  Device exchangeDevice(Device) const override {
    return Device(D, -1); // no-op
  }
  Device getDevice() const override {
    // Index -1 denotes "the" (only) device of this type.
    return Device(D, -1);
  }
  void setDevice(Device) const override {
    // no-op
  }
  void uncheckedSetDevice(Device) const noexcept override {
    // no-op
  }
  Stream getStream(Device) const noexcept override {
    // no-op
    return Stream(Stream::DEFAULT, Device(D, -1));
  }
  // NB: These do NOT set the current device
  Stream exchangeStream(Stream) const noexcept override {
    // no-op
    return Stream(Stream::DEFAULT, Device(D, -1));
  }
  DeviceIndex deviceCount() const noexcept override {
    return 1;
  }

  // Event-related functions: unsupported for no-op devices.
  void record(
      void** /*event*/,
      const Stream& /*stream*/,
      const DeviceIndex /*device_index*/,
      const EventFlag /*flag*/) const override {
    TORCH_CHECK(false, D, " backend doesn't support events.");
  }
  void block(void* /*event*/, const Stream& /*stream*/) const override {
    TORCH_CHECK(false, D, " backend doesn't support events.")
  }
  bool queryEvent(void* /*event*/) const override {
    TORCH_CHECK(false, D, " backend doesn't support events.")
  }
  void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
      const noexcept override {}

  // Stream-related functions: everything is trivially complete.
  bool queryStream(const Stream& /*stream*/) const override {
    return true;
  }
  void synchronizeStream(const Stream& /*stream*/) const override {
    // Don't wait for anything.
  }
};
// The registry is NON-owning. Each stored pointer is std::atomic so
// that under all interleavings of registry calls the structure is
// race-free. This doesn't cost us anything on reads in X86. (An
// unsynchronized implementation probably is OK too, but I didn't want
// to prove that we never read from device_guard_impl_registry at the
// same time some registration is occurring. Shiver.)
//
// I'd like this registry to be valid even at program destruction time
// (in case someone uses a DeviceGuard in a destructor to do some cleanup
// in the CUDA API.) Since there are no direct accesses of the underlying
// owning objects which I can use to enforce initialization order (unlike
// in a Meyer singleton), it implies that you must *leak* objects when
// putting them in the registry. This is done by deleting the destructor
// on DeviceGuardImplInterface.
extern C10_API std::atomic<const DeviceGuardImplInterface*>
device_guard_impl_registry[static_cast<size_t>(
DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)];
// I can't conveniently use c10/util/Registry.h for the following reason:
// c10/util/Registry.h gives me a slow way of Create'ing a object of some
// interface from the registry, but no way of quickly accessing an already
// created object. I'll be banging on getDeviceGuardImpl every time we do a
// DeviceGuard, so I really don't want to be doing an unordered_map lookup.
// Better if the registration mechanism directly drops its implementation
// into device_guard_impl_registry.
// Constructing one of these registers the given (leaked) implementation
// into device_guard_impl_registry for the given device type.
class C10_API DeviceGuardImplRegistrar {
 public:
  DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*);
};

// NOTE(review): `g_##DeviceType` pastes the literal token "DeviceType", not
// the DevType argument; C10_ANONYMOUS_VARIABLE still makes the name unique,
// so this works, but confirm whether g_##DevType was intended.
#define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl)              \
  static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( \
      g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl());
// Look up the registered guard implementation for `type`, raising a
// friendly error if the backend was not linked in.
inline const DeviceGuardImplInterface* getDeviceGuardImpl(DeviceType type) {
  // Two adjacent int16_t fields DeviceType and DeviceIndex has field access
  // miscompiled on NVCC. To workaround this issue, we apply a mask to the
  // DeviceType. First check if the DeviceType is 16-bit.
  // FB employees can see
  //   https://fb.workplace.com/groups/llvm.gcc/permalink/4053565044692080/
  // for more details
  static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit");
  const auto slot = static_cast<size_t>(type) & 0xFF;
  const DeviceGuardImplInterface* impl =
      device_guard_impl_registry[slot].load();
  // This seems to be the first place where you make use of a device
  // when you pass devices to factory functions. Give a nicer error
  // message in this case.
  TORCH_CHECK(impl, "PyTorch is not linked with support for ", type, " devices");
  return impl;
}
// Returns whether a guard implementation has been registered for `type`.
inline bool hasDeviceGuardImpl(DeviceType type) {
  // Apply the same 0xFF mask used in getDeviceGuardImpl (NVCC field-access
  // miscompile workaround); DeviceType is 8-bit (static_assert'd there), so
  // the mask is behavior-preserving and keeps both accessors indexing the
  // registry consistently.
  return device_guard_impl_registry[static_cast<size_t>(type) & 0xFF].load();
}
} // namespace impl
} // namespace c10
| 12,187
| 36.045593
| 80
|
h
|
null |
pytorch-main/c10/core/impl/FakeGuardImpl.h
|
#pragma once
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <array>
namespace c10 {
namespace impl {
// FakeGuardImpl is hardcoded to have eight devices. Not for
// any good reason, just to simplify code.
constexpr DeviceIndex kFakeGuardImplMaxDevices = 8;
/**
* A fake implementation of DeviceGuardImplInterface suitable for testing.
* The current device is modeled as a mutable field in the guard implementation
* class. See DeviceGuard_test.cpp for an example use.
*/
template <DeviceType T>
struct FakeGuardImpl final : public DeviceGuardImplInterface {
  static constexpr DeviceType static_type = T;
  // Runtime device type is not used
  FakeGuardImpl(DeviceType) {}
  FakeGuardImpl() = default;
  DeviceType type() const override {
    return T;
  }
  Device exchangeDevice(Device d) const override {
    AT_ASSERT(d.type() == type());
    AT_ASSERT(d.index() < kFakeGuardImplMaxDevices);
    Device old_device = getDevice();
    if (old_device.index() != d.index()) {
      current_device_ = d.index();
    }
    return old_device;
  }
  Device getDevice() const override {
    return Device(type(), current_device_);
  }
  void setDevice(Device d) const override {
    AT_ASSERT(d.type() == type());
    AT_ASSERT(d.index() >= 0);
    AT_ASSERT(d.index() < kFakeGuardImplMaxDevices);
    current_device_ = d.index();
  }
  void uncheckedSetDevice(Device d) const noexcept override {
    // Deliberately skips the assertions of setDevice.
    current_device_ = d.index();
  }
  Stream getStream(Device d) const noexcept override {
    return Stream(Stream::UNSAFE, d, current_streams_[d.index()]);
  }
  Stream exchangeStream(Stream s) const noexcept override {
    auto old_id = current_streams_[s.device_index()];
    current_streams_[s.device_index()] = s.id();
    return Stream(Stream::UNSAFE, s.device(), old_id);
  }
  DeviceIndex deviceCount() const noexcept override {
    return kFakeGuardImplMaxDevices;
  }

  // Event-related functions: accepted but ignored (queryEvent always
  // reports completion).
  void record(
      void** event,
      const Stream& stream,
      const DeviceIndex device_index,
      const EventFlag flag) const override {}
  void block(void* event, const Stream& stream) const override {}
  bool queryEvent(void* event) const override {
    return true;
  }
  void destroyEvent(void* event, const DeviceIndex device_index)
      const noexcept override {}

  // Convenience methods for testing
  static DeviceIndex getDeviceIndex() {
    return current_device_;
  }
  static void setDeviceIndex(DeviceIndex i) {
    AT_ASSERT(i >= 0);
    AT_ASSERT(i < kFakeGuardImplMaxDevices);
    current_device_ = i;
  }
  static StreamId getCurrentStreamIdFor(DeviceIndex i) {
    return current_streams_.at(i);
  }
  static void resetStreams() {
    current_streams_.fill(0);
  }

 private:
  // Per-thread fake state: current device and one current stream per device.
  thread_local static DeviceIndex current_device_;
  thread_local static std::array<StreamId, kFakeGuardImplMaxDevices>
      current_streams_;
};

// Out-of-line definitions for the template's static members.
template <DeviceType T>
thread_local DeviceIndex FakeGuardImpl<T>::current_device_ = 0;

template <DeviceType T>
constexpr DeviceType FakeGuardImpl<T>::static_type;

template <DeviceType T>
thread_local std::array<StreamId, kFakeGuardImplMaxDevices>
    FakeGuardImpl<T>::current_streams_ = {0, 0, 0, 0, 0, 0, 0, 0};
} // namespace impl
} // namespace c10
| 3,237
| 28.981481
| 79
|
h
|
null |
pytorch-main/c10/core/impl/GPUTrace.h
|
#pragma once
#include <c10/core/impl/PyInterpreter.h>
namespace c10 {
namespace impl {
struct C10_API GPUTrace {
  // On the x86 architecture the atomic operations are lock-less.
  static std::atomic<const PyInterpreter*> gpuTraceState;

  // When PyTorch migrates to C++20, this should be changed to an atomic
  // flag. Today, access to this variable is not synchronized, on the basis
  // that it is flipped exactly once, by the first interpreter that reaches
  // it.
  static bool haveState;

  // Registers the first interpreter that invokes it; every subsequent call
  // is a no-op.
  static void set_trace(const PyInterpreter*);

  static const PyInterpreter* get_trace() {
    return haveState ? gpuTraceState.load(std::memory_order_acquire) : nullptr;
  }
};
} // namespace impl
} // namespace c10
| 889
| 27.709677
| 80
|
h
|
null |
pytorch-main/c10/core/impl/HermeticPyObjectTLS.h
|
#pragma once
#include <c10/macros/Export.h>
#include <atomic>
namespace c10 {
namespace impl {
// This TLS controls whether or not we permanently associate PyObject
// with Tensor the first time it is allocated. When hermetic PyObject
// TLS is enabled (state is true), we DO NOT save PyObjects to Tensor,
// meaning you get a distinct PyObject whenever you execute the code in
// question.
struct C10_API HermeticPyObjectTLS {
  // Flip the thread-local hermetic flag on or off.
  static void set_state(bool state);
  static bool get_state() {
    // Hypothetical fastpath if torchdeploy/multipy isn't used. Per
    // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
    // this qualifies relaxed access because it is a single-location data
    // structure (only the boolean here).
    //
    // Forgetting about data races for a moment, is there a logical race?
    //
    //  - Boolean only ever transitions from false to true. So the
    //    critical situation is when one interpreter is already running
    //    when a second interpreter switches haveState from false to true.
    //
    //  - The first interpreter is indifferent whether or not it sees
    //    hasState true/false; obviously false works (this is what the
    //    interpreter was previously using; more directly, the interpreter
    //    calls into itself as the handler, so being hermetic is not
    //    required), and true simply means serviced python operator calls will
    //    be hermetic; in these cases it is expected to be functionally
    //    equivalent.
    //
    //  - The second interpreter MUST see hasState true (as its requests will
    //    be forwarded to the first interpreter), but it is assumed that there
    //    is a synchronization between the interpreter initialization, and
    //    when we actually perform operations, so it is guaranteed to see
    //    hasState true.
    //
    // QED.
    //
    // This fastpath is currently disabled so that we can more easily test that
    // hermetic mode works correctly even on stock build of PyTorch.
    // (The `false &&` below is deliberate dead code, not a bug.)
    if (false && !haveState_.load(std::memory_order_relaxed))
      return false;
    return get_tls_state();
  }
  // Call this from the multipy/torchdeploy top level
  static void init_state();

 private:
  // This only flipped once from false to true during torchdeploy/multipy
  // initialization, and never again.
  static std::atomic<bool> haveState_;
  static bool get_tls_state();
};
} // namespace impl
} // namespace c10
| 2,471
| 38.870968
| 79
|
h
|
null |
pytorch-main/c10/core/impl/InlineDeviceGuard.h
|
#pragma once
// This file provides implementations of InlineDeviceGuard and
// InlineOptionalDeviceGuard.
#include <c10/core/Device.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/core/impl/VirtualGuardImpl.h>
#include <c10/util/C++17.h>
#include <c10/util/Optional.h>
namespace c10 {
namespace impl {
/**
* A DeviceGuard is an RAII class that sets a device to some value
* on construction, and resets the device to its original value on
* destruction.
*
* InlineDeviceGuard is a helper class for implementing DeviceGuards.
* It is templated over a DeviceGuardImpl (anything that implements
* DeviceGuardImplInterface). There are two primary ways to instantiate
* InlineDeviceGuard:
*
* - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl.
* This is the best way to use InlineDeviceGuard, as all calls are
* devirtualized, giving you code as efficient as straight line
* calls to cudaGetDevice/cudaSetDevice.
*
* - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl
* retrieved from a DeviceType registry. We have explicitly instantiated
* InlineDeviceGuard this way as c10::DeviceGuard.
*
* If you are in a hurry, you can use InlineDeviceGuard directly:
*
* using CUDAGuard = impl::InlineDeviceGuard<CUDAGuardImpl>;
*
* However, you can provide a better user experience if you explicitly write a
* wrapper class that itself contains the template instantiation:
*
* class CUDAGuard {
* public:
* // ... the API ...
* private:
* impl::InlineDeviceGuard<CUDAGuardImpl> guard_;
* }
*
* The wrapper class provides a good place to write documentation, and helps
* avoid weird template instantiation errors when a user incorrectly uses the
* class.
*
* If you need to test this class, consider instantiating it with FakeGuardImpl.
*/
template <typename T>
class InlineDeviceGuard {
 public:
  // Note [Omitted default constructor from RAII]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // In principle, we could add a default constructor to
  // DeviceGuard which reads the current device and promises to
  // restore to that device on exit. However, most cases where you
  // would have written this, you probably meant to actually just
  // use OptionalDeviceGuard (since you don't actually need the
  // restore to happen if you don't ever actually set the device).
  // We remove the constructor here to encourage you to think about
  // what you actually want to happen.
  explicit InlineDeviceGuard() = delete;

  /// Set the current device to the passed Device.
  /// (An index of -1 means "the current device": it is only queried,
  /// not switched.)
  explicit InlineDeviceGuard(Device device)
      : impl_(device.type()),
        original_device_(
            device.index() == -1 ? impl_.getDevice()
                                 : impl_.exchangeDevice(device)),
        current_device_(device.index() == -1 ? original_device_ : device) {}

  /// Set the current device index to the passed DeviceIndex. (The
  /// device type is inferred from the template parameter T).
  template <
      typename U = T,
      typename = typename std::enable_if<
          !std::is_same<U, VirtualGuardImpl>::value>::type>
  explicit InlineDeviceGuard(DeviceIndex device_index)
      : InlineDeviceGuard(Device(U::static_type, device_index)) {}

  /// Construct an InlineDeviceGuard using VirtualGuardImpl with an explicit
  /// DeviceGuardImplInterface pointer.
  template <
      typename U = T,
      typename = typename std::enable_if<
          std::is_same<U, VirtualGuardImpl>::value>::type>
  explicit InlineDeviceGuard(
      Device device,
      const DeviceGuardImplInterface* impl)
      : impl_(
            VirtualGuardImpl(impl ? impl : getDeviceGuardImpl(device.type()))),
        original_device_(
            device.index() == -1 ? impl_.getDevice()
                                 : impl_.exchangeDevice(device)),
        current_device_(device.index() == -1 ? original_device_ : device) {}

  /// Copy is disallowed
  InlineDeviceGuard(const InlineDeviceGuard<T>&) = delete;
  InlineDeviceGuard<T>& operator=(const InlineDeviceGuard<T>&) = delete;

  /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
  /// which is required for moves on types with nontrivial destructors.
  InlineDeviceGuard(InlineDeviceGuard<T>&& other) = delete;
  InlineDeviceGuard& operator=(InlineDeviceGuard<T>&& other) = delete;

  // Restores the original device via the unchecked (no validation) setter.
  ~InlineDeviceGuard() {
    impl_.uncheckedSetDevice(original_device_);
  }

  /// Sets the device to the given one.
  template <
      typename U = T,
      typename std::enable_if<!std::is_same<U, VirtualGuardImpl>::value, int>::
          type = 0>
  void set_device(at::Device device) {
    AT_ASSERT(
        (U::static_type == DeviceType::HIP && device.is_cuda()) ||
        device.type() == U::static_type);
    auto index = device.index();
    if (index == -1)
      return;
    impl_.setDevice(device);
    current_device_ = device;
  }

  /// Resets the currently set device to its original device, and then sets the
  /// current device to the passed device. This is effectively equivalent to
  /// set_device when a guard supports only a single device type.
  template <typename U = T>
  typename std::enable_if<!std::is_same<U, VirtualGuardImpl>::value>::type
  reset_device(at::Device device) {
    set_device(device);
  }

  /// Resets the currently set device to its original device, and then sets the
  /// current device to the passed device (for a possibly different device
  /// type).
  ///
  /// This method is named reset_device to highlight the fact that previous
  /// device settings from this guard are NOT preserved, even if the device
  /// has a different device type. For example:
  ///
  ///   // CUDA device is 0
  ///   DeviceGuard g(Device(kCUDA, 1));
  ///   g.reset_device(Device(kHIP, 2));
  ///   // CUDA device is 0 (!!)
  ///
  /// NOTE: this implementation may skip some device setting if it can prove
  /// that it is unnecessary.
  ///
  /// Optional argument is for testing only.
  template <typename U = T>
  typename std::enable_if<std::is_same<U, VirtualGuardImpl>::value>::type
  reset_device(
      at::Device device,
      const impl::DeviceGuardImplInterface* impl = nullptr) {
    auto index = device.index();
    if (index == -1)
      return;
    if (device.type() == original_device_.type()) {
      AT_ASSERT(impl == nullptr || impl->type() == device.type());
      impl_.setDevice(device);
      current_device_ = device;
    } else {
      // Destruct and reconstruct the DeviceGuard in place
      impl_.setDevice(original_device_);
      impl_ = !impl ? VirtualGuardImpl(device.type()) : VirtualGuardImpl(impl);
      original_device_ = impl_.exchangeDevice(device);
      current_device_ = device;
    }
  }

  /// Sets the device index to the given one. The device type is inferred
  /// from the original device type.
  void set_index(DeviceIndex index) {
    reset_device(Device(original_device_.type(), index));
  }

  /// Returns the device that was set at the time the most recent
  /// reset_device(), or otherwise the device at construction time.
  Device original_device() const {
    return original_device_;
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device/reset_device/set_index.
  Device current_device() const {
    return current_device_;
  }

 protected:
  T impl_;

 private:
  // Device to restore on destruction.
  Device original_device_;
  // Device most recently installed through this guard.
  Device current_device_;
};
/**
* A OptionalDeviceGuard is an RAII class that sets a device to some value on
* initialization, and resets the device to its original value on destruction.
*
* InlineOptionalDeviceGuard is a helper class for implementing
* OptionalDeviceGuards. See guidance in InlineDeviceGuard on how to
* use this. See OptionalDeviceGuard for user-oriented usage notes.
*/
template <typename T>
class InlineOptionalDeviceGuard {
 public:
  // Note [Explicit initialization of optional fields]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Explicit initialization of optional fields
  // required to workaround an nvcc bug; see
  // https://github.com/pytorch/pytorch/issues/12117

  /// Creates an uninitialized OptionalDeviceGuard.
  explicit InlineOptionalDeviceGuard()
      : guard_() // See Note [Explicit initialization of optional fields]
  {}

  /// Set the current device to the passed Device, if it is not nullopt.
  explicit InlineOptionalDeviceGuard(optional<Device> device_opt)
      : guard_() { // See Note [Explicit initialization of optional fields]
    if (device_opt.has_value()) {
      guard_.emplace(device_opt.value());
    }
  }

  /// Set the current device to the passed DeviceIndex, if it is not nullopt.
  template <
      typename U = T,
      typename = typename std::enable_if<
          !std::is_same<U, VirtualGuardImpl>::value>::type>
  explicit InlineOptionalDeviceGuard(optional<DeviceIndex> device_index_opt)
      : guard_() { // See Note [Explicit initialization of optional fields]
    if (device_index_opt.has_value()) {
      guard_.emplace(device_index_opt.value());
    }
  }

  /// All constructors of DeviceGuard are valid for OptionalDeviceGuard
  /// and result in initialized OptionalDeviceGuard.
  template <typename... Args>
  explicit InlineOptionalDeviceGuard(Args&&... args)
      : guard_(in_place, std::forward<Args>(args)...) {}

  // TODO: Consider reading Tensor and TensorList constructors here, when
  // Tensor moves to c10. (These are only valid on OptionalDeviceGuard,
  // because a Tensor may be undefined, in which case we need an uninitialized
  // tensor guard.)

  // Note [Move construction for RAII guards is tricky]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // In principle, move construction is useful for terminating
  // the lifetime of a `OptionalDeviceGuard` early; for example:
  //
  //     // current device is d0
  //     OptionalDeviceGuard g1(d1);
  //     // current device is d1
  //     {
  //       OptionalDeviceGuard g2(std::move(g1));
  //     }
  //     // current device is d0!!
  //
  // However, it's difficult to implement the move constructor
  // in a way that works in all situations.  For example, consider
  // the following example:
  //
  //     OptionalDeviceGuard g1(d1);
  //     {
  //       OptionalDeviceGuard g2(d2);
  //       {
  //         OptionalDeviceGuard g3(std::move(g1)); // !!!
  //       }
  //     }
  //
  // What should the current device be while g3 in scope... and what
  // should it be after it goes out of scope?  What about g2?
  // There don't seem to be satisfactory answers for these questions.
  //
  // It's in principle possible to raise an error when this occurs
  // by doing some extra thread-local bookkeeping.  But why bother?
  // Just don't provide the constructor.
  InlineOptionalDeviceGuard(InlineOptionalDeviceGuard<T>&& other) = delete;

  // Note [Move assignment for RAII guards is tricky]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Move assignment is deleted, because you need to know which guard was
  // defined "first", as that guard's original_device_ wins--with the current
  // representation, we have no way of telling which is the case.  (Move
  // construction does not have this problem, as one guard is always
  // uninitialized.)
  //
  // We can make this clear by way of a pair of examples:
  //
  // Example 1:
  //
  //  // initial device is n0
  //  {
  //    CUDAGuard g1(n1);
  //    {
  //      CUDAGuard g2(n2);
  //      // current device should be n2
  //      g1 = std::move(g2);
  //      // current device should still be n2
  //    }
  //    // current device should still be n2
  //  }
  //  // current device should be n0
  //
  //  Example 2 (flip the order of the two guards):
  //
  //  // initial device is n0
  //  {
  //    CUDAGuard g2(n2);
  //    {
  //      CUDAGuard g1(n1);
  //      // current device should be n1
  //      g1 = std::move(g2);
  //      // current device should be n2
  //    }
  //    // current device should be n0 (since g2 has been vacated)
  //  }
  //
  // In both examples, we need g1 to restore to n0 after move assignment.
  // However, in example 1, this is determined by the restore value of g1
  // (prior to the move). In example 2, however, it is determined by the
  // restore value of g2(!!). We don't know which one should win, without
  // having a way of telling which guard was allocated first.
  //
  // We could solve this with an extra thread-local variable.  But no one is
  // actually using move-assignment.  So just get rid of it.
  InlineOptionalDeviceGuard& operator=(InlineOptionalDeviceGuard&& other) =
      delete;

  /// Sets the device to the given one. Initializes OptionalDeviceGuard if it
  /// is not already initialized.
  template <
      typename U = T,
      typename = typename std::enable_if<
          !std::is_same<U, VirtualGuardImpl>::value>::type>
  void set_device(at::Device device) {
    if (!guard_.has_value()) {
      guard_.emplace(device);
    } else {
      guard_->set_device(device);
    }
  }

  /// Resets the currently set device to its original device, and then sets the
  /// current device to the passed device (for a possibly different device
  /// type). Initializes OptionalDeviceGuard if it is not already initialized.
  ///
  /// See notes on why this is called reset_device on InlineDeviceGuard.
  ///
  /// Optional argument is for testing only.
  template <
      typename U = T,
      typename = typename std::enable_if<
          std::is_same<U, VirtualGuardImpl>::value>::type>
  void reset_device(
      at::Device device,
      const DeviceGuardImplInterface* impl = nullptr) {
    if (!guard_.has_value()) {
      guard_.emplace(device, impl);
    } else {
      guard_->reset_device(device, impl);
    }
  }

  /// Resets the currently set device to its original device, and then sets the
  /// current device to the passed device. Initializes the guard if it is
  /// not already initialized. This is effectively equivalent to set_device
  /// when a guard supports only a single device type.
  template <
      typename U = T,
      typename = typename std::enable_if<
          !std::is_same<U, VirtualGuardImpl>::value>::type>
  void reset_device(at::Device device) {
    if (!guard_.has_value()) {
      guard_.emplace(device);
    } else {
      guard_->reset_device(device);
    }
  }

  /// Sets the device index to the given one. The device type is statically
  /// known.
  template <
      typename U = T,
      typename = typename std::enable_if<
          !std::is_same<U, VirtualGuardImpl>::value>::type>
  void set_index(DeviceIndex index) {
    if (!guard_.has_value()) {
      guard_.emplace(index);
    } else {
      guard_->set_index(index);
    }
  }

  /// Returns the device that was set immediately prior to initialization of
  /// the guard, or nullopt if the guard is uninitialized.
  optional<Device> original_device() const {
    return guard_.has_value() ? make_optional(guard_->original_device())
                              : nullopt;
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device, if the guard is initialized,
  /// or nullopt if the guard is uninitialized.
  optional<Device> current_device() const {
    return guard_.has_value() ? make_optional(guard_->current_device())
                              : nullopt;
  }

  /// Restore the original device, resetting this guard to uninitialized state.
  void reset() {
    guard_.reset();
  }

 private:
  optional<InlineDeviceGuard<T>> guard_;
};
} // namespace impl
} // namespace c10
| 15,715
| 35.37963
| 80
|
h
|
null |
pytorch-main/c10/core/impl/InlineEvent.h
|
#pragma once
#include <c10/core/DeviceType.h>
#include <c10/core/Stream.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/util/Exception.h>
namespace c10 {
namespace impl {
template <typename T>
struct InlineEvent final {
  InlineEvent() = delete;
  InlineEvent(
      const DeviceType _device_type,
      const EventFlag _flag = EventFlag::PYTORCH_DEFAULT)
      : backend_{_device_type}, device_type_{_device_type}, flag_{_flag} {}

  // Copy constructor and copy assignment operator (deleted)
  InlineEvent(const InlineEvent&) = delete;
  InlineEvent& operator=(const InlineEvent&) = delete;

  // Move constructor and move assignment operator.
  // Both are implemented by swapping state with `other`; the move ctor
  // first delegates to the (type, flag) ctor so all fields are initialized.
  InlineEvent(InlineEvent&& other) noexcept
      : InlineEvent(other.device_type_, other.flag_) {
    swap(std::move(other));
  }
  InlineEvent& operator=(InlineEvent&& other) noexcept {
    swap(std::move(other));
    return *this;
  }

  // Exchange all state with `other`, member by member.
  void swap(InlineEvent&& other) {
    std::swap(event_, other.event_);
    std::swap(backend_, other.backend_);
    std::swap(device_type_, other.device_type_);
    std::swap(device_index_, other.device_index_);
    std::swap(flag_, other.flag_);
    std::swap(was_marked_for_recording_, other.was_marked_for_recording_);
  }

  // Destroys the underlying backend event, if one was ever created.
  ~InlineEvent() noexcept {
    if (event_)
      backend_.destroyEvent(event_, device_index_);
  }

  DeviceType device_type() const noexcept {
    return device_type_;
  }
  DeviceIndex device_index() const noexcept {
    return device_index_;
  }
  EventFlag flag() const noexcept {
    return flag_;
  }
  bool was_marked_for_recording() const noexcept {
    return was_marked_for_recording_;
  }

  // Records only on the first call; later calls are no-ops.
  void recordOnce(const Stream& stream) {
    if (!was_marked_for_recording_)
      record(stream);
  }

  // Records the event on `stream`, whose device type must match the
  // event's. The event adopts the stream's device index afterwards.
  void record(const Stream& stream) {
    TORCH_CHECK(
        stream.device_type() == device_type_,
        "Event device type ",
        DeviceTypeName(device_type_),
        " does not match recording stream's device type ",
        DeviceTypeName(stream.device_type()),
        ".");

    backend_.record(&event_, stream, device_index_, flag_);
    was_marked_for_recording_ = true;
    device_index_ = stream.device_index();
  }

  // Makes `stream` wait on this event; no-op if never recorded.
  void block(const Stream& stream) const {
    if (!was_marked_for_recording_)
      return;

    TORCH_CHECK(
        stream.device_type() == device_type_,
        "Event device type ",
        DeviceTypeName(device_type_),
        " does not match blocking stream's device type ",
        DeviceTypeName(stream.device_type()),
        ".");

    backend_.block(event_, stream);
  }

  // True when the event has completed (trivially true if never recorded).
  bool query() const {
    if (!was_marked_for_recording_)
      return true;
    return backend_.queryEvent(event_);
  }

 private:
  void* event_ = nullptr;
  T backend_;
  DeviceType device_type_;
  DeviceIndex device_index_ = -1;
  EventFlag flag_ = EventFlag::PYTORCH_DEFAULT;
  bool was_marked_for_recording_ = false;
};
} // namespace impl
} // namespace c10
| 2,930
| 25.405405
| 75
|
h
|
null |
pytorch-main/c10/core/impl/InlineStreamGuard.h
|
#pragma once
#include <c10/core/impl/InlineDeviceGuard.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/irange.h>
namespace c10 {
namespace impl {
/**
* A StreamGuard is an RAII class that changes the current device
* to the device corresponding to some stream, and changes the
* default stream on that device to be this stream.
*
* InlineStreamGuard is a helper class for implementing StreamGuards.
* See InlineDeviceGuard for guidance on how to use this class.
*/
template <typename T>
class InlineStreamGuard : private InlineDeviceGuard<T> {
 public:
  /// No default constructor, see Note [Omitted default constructor from RAII]
  explicit InlineStreamGuard() = delete;

  /// Set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  explicit InlineStreamGuard(Stream stream)
      : InlineDeviceGuard<T>(stream.device()),
        original_stream_of_original_device_(
            this->impl_.getStream(original_device())),
        original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
        current_stream_(stream) {}

  /// This constructor exists purely for testing
  template <
      typename U = T,
      typename = typename std::enable_if<
          std::is_same<U, VirtualGuardImpl>::value>::type>
  explicit InlineStreamGuard(
      Stream stream,
      const DeviceGuardImplInterface* impl)
      : InlineDeviceGuard<T>(
            stream.device(),
            impl ? impl : getDeviceGuardImpl(stream.device_type())),
        original_stream_of_original_device_(
            this->impl_.getStream(original_device())),
        original_stream_of_current_device_(this->impl_.exchangeStream(stream)),
        current_stream_(stream) {}

  /// Copy is disallowed
  InlineStreamGuard(const InlineStreamGuard<T>&) = delete;
  InlineStreamGuard<T>& operator=(const InlineStreamGuard<T>&) = delete;

  /// Move is disallowed, as StreamGuard does not have an uninitialized state,
  /// which is required for moves on types with nontrivial destructors.
  InlineStreamGuard(InlineStreamGuard<T>&& other) = delete;
  InlineStreamGuard& operator=(InlineStreamGuard<T>&& other) = delete;

  // Restores the stream of the current device; the base-class destructor
  // then restores the original device.
  ~InlineStreamGuard() {
    this->impl_.exchangeStream(original_stream_of_current_device_);
  }

  /// Resets the currently set stream to the original stream and
  /// the currently set device to the original device.  Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  ///
  /// NOTE: this implementation may skip some stream/device setting if
  /// it can prove that it is unnecessary.
  ///
  /// WARNING: reset_stream does NOT preserve previously set streams on
  /// different devices.  If you need to set streams on multiple devices
  /// use MultiStreamGuard instead.
  void reset_stream(Stream stream) {
    // TODO: make a version that takes an impl argument.  Unfortunately,
    // that will require SFINAE because impl is only valid for the
    // VirtualGuardImpl specialization.
    if (stream.device() == this->current_device()) {
      this->impl_.exchangeStream(stream);
      current_stream_ = stream;
    } else {
      // Destruct and reconstruct the StreamGuard in-place
      this->impl_.exchangeStream(original_stream_of_current_device_);
      this->reset_device(stream.device());
      original_stream_of_current_device_ = this->impl_.exchangeStream(stream);
      current_stream_ = stream;
    }
  }

  // It's not clear if set_device should also reset the current stream
  // if the device is unchanged; therefore, we don't provide it.
  // The situation is somewhat clearer with reset_device, but it's still
  // a pretty weird thing to do, so haven't added this either.

  /// Returns the stream of the original device prior to this guard.  Subtly,
  /// the stream returned here is the original stream of the *original*
  /// device; i.e., it's the stream that your computation *would* have
  /// been put on, if it hadn't been for this meddling stream guard.
  /// This is usually what you want.
  Stream original_stream() const {
    return original_stream_of_original_device_;
  }

  /// Returns the most recent stream that was set using this device guard,
  /// either from construction, or via set_stream.
  Stream current_stream() const {
    return current_stream_;
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device/reset_device/set_index.
  Device current_device() const {
    return InlineDeviceGuard<T>::current_device();
  }

  /// Returns the device that was set at the most recent reset_stream(),
  /// or otherwise the device at construction time.
  Device original_device() const {
    return InlineDeviceGuard<T>::original_device();
  }

 private:
  Stream
      original_stream_of_original_device_; // what the user probably cares about
  Stream original_stream_of_current_device_; // what we need to restore
  Stream current_stream_;
};
/**
* An OptionalStreamGuard is an RAII class that sets a device to some value on
* initialization, and resets the device to its original value on destruction.
* See InlineOptionalDeviceGuard for more guidance on how to use this class.
*/
template <typename T>
class InlineOptionalStreamGuard {
 public:
  /// Creates an uninitialized stream guard.
  explicit InlineOptionalStreamGuard()
      : guard_() // See Note [Explicit initialization of optional fields]
  {}

  /// Set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream,
  /// if the passed stream is not nullopt.
  explicit InlineOptionalStreamGuard(optional<Stream> stream_opt) : guard_() {
    if (stream_opt.has_value()) {
      guard_.emplace(stream_opt.value());
    }
  }

  /// All constructors of StreamGuard are valid for OptionalStreamGuard
  template <typename... Args>
  explicit InlineOptionalStreamGuard(Args&&... args)
      : guard_(in_place, std::forward<Args>(args)...) {}

  // See Note [Move construction for RAII guards is tricky]
  InlineOptionalStreamGuard(InlineOptionalStreamGuard<T>&& other) = delete;

  // See Note [Move assignment for RAII guards is tricky]
  InlineOptionalStreamGuard& operator=(InlineOptionalStreamGuard&& other) =
      delete;

  /// Resets the currently set stream to the original stream and
  /// the currently set device to the original device.  Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  /// Initializes the OptionalStreamGuard if it was not previously initialized.
  void reset_stream(Stream stream) {
    if (guard_.has_value()) {
      guard_->reset_stream(stream);
    } else {
      guard_.emplace(stream);
    }
  }

  /// Returns the stream that was set at the time the guard was most recently
  /// initialized, or nullopt if the guard is uninitialized.
  optional<Stream> original_stream() const {
    return guard_.has_value() ? make_optional(guard_->original_stream())
                              : nullopt;
  }

  /// Returns the most recent stream that was set using this stream guard,
  /// either from construction, or via reset_stream, if the guard is
  /// initialized, or nullopt if the guard is uninitialized.
  optional<Stream> current_stream() const {
    return guard_.has_value() ? make_optional(guard_->current_stream())
                              : nullopt;
  }

  /// Restore the original device and stream, resetting this guard to
  /// uninitialized state.
  void reset() {
    guard_.reset();
  }

 private:
  optional<InlineStreamGuard<T>> guard_;
};
template <typename T>
class InlineMultiStreamGuard {
 public:
  /// Calls `set_stream` on each of the streams in the list.
  /// This may be useful if you need to set different streams
  /// for different devices.
  /// All streams must share a single device type; an empty list leaves the
  /// guard a no-op (impl_ stays unset).
  explicit InlineMultiStreamGuard(ArrayRef<Stream> streams) {
    if (!streams.empty()) {
      impl_.emplace(getDeviceTypeOfStreams(streams));
      original_streams_.reserve(streams.size());
      for (const Stream& s : streams) {
        original_streams_.emplace_back(this->impl_->exchangeStream(s));
      }
    }
  }

  /// Copy is disallowed
  InlineMultiStreamGuard(const InlineMultiStreamGuard&) = delete;
  InlineMultiStreamGuard<T>& operator=(const InlineMultiStreamGuard&) = delete;

  /// Move is disallowed, as StreamGuard does not have an uninitialized state,
  /// which is required for moves on types with nontrivial destructors.
  InlineMultiStreamGuard(InlineMultiStreamGuard&& other) = delete;
  InlineMultiStreamGuard& operator=(InlineMultiStreamGuard&& other) = delete;

  // Restore every stream captured at construction time.
  ~InlineMultiStreamGuard() {
    for (const Stream& s : original_streams_) {
      this->impl_->exchangeStream(s);
    }
  }

 protected:
  optional<T> impl_;

 private:
  /// The original streams that were active on all devices.
  std::vector<Stream> original_streams_;

  // Returns the common device type of `streams`; raises if they mix types.
  static DeviceType getDeviceTypeOfStreams(ArrayRef<Stream> streams) {
    TORCH_INTERNAL_ASSERT(!streams.empty());
    DeviceType type = streams[0].device_type();
    for (const auto idx : c10::irange(1, streams.size())) {
      TORCH_CHECK_VALUE(
          streams[idx].device_type() == type,
          "Streams have a mix of device types: stream 0 is on ",
          streams[0].device(),
          " while stream ",
          idx,
          " is on device ",
          streams[idx].device());
    }
    return type;
  }
};
} // namespace impl
} // namespace c10
| 9,629
| 36.470817
| 80
|
h
|
null |
pytorch-main/c10/core/impl/LocalDispatchKeySet.h
|
#pragma once
#include <c10/core/DispatchKeySet.h>
#include <c10/macros/Export.h>
// TLS management for DispatchKeySet (the "local" DispatchKeySet(s))
//
// This manages two thread-local DispatchKeySets:
//
// - The included type set, which adds a tensor type for consideration
// in dispatch. (For example, you might add Profiling to
// the included type set to turn on profiling on all tensor operations.)
//
// - The excluded type set, which disqualifies a tensor type from dispatch.
// (For example, after redispatching on variable, we disqualify
// Autograd so we don't attempt to handle variable again.)
// (Exclusion wins over inclusion.)
//
// NB: Originally, I implemented the excluded type set as storing the inverted
// set, but TLS is defined to be zero-initialized, so this doesn't actually work
// (if it's inverted, you want the set to be -1 initialized).
namespace c10 {
namespace impl {
// POD version of LocalDispatchKeySet. Declared here just so that
// we can put it in the guards.
// This struct encapsulates special handling for TLS initialization
// in set_included()/included() API so that they reflect the truth.
// If you want to create PODLocalDispatchKeySet with non-zero state,
// use set_included() instead of default constructor.
// Raw, trivially-constructible view of the TLS key sets. Values are stored
// XOR-ed with the default sets so that zero-initialized TLS reads back as
// the defaults; see Note [TLS Initialization].
struct C10_API PODLocalDispatchKeySet {
  uint64_t included_;
  uint64_t excluded_;

  // See Note [TLS Initialization]
  DispatchKeySet included() const {
    DispatchKeySet raw(DispatchKeySet::RAW, included_);
    return raw ^ c10::default_included_set;
  }
  DispatchKeySet excluded() const {
    DispatchKeySet raw(DispatchKeySet::RAW, excluded_);
    return raw ^ c10::default_excluded_set;
  }

  void set_included(DispatchKeySet ks) {
    included_ = (ks ^ c10::default_included_set).raw_repr();
  }
  void set_excluded(DispatchKeySet ks) {
    excluded_ = (ks ^ c10::default_excluded_set).raw_repr();
  }
};
static_assert(
    std::is_trivial<PODLocalDispatchKeySet>::value,
    "PODLocalDispatchKeySet must be a POD type.");
// Decoded view of the TLS key sets: the constructor applies the
// default-set XOR via PODLocalDispatchKeySet::included()/excluded().
struct C10_API LocalDispatchKeySet {
  /* implicit */ LocalDispatchKeySet(PODLocalDispatchKeySet x)
      : included_(x.included()), excluded_(x.excluded()) {}
  DispatchKeySet included_;
  DispatchKeySet excluded_;
};
// thread_local variables cannot be C10_API on Windows.
// Inlining this seems to break AutoDispatchBelowAutograd on Android.
#if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
C10_API LocalDispatchKeySet tls_local_dispatch_key_set();
#else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
extern C10_API thread_local PODLocalDispatchKeySet raw_local_dispatch_key_set;

// Returns a snapshot (by value) of this thread's dispatch key sets.
inline C10_API LocalDispatchKeySet tls_local_dispatch_key_set() {
  // Don't let people fiddle with the thread_local directly just
  // because they include this header.
  return raw_local_dispatch_key_set;
}
#endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)

// Internal, use ThreadLocalStateGuard
C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set);
// RAII API for manipulating the thread-local dispatch state.
// RAII guard that includes the given dispatch key(s) in the
// thread-local set for its lifetime (bodies live in the .cpp).
// Copy/move are deleted so the TLS restoration runs exactly once.
class C10_API IncludeDispatchKeyGuard {
 public:
  IncludeDispatchKeyGuard(DispatchKeySet);
  // Single-key convenience overload; delegates to the set overload.
  IncludeDispatchKeyGuard(DispatchKey k)
      : IncludeDispatchKeyGuard(DispatchKeySet(k)) {}
  IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete;
  IncludeDispatchKeyGuard operator=(const IncludeDispatchKeyGuard&) = delete;
  IncludeDispatchKeyGuard(IncludeDispatchKeyGuard&&) = delete;
  IncludeDispatchKeyGuard operator=(IncludeDispatchKeyGuard&&) = delete;
  ~IncludeDispatchKeyGuard();

 private:
  // A little micro-optimization to save us from tls_get_addr call
  // on destruction
  PODLocalDispatchKeySet* tls_;
  DispatchKeySet include_; // keys to remove again on destruction
};
// RAII guard that excludes the given dispatch key(s) from the
// thread-local set for its lifetime (bodies live in the .cpp).
// Copy/move are deleted so the TLS restoration runs exactly once.
class C10_API ExcludeDispatchKeyGuard {
 public:
  ExcludeDispatchKeyGuard(DispatchKeySet);
  // Single-key convenience overload; delegates to the set overload.
  ExcludeDispatchKeyGuard(DispatchKey k)
      : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {}
  ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete;
  ExcludeDispatchKeyGuard operator=(const ExcludeDispatchKeyGuard&) = delete;
  ExcludeDispatchKeyGuard(ExcludeDispatchKeyGuard&&) = delete;
  ExcludeDispatchKeyGuard operator=(ExcludeDispatchKeyGuard&&) = delete;
  ~ExcludeDispatchKeyGuard();

 private:
  // A little micro-optimization to save us from tls_get_addr call
  // on destruction
  PODLocalDispatchKeySet* tls_;
  DispatchKeySet exclude_; // keys to restore again on destruction
};
// RAII guard that overwrites the ENTIRE thread-local dispatch key
// state with `key_set`, restoring the previously active state on
// destruction.
struct C10_API ForceDispatchKeyGuard {
 public:
  ForceDispatchKeyGuard(c10::impl::LocalDispatchKeySet key_set)
      : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) {
    c10::impl::_force_tls_local_dispatch_key_set(key_set);
  }
  ~ForceDispatchKeyGuard() {
    c10::impl::_force_tls_local_dispatch_key_set(saved_keyset_);
  }

 private:
  // Snapshot taken before the override, written back in the dtor.
  c10::impl::LocalDispatchKeySet saved_keyset_;
};
// Non-RAII API for manipulating the thread-local dispatch state.
// Please prefer the RAII API. The non-RAII API may be useful when
// the included/excluded state of a given DispatchKey must span
// many calls from the Python to the C++, so you cannot conveniently
// use an RAII guard.
//
// Example use case: a Python context manager that includes a certain
// DispatchKey, to ensure ops running under the context manager dispatch
// through that DispatchKey's registered overrides.
//
// The non-RAII API is less efficient than the RAII guards because both the
// getter and setter will do a tls_getaddr lookup (the RAII struct only needs
// one!)
// Per-key getter/setter for the thread-local excluded set.
C10_API bool tls_is_dispatch_key_excluded(DispatchKey x);
C10_API void tls_set_dispatch_key_excluded(DispatchKey x, bool desired_state);
// Per-key getter/setter for the thread-local included set.
C10_API bool tls_is_dispatch_key_included(DispatchKey x);
C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state);
// Set-valued queries; exact semantics (all-of vs any-of) are defined
// in the .cpp — confirm there before relying on them.
C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks);
C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks);
} // namespace impl
} // namespace c10
| 5,916
| 36.929487
| 80
|
h
|
null |
pytorch-main/c10/core/impl/PyInterpreter.h
|
#pragma once
#include <c10/core/Device.h>
#include <c10/core/Layout.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/macros/Export.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/python_stub.h>
#include <string>
#include <vector>
// Forward declarations
namespace c10 {
struct IValue;
class OperatorHandle;
struct TensorImpl;
} // namespace c10
namespace torch {
namespace jit {
using Stack = std::vector<c10::IValue>;
}
} // namespace torch
// Actual implementation
namespace c10 {
namespace impl {
struct C10_API PyInterpreter;
// Note [Python interpreter tag]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Traditionally, PyTorch is layered such that our Python library
// (libtorch_python) references our pure C++ library (libtorch) as the
// natural order of things. However, sometimes this natural order is
// subverted: C++ objects refer to Python objects (for example, we
// store a PyObject* pointer on TensorImpl so that converting from a
// C++ Tensor to a Python Tensor is just a memory dereference).
//
// These unusual orderings must be treated with care. To start, you need to
// virtualize the destructor so that the PyObject can be decref'ed on
// destruction (because the C++ object itself doesn't know anything about
// Python--remember, layering!). This process itself is fraught, since
// acquiring the GIL could lead to deadlocks if someone is blocking on you
// while holding the GIL. Furthermore, if the C++ objects outlive the
// interpreter (which can happen if you stash them in a static global
// variable defined in libtorch), you may attempt to decref the object when
// the Python interpreter has already been shutdown.
//
// BUT WAIT, IT GETS WORSE. With torchdeploy, there may be multiple Python
// interpreters in a single process. If a C++ object is accessible from
// multiple interpreters, we must take care not to accidentally pass a
// PyObject from one interpreter with another interpreter.
//
// To prevent these mixups, we introduce a PyInterpreter "tag" (object with
// a vtable), which specifies a specific Python interpreter.
//
// - Any given object can be associated with AT MOST one Python interpreter.
// We represent the interpreter tag as a memory address to an instance of
// a virtual class that is allocated once per interpreter (this is so that
// we can request the interpreter to perform operations for us, if
// necessary).
//
// - It can be recorded with a PyObject (PyInterpreterObject) so that
// we know what interpreter the object is associated with, and we can
// raise an error if you try to use the PyObject from the wrong
// interpreter context.
//
// - It contains a vtable that can be used to perform various Python
// operations from ordinary C++ code that ordinarily wouldn't be accessible
// from libtorch.
//
// A simple use case is when a C++ object must be associated with a PyObject.
// However, for TensorImpl, we lazily allocate a PyObject the first time the
// object passes into Python. The invariants for this situation are more
// subtle:
//
// - A given TensorImpl's interpreter tag can only go from uninitialized to
// tagged; once tagged, this is a quiescent state (once tagged to an
// interpreter, ALWAYS tagged to that interpreter)
//
// - A thread may mutate the PyObject field of a TensorImpl if and only if it
// holds the GIL for the interpreter tagged on the TensorImpl. (If the
// TensorImpl is not tagged, it must first atomically claim its tag before it
// can validly write)
//
// WARNING: This class has to be written very carefully, because it may be
// possible for a Tensor to have a reference an interpreter corresponding to
// a shared library that has ALREADY BEEN UNLOADED. This makes blindly calling
// virtual methods very dangerous, because the vtable may be garbage at that
// point (on a good day, you might get "pure virtual method called").
//
// The idea to solve this problem is we always leak PyInterpreters (so they
// always stay live even after dlclose), and make sure we can disarm their
// virtual methods by indirecting through a separate PyInterpreterVTable
// object. This can be replaced with a no-op vtable from libc10.so, which
// is guaranteed to stick around until the bitter end.
//
// NB: The downside with representing PyInterpreter tags as full objects is that
// it takes an extra word on TensorImpl. If tags were instead just integer
// indices, on 64-bit architectures we could pack the tag and PyObject together
// into a single atomic word. On 32-bit architectures we could simply say that
// only one Python interpreter is supported (erroring if a nontrivial
// interpreter tag is attempted to be set).
//
// The difficulty with this scheme is we need to maintain an out-of-line table
// to get at the PyInterpreters so that we can do virtual method calls on them,
// and registration/deregistration to this table must be done in a thread safe
// manner. This can be easily done if the number of possible PyInterpreters is
// small enough (e.g., 8-bit integer) by simply preallocating an array of
// sufficient size to hold all possible interpreters. Surely 128 threads is
// more than enough for anyone!
//
// I didn't decide to do this technique at the moment, because the extra word
// added by the PyInterpreter tag takes us to 24 words, which means that we
// still fit inside three eight word cache lines. If you need to penny pinch
// another word consider doing this!
// Abstract interface for everything libtorch needs to ask a Python
// interpreter to do. A concrete implementation lives on the Python
// side; a no-op implementation replaces it when the interpreter is
// disarmed (see the class comment above).
struct C10_API PyInterpreterVTable {
  virtual ~PyInterpreterVTable() = default;

  // Report the name of this interpreter
  virtual std::string name() const = 0;

  // Run Py_DECREF on a PyObject. We DO NOT assume the GIL is held on call
  // See NOTE [PyInterpreter::decref takes an `is_tensor` arg]
  virtual void decref(PyObject* pyobj, bool is_tensor) const = 0;

  // Perform a detach by deferring to the __torch_dispatch__ implementation of
  // detach, which will also arrange for the PyObject to get copied in this
  // situation
  virtual c10::intrusive_ptr<TensorImpl> detach(
      const TensorImpl* self) const = 0;

  // Invoke the Python boxed fallback dispatch to go back into Python
  virtual void dispatch(const c10::OperatorHandle& op, torch::jit::Stack* stack)
      const = 0;

  virtual void reportErrorCallback(PyObject* callback, DispatchKey key)
      const = 0;

  // This is only invoked in the multipy/torchdeploy situation from
  // pythonOpRegistrationTrampoline; this lets us get to the Python
  // interpreter to actually find the appropriate Python op registration
  // entry to call.
  virtual void python_op_registration_trampoline(
      const c10::OperatorHandle& op,
      c10::DispatchKey,
      torch::jit::Stack* stack) const = 0;

  // Invoke the Python dispatcher to handle this call
  virtual void python_dispatcher(
      const c10::OperatorHandle& op,
      c10::DispatchKeySet,
      torch::jit::Stack* stack) const = 0;

  // Per-TensorImpl metadata/layout queries forwarded to the Python
  // interpreter (each takes the queried TensorImpl as `self`).
  virtual bool is_contiguous(const TensorImpl* self, at::MemoryFormat)
      const = 0;
  virtual bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
      const = 0;
  virtual bool is_non_overlapping_and_dense(const TensorImpl* self) const = 0;
  virtual c10::Device device(const TensorImpl* self) const = 0;
  virtual int64_t dim(const TensorImpl* self) const = 0;
  virtual c10::IntArrayRef strides(const TensorImpl* self) const = 0;
  virtual c10::IntArrayRef sizes(const TensorImpl* self) const = 0;
  virtual c10::SymIntArrayRef sym_sizes(const TensorImpl* self) const = 0;
  virtual c10::Layout layout(const TensorImpl* self) const = 0;
  virtual c10::SymInt sym_numel(const TensorImpl* self) const = 0;
  virtual c10::SymIntArrayRef sym_strides(const TensorImpl* self) const = 0;
  virtual c10::SymInt sym_storage_offset(const TensorImpl* self) const = 0;

  // Tracing hooks for GPU event/stream/memory operations; handles are
  // passed as opaque uintptr_t values.
  virtual void trace_gpu_event_creation(uintptr_t event) const = 0;
  virtual void trace_gpu_event_deletion(uintptr_t event) const = 0;
  virtual void trace_gpu_event_record(uintptr_t event, uintptr_t stream)
      const = 0;
  virtual void trace_gpu_event_wait(uintptr_t event, uintptr_t stream)
      const = 0;
  virtual void trace_gpu_memory_allocation(uintptr_t ptr) const = 0;
  virtual void trace_gpu_memory_deallocation(uintptr_t ptr) const = 0;
  virtual void trace_gpu_stream_creation(uintptr_t stream) const = 0;
  virtual void trace_gpu_device_synchronization() const = 0;
  virtual void trace_gpu_stream_synchronization(uintptr_t stream) const = 0;
  virtual void trace_gpu_event_synchronization(uintptr_t event) const = 0;

  virtual void reset_backward_hooks(const TensorImpl* self) const = 0;
};
// Tag object identifying one Python interpreter. All behavior is
// routed through an out-of-line vtable pointer so the methods can be
// swapped for no-ops ("disarmed") when the interpreter's shared
// library is unloaded — see Note [Python interpreter tag] above.
struct C10_API PyInterpreter {
  const PyInterpreterVTable* vtable_;

  // Fix: dropped the spurious trailing semicolon after the ctor body
  // (flagged by -Wextra-semi / clang-tidy).
  PyInterpreter(const PyInterpreterVTable* vtable) : vtable_(vtable) {}

  // Smart-pointer-style access to the vtable, so callers write
  // (*interp)->name() etc.
  const PyInterpreterVTable& operator*() const noexcept {
    return *vtable_;
  }
  const PyInterpreterVTable* operator->() const noexcept {
    return vtable_;
  }

  // Disarm this PyInterpreter, making all of its methods noops.
  // The vtable pointer is not an atomic at the moment, which means
  // a disarm() invocation that is concurrent with active destructors
  // is not thread safe and will trigger TSAN. My hope is that this
  // situations doesn't ever actually happen; tensor destruction should
  // quiesce when a dlclose happens, and any long lived tensors whose
  // destructors would be disarmed here only begin the destruction process
  // on process shutdown (long after the dlclose has occurred).
  void disarm() noexcept;
};
// PyInterpreterStatus describes what the state of its interpreter tag
// is, relative to the thread currently holding the GIL.
// See Note [Python interpreter tag] for the tagging protocol these
// states participate in.
enum class PyInterpreterStatus {
  // We just allocated the Tensor, it hasn't escaped to other threads,
  // we know that it definitely hasn't been tagged to be associated
  // with an interpreter.
  DEFINITELY_UNINITIALIZED,
  // We queried the interpreter field and it looked uninitialized.  But
  // another thread may have raced with us to tag it with some other
  // interpreter id.  So we will have to do a CEX to make sure we can
  // actually nab it.
  MAYBE_UNINITIALIZED,
  // We queried the interpreter field and it was tagged to belong to us.
  // This means we have sole write access (as we hold the GIL for this
  // interpreter)
  TAGGED_BY_US,
  // Someone else tagged this.  We can't use this TensorImpl from Python.
  TAGGED_BY_OTHER,
};
} // namespace impl
} // namespace c10
| 10,530
| 43.434599
| 80
|
h
|
null |
pytorch-main/c10/core/impl/PyObjectSlot.h
|
#pragma once
#include <c10/core/impl/HermeticPyObjectTLS.h>
#include <c10/core/impl/PyInterpreter.h>
#include <c10/util/Optional.h>
#include <c10/util/python_stub.h>
#include <atomic>
namespace c10 {
namespace impl {
// Holds a TensorImpl's (lazily created) Python mirror object plus the
// interpreter tag that says which Python interpreter owns it. See
// Note [Python interpreter tag] in PyInterpreter.h for the protocol.
struct C10_API PyObjectSlot {
 public:
  PyObjectSlot();

  void destroy_pyobj_if_needed();

  // Associate the TensorImpl with the specified PyObject, and, if necessary,
  // also tag the interpreter.
  //
  // NB: This lives in a header so that we can inline away the switch on status
  //
  // NB: THIS FUNCTION CAN RAISE AN EXCEPTION.  Make sure to clean up after
  // PyObject if necessary!
  void init_pyobj(
      PyInterpreter* self_interpreter,
      PyObject* pyobj,
      PyInterpreterStatus status) {
    impl::PyInterpreter* expected = nullptr;
    switch (status) {
      case impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED:
        // caller guarantees there is no multithreaded access; if there is
        // no data race OK to do a relaxed store
        pyobj_interpreter_.store(self_interpreter, std::memory_order_relaxed);
        break;
      case impl::PyInterpreterStatus::TAGGED_BY_US:
        // no tagging is necessary, the tag is already correct
        break;
      case impl::PyInterpreterStatus::MAYBE_UNINITIALIZED:
        // attempt to claim this TensorImpl with the specified interpreter
        // tag
        if (pyobj_interpreter_.compare_exchange_strong(
                expected, self_interpreter, std::memory_order_acq_rel)) {
          break;
        }
        // test if, actually, it was already tagged by us!  this situation can't
        // be caused by a race, but it could be caused by a situation
        // where someone conservatively tagged the tensor as MAYBE_UNINITIALIZED
        // (because they didn't pre-check the tag) when actually it was
        // owned by the interpreter
        if (expected == self_interpreter) {
          break;
        }
        // fallthrough, we lost the race.  We are guaranteed not to lose the
        // race with ourself, as calls to init_pyobj with the same interpreter
        // ID must be sequentialized by the GIL
        C10_FALLTHROUGH;
      case impl::PyInterpreterStatus::TAGGED_BY_OTHER:
        TORCH_CHECK(
            false,
            "cannot allocate PyObject for Tensor on interpreter ",
            self_interpreter,
            " that has already been used by another torch deploy interpreter ",
            pyobj_interpreter_.load());
    }

    // we are the ONLY thread that can have gotten to this point.  It is not
    // possible to conflict with another zero interpreter as access is protected
    // by GIL
    // NB: owns_pyobj tag is initially false
    pyobj_ = pyobj;
  }

  // Query the PyObject interpreter.  This may return null if there is no
  // interpreter.  This is racy!
  PyInterpreter* pyobj_interpreter();

  // Raw access to pyobj_ with the ownership tag stripped; performs no
  // interpreter check (see the comment on pyobj_ below).
  PyObject* _unchecked_untagged_pyobj() const;

  // Test the interpreter tag.  If tagged for the current interpreter, return
  // a non-nullopt (but possibly null) PyObject.  If (possibly) untagged,
  // returns a nullopt.  If it is definitely invalid, raises an error.
  //
  // NB: this lives in header so that we can avoid actually creating the
  // c10::optional
  c10::optional<PyObject*> check_pyobj(PyInterpreter* self_interpreter) const {
    // Note [Memory ordering on Python interpreter tag]
    impl::PyInterpreter* interpreter =
        pyobj_interpreter_.load(std::memory_order_acquire);
    if (interpreter == nullptr) {
      // NB: This never returns DEFINITELY_UNINITIALIZED because there is
      // always the possibility that another thread races to initialize
      // after we query here.  The only time when we can conclude a tensor
      // is definitely uninitialized is when we have just allocated it and
      // it cannot have escaped to other threads yet
      return c10::nullopt;
    } else if (interpreter == self_interpreter) {
      // NB: pyobj_ could still be null!
      if (c10::impl::HermeticPyObjectTLS::get_state()) {
        return c10::nullopt;
      } else {
        return c10::make_optional(_unchecked_untagged_pyobj());
      }
    } else {
      TORCH_CHECK(
          false,
          "cannot access PyObject for Tensor on interpreter ",
          (*self_interpreter)->name(),
          " that has already been used by another torch deploy interpreter ",
          (*pyobj_interpreter_.load())->name());
    }
  }

  // Clear the PyObject field for an interpreter, in situations where we
  // statically know the tensor is tagged with our interpreter.
  void unchecked_clear_pyobj(PyInterpreter* interpreter);

  PyInterpreter& load_pyobj_interpreter() const;

  // Accessors for the ownership tag bit stored in pyobj_ (see the
  // member comment below).
  bool owns_pyobj();
  void set_owns_pyobj(bool b);

 private:
  // This field contains the interpreter tag for this object.  See
  // Note [Python interpreter tag] for general context
  //
  // Note [Memory ordering on Python interpreter tag]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // What memory_order do we need when accessing this atomic?  We don't
  // need a single total modification order (as provided by
  // memory_order_seq_cst) as pyobj_interpreter_ is monotonic: it can only
  // transition from -1 to some positive integer and never changes afterwards.
  // Because there is only one modification, it trivially already has a total
  // modification order (e.g., we don't need fences or locked instructions on
  // x86)
  //
  // In fact, one could make a reasonable argument that relaxed reads are OK,
  // due to the presence of external locking (GIL) to ensure that interactions
  // with other data structures are still correctly synchronized, so that
  // we fall in the "Single-Location Data Structures" case as described in
  // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
  // However, on x86, it doesn't matter if I use acquire or relaxed on the load
  // as I get the same assembly in both cases.  So I just use the more
  // conservative acquire (which will impede compiler optimizations but I don't
  // care)
  std::atomic<PyInterpreter*> pyobj_interpreter_;

  // This field contains a reference to a PyObject representing this Tensor.
  // If pyobj is nullptr, when we transfer Tensor to Python, we allocate a new
  // PyObject for it and set this field.  This field does not have to be
  // protected by an atomic as it is only allowed to be accessed when you hold
  // the GIL, or during destruction of the tensor.
  //
  // When a PyObject dies, you are obligated to clear this field
  // (otherwise, you will try to use-after-free the pyobj); this currently
  // occurs in THPVariable_clear in torch/csrc/autograd/python_variable.cpp
  //
  // NB: Ordinarily, this should not be a strong reference, as if the
  // PyObject owns the Tensor, this would create a reference cycle.
  // However, sometimes this ownership flips.  To track who owns
  // who, this has a single pointer tag indicating whether or not the
  // C++ object owns the PyObject (the common case, zero, means PyObject
  // owns the C++ object); see _unchecked_untagged_pyobj for raw access
  // or check_pyobj for checked access.  See references to PyObject
  // resurrection in torch/csrc/autograd/python_variable.cpp
  PyObject* pyobj_;
};
} // namespace impl
} // namespace c10
| 7,301
| 41.208092
| 80
|
h
|
null |
pytorch-main/c10/core/impl/PythonDispatcherTLS.h
|
#pragma once
#include <c10/core/impl/PyInterpreter.h>
#include <c10/macros/Export.h>
namespace c10 {
namespace impl {
// Thread-local slot holding the interpreter used as the Python
// dispatcher (nullptr / unset when no Python dispatcher is active).
struct C10_API PythonDispatcherTLS {
  static void set_state(PyInterpreter* state);
  static PyInterpreter* get_state();
  static void reset_state();
};
struct C10_API DisablePythonDispatcher {
DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) {
PythonDispatcherTLS::set_state({});
}
~DisablePythonDispatcher() {
PythonDispatcherTLS::set_state(old_);
}
PyInterpreter* old_;
};
} // namespace impl
} // namespace c10
| 574
| 20.296296
| 70
|
h
|
null |
pytorch-main/c10/core/impl/SizesAndStrides.h
|
#pragma once
#include <algorithm>
#include <cstdint>
#include <c10/macros/Macros.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/SmallVector.h>
#define C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE 5
namespace c10 {
namespace impl {
// Packed container for TensorImpl sizes and strides.
// This design improves on the previous approach of using a pair of
// c10::SmallVector<int64_t, 5> by specializing for the operations we
// actually use and enforcing that the number of sizes is the same as
// the number of strides. The memory layout is as follows:
//
// 1 size_t for the size
// 5 eightbytes of inline sizes and 5 eightbytes of inline strides, OR pointer
// to out-of-line array
class C10_API SizesAndStrides {
 public:
  // TODO: different iterator types for sizes & strides to prevent
  // mixing the two accidentally.
  using sizes_iterator = int64_t*;
  using sizes_const_iterator = const int64_t*;
  using strides_iterator = int64_t*;
  using strides_const_iterator = const int64_t*;

  // Default state: one dimension with size 0 and stride 1.
  SizesAndStrides() : size_(1) {
    size_at_unchecked(0) = 0;
    stride_at_unchecked(0) = 1;
  }

  ~SizesAndStrides() {
    // Only the malloc'd out-of-line buffer needs freeing; inline
    // storage lives inside the object itself.
    if (C10_UNLIKELY(!isInline())) {
      free(outOfLineStorage_);
    }
  }

  SizesAndStrides(const SizesAndStrides& rhs) : size_(rhs.size_) {
    if (C10_LIKELY(rhs.isInline())) {
      copyDataInline(rhs);
    } else {
      allocateOutOfLineStorage(size_);
      copyDataOutline(rhs);
    }
  }

  SizesAndStrides& operator=(const SizesAndStrides& rhs) {
    if (this == &rhs) {
      return *this;
    }
    if (C10_LIKELY(rhs.isInline())) {
      // Switching (or staying) inline: drop any out-of-line buffer,
      // then do a fixed-size memcpy of the inline storage.
      if (C10_UNLIKELY(!isInline())) {
        free(outOfLineStorage_);
      }
      copyDataInline(rhs);
    } else {
      // rhs is out-of-line: (re)allocate our buffer to match.
      if (isInline()) {
        allocateOutOfLineStorage(rhs.size_);
      } else {
        resizeOutOfLineStorage(rhs.size_);
      }
      copyDataOutline(rhs);
    }
    size_ = rhs.size_;
    return *this;
  }

  // Move from rhs. rhs.size() == 0 afterwards.
  SizesAndStrides(SizesAndStrides&& rhs) noexcept : size_(rhs.size_) {
    if (C10_LIKELY(isInline())) {
      memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_));
    } else {
      // Steal rhs's heap buffer; null it out so rhs's dtor won't free.
      outOfLineStorage_ = rhs.outOfLineStorage_;
      rhs.outOfLineStorage_ = nullptr;
    }

    rhs.size_ = 0;
  }

  // Move from rhs. rhs.size() == 0 afterwards.
  SizesAndStrides& operator=(SizesAndStrides&& rhs) noexcept {
    if (this == &rhs) {
      return *this;
    }
    if (C10_LIKELY(rhs.isInline())) {
      if (C10_UNLIKELY(!isInline())) {
        free(outOfLineStorage_);
      }
      copyDataInline(rhs);
    } else {
      // They're outline. We're going to steal their vector.
      if (!isInline()) {
        free(outOfLineStorage_);
      }
      outOfLineStorage_ = rhs.outOfLineStorage_;
      rhs.outOfLineStorage_ = nullptr;
    }
    size_ = rhs.size_;
    rhs.size_ = 0;

    return *this;
  }

  // Number of dimensions (sizes and strides always have equal length).
  size_t size() const noexcept {
    return size_;
  }

  const int64_t* sizes_data() const noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[0];
    } else {
      return &outOfLineStorage_[0];
    }
  }

  int64_t* sizes_data() noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[0];
    } else {
      return &outOfLineStorage_[0];
    }
  }

  sizes_const_iterator sizes_begin() const noexcept {
    return sizes_data();
  }

  sizes_iterator sizes_begin() noexcept {
    return sizes_data();
  }

  sizes_const_iterator sizes_end() const noexcept {
    return sizes_begin() + size();
  }

  sizes_iterator sizes_end() noexcept {
    return sizes_begin() + size();
  }

  IntArrayRef sizes_arrayref() const noexcept {
    return IntArrayRef{sizes_data(), size()};
  }

  void set_sizes(IntArrayRef newSizes) {
    resize(newSizes.size());
    std::copy(newSizes.begin(), newSizes.end(), sizes_begin());
  }

  void set_strides(IntArrayRef strides) {
    TORCH_INTERNAL_ASSERT(strides.size() == size());
    std::copy(strides.begin(), strides.end(), strides_begin());
  }

  // Strides are stored immediately after the sizes: at a fixed offset
  // in inline storage, or at offset size() in the out-of-line buffer.
  const int64_t* strides_data() const noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
    } else {
      return &outOfLineStorage_[size()];
    }
  }

  int64_t* strides_data() noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
    } else {
      return &outOfLineStorage_[size()];
    }
  }

  strides_const_iterator strides_begin() const noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
    } else {
      return &outOfLineStorage_[size()];
    }
  }

  strides_iterator strides_begin() noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
    } else {
      return &outOfLineStorage_[size()];
    }
  }

  strides_const_iterator strides_end() const noexcept {
    return strides_begin() + size();
  }

  strides_iterator strides_end() noexcept {
    return strides_begin() + size();
  }

  IntArrayRef strides_arrayref() const noexcept {
    return IntArrayRef{strides_data(), size()};
  }

  // Size accessors.
  int64_t size_at(size_t idx) const noexcept {
    assert(idx < size());
    return sizes_data()[idx];
  }

  int64_t& size_at(size_t idx) noexcept {
    assert(idx < size());
    return sizes_data()[idx];
  }

  int64_t size_at_unchecked(size_t idx) const noexcept {
    return sizes_data()[idx];
  }

  int64_t& size_at_unchecked(size_t idx) noexcept {
    return sizes_data()[idx];
  }

  // Size accessors.
  int64_t stride_at(size_t idx) const noexcept {
    assert(idx < size());
    return strides_data()[idx];
  }

  int64_t& stride_at(size_t idx) noexcept {
    assert(idx < size());
    return strides_data()[idx];
  }

  int64_t stride_at_unchecked(size_t idx) const noexcept {
    return strides_data()[idx];
  }

  int64_t& stride_at_unchecked(size_t idx) noexcept {
    return strides_data()[idx];
  }

  // Change the number of dimensions; newly added sizes/strides are
  // zeroed. Stays on the fast inline path when possible.
  void resize(size_t newSize) {
    const auto oldSize = size();
    if (newSize == oldSize) {
      return;
    }
    if (C10_LIKELY(
            newSize <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE && isInline())) {
      if (oldSize < newSize) {
        // Zero both the new size slots and the corresponding stride
        // slots (which live at a fixed offset in inline storage).
        const auto bytesToZero =
            (newSize - oldSize) * sizeof(inlineStorage_[0]);
        memset(&inlineStorage_[oldSize], 0, bytesToZero);
        memset(
            &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE + oldSize],
            0,
            bytesToZero);
      }
      size_ = newSize;
    } else {
      resizeSlowPath(newSize, oldSize);
    }
  }

  void resizeSlowPath(size_t newSize, size_t oldSize);

 private:
  // True when sizes/strides fit in the in-object inline buffer.
  bool isInline() const noexcept {
    return size_ <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE;
  }

  void copyDataInline(const SizesAndStrides& rhs) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.isInline());
    memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_));
  }

  // Bytes needed for `size` dimensions of sizes plus strides.
  static size_t storageBytes(size_t size) noexcept {
    return size * 2 * sizeof(int64_t);
  }

  void allocateOutOfLineStorage(size_t size) {
    outOfLineStorage_ = static_cast<int64_t*>(malloc(storageBytes(size)));
    TORCH_CHECK(
        outOfLineStorage_,
        "Could not allocate memory for Tensor SizesAndStrides!");
  }

  void resizeOutOfLineStorage(size_t newSize) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isInline());
    outOfLineStorage_ = static_cast<int64_t*>(
        realloc(outOfLineStorage_, storageBytes(newSize)));
    TORCH_CHECK(
        outOfLineStorage_,
        "Could not allocate memory for Tensor SizesAndStrides!");
  }

  void copyDataOutline(const SizesAndStrides& rhs) noexcept {
    memcpy(outOfLineStorage_, rhs.outOfLineStorage_, storageBytes(rhs.size_));
  }

  size_t size_;
  // Either a pointer to a malloc'd [sizes..., strides...] buffer, or
  // the inline buffer itself (first half sizes, second half strides);
  // isInline() tells which member is active.
  union {
    int64_t* outOfLineStorage_;
    int64_t inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE * 2]{};
  };
};
} // namespace impl
} // namespace c10
| 7,934
| 24.679612
| 78
|
h
|
null |
pytorch-main/c10/core/impl/TorchDispatchModeTLS.h
|
#pragma once
#include <c10/core/SafePyObject.h>
#include <c10/macros/Export.h>
namespace c10 {
namespace impl {
// Thread-local stack of torch dispatch modes, each held as a
// SafePyObject.
struct C10_API TorchDispatchModeTLS {
  // Stack manipulation (push/pop/index); bodies live in the .cpp.
  static void push_onto_stack(std::shared_ptr<SafePyObject> mode);
  static const std::shared_ptr<SafePyObject> pop_stack();
  static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
  static int64_t stack_len();

  // Whole-state snapshot/restore, e.g. for saving TLS across threads.
  static const TorchDispatchModeTLS& get_state();
  static void set_state(TorchDispatchModeTLS state);

 private:
  std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
};

// NOTE(review): presumably reports whether the mode stack is
// non-empty — confirm against the .cpp definition.
C10_API bool dispatch_mode_enabled();
} // namespace impl
} // namespace c10
| 636
| 23.5
| 72
|
h
|
null |
pytorch-main/c10/core/impl/VirtualGuardImpl.h
|
#pragma once
#include <c10/core/impl/DeviceGuardImplInterface.h>
namespace c10 {
namespace impl {
/**
* An implementation of DeviceGuardImplInterface which delegates
* to virtual dispatch on the DeviceGuardImpl registry.
*/
class VirtualGuardImpl final : public DeviceGuardImplInterface {
 public:
  // Looks up the registered implementation for the given device type
  // in the DeviceGuardImpl registry.
  VirtualGuardImpl(DeviceType device_type)
      : impl_(getDeviceGuardImpl(device_type)) {}
  // This constructor exists purely for testing
  VirtualGuardImpl(const DeviceGuardImplInterface* impl) : impl_(impl) {}

  // Copying and moving is OK!

  // Every override below simply forwards to the underlying
  // device-specific implementation.
  DeviceType type() const override {
    return impl_->type();
  }
  Device exchangeDevice(Device d) const override {
    return impl_->exchangeDevice(d);
  }
  Device getDevice() const override {
    return impl_->getDevice();
  }
  void setDevice(Device d) const override {
    impl_->setDevice(d);
  }
  void uncheckedSetDevice(Device d) const noexcept override {
    impl_->uncheckedSetDevice(d);
  }
  Stream getStream(Device d) const noexcept override {
    return impl_->getStream(d);
  }
  Stream getDefaultStream(Device d) const override {
    return impl_->getDefaultStream(d);
  }
  Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
      const override {
    return impl_->getStreamFromGlobalPool(d, isHighPriority);
  }
  Stream exchangeStream(Stream s) const noexcept override {
    return impl_->exchangeStream(s);
  }
  DeviceIndex deviceCount() const noexcept override {
    return impl_->deviceCount();
  }

  // Event functions
  void record(
      void** event,
      const Stream& stream,
      const DeviceIndex device_index,
      const EventFlag flag) const override {
    impl_->record(event, stream, device_index, flag);
  }
  void block(void* event, const Stream& stream) const override {
    impl_->block(event, stream);
  }
  bool queryEvent(void* event) const override {
    return impl_->queryEvent(event);
  }
  void destroyEvent(void* event, const DeviceIndex device_index)
      const noexcept override {
    impl_->destroyEvent(event, device_index);
  }

  bool queryStream(const Stream& stream) const override {
    return impl_->queryStream(stream);
  }

  void synchronizeStream(const Stream& stream) const override {
    impl_->synchronizeStream(stream);
  }

  void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
      const override {
    impl_->recordDataPtrOnStream(data_ptr, stream);
  }

 private:
  // Non-owning pointer into the global registry; never freed here.
  const DeviceGuardImplInterface* impl_ = nullptr;
};
} // namespace impl
} // namespace c10
| 2,535
| 27.177778
| 80
|
h
|
null |
pytorch-main/c10/core/impl/cow/context.h
|
#pragma once
#include <c10/macros/Export.h>
#include <c10/util/UniqueVoidPtr.h>
#include <atomic>
#include <cstdint>
#include <memory>
#include <shared_mutex>
#include <variant>
namespace c10::impl::cow {
/// The c10::DataPtr context for copy-on-write storage.
/// The c10::DataPtr context for copy-on-write storage.
class C10_API Context {
 public:
  /// Creates an instance, holding the pair of data and original
  /// deleter.
  ///
  /// Note that the deleter will only be called in our destructor if
  /// the last reference to this goes away without getting
  /// materialized.
  explicit Context(std::unique_ptr<void, DeleterFnPtr> data);

  /// Increments the current refcount.
  auto increment_refcount() -> void;

  // See README.md in this directory to understand the locking
  // strategy.

  /// Represents a reference to the context.
  ///
  /// This is returned by decrement_refcount to allow the caller to
  /// copy the data under the shared lock.
  using NotLastReference = std::shared_lock<std::shared_mutex>;

  /// Represents the last reference to the context.
  ///
  /// This will be returned by decrement_refcount when it is the last
  /// reference remaining and after any pending copies have completed.
  using LastReference = std::unique_ptr<void, DeleterFnPtr>;

  /// Decrements the refcount, returning a handle indicating what to
  /// do with it.
  auto decrement_refcount() -> std::variant<NotLastReference, LastReference>;

 private:
  // The destructor is hidden, this should only ever be used within
  // UniqueVoidPtr using cow::delete_context as the deleter.
  ~Context();

  // Coordinates pending copies (shared lock, see NotLastReference)
  // with last-reference teardown (exclusive).
  std::shared_mutex mutex_;
  // The underlying data plus the deleter that eventually frees it.
  std::unique_ptr<void, DeleterFnPtr> data_;
  // Starts at 1 for the creating reference.
  std::atomic<std::int64_t> refcount_ = 1;
};
} // namespace c10::impl::cow
| 1,709
| 28.482759
| 77
|
h
|
null |
pytorch-main/c10/cuda/CUDAAlgorithm.h
|
#ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#endif
namespace c10 {
namespace cuda {
#ifdef THRUST_DEVICE_LOWER_BOUND_WORKS
template <typename Iter, typename Scalar>
__forceinline__ __device__ Iter
lower_bound(Iter start, Iter end, Scalar value) {
return thrust::lower_bound(thrust::device, start, end, value);
}
#else
// thrust::lower_bound is broken on device, see
// https://github.com/NVIDIA/thrust/issues/1734 Implementation inspired by
// https://github.com/pytorch/pytorch/blob/805120ab572efef66425c9f595d9c6c464383336/aten/src/ATen/native/cuda/Bucketization.cu#L28
// Iterative binary search: returns the first iterator in [start, end)
// whose element is not less than `value`, or `end` if there is none.
// Same contract as std::lower_bound, so the range must be sorted with
// respect to operator<.
template <typename Iter, typename Scalar>
__device__ Iter lower_bound(Iter start, Iter end, Scalar value) {
  while (start < end) {
    // Midpoint via halved offset (avoids overflowing iterator arithmetic).
    auto mid = start + ((end - start) >> 1);
    if (*mid < value) {
      // Everything up to and including mid is < value; discard it.
      start = mid + 1;
    } else {
      // mid may be the answer; keep it in the candidate range.
      end = mid;
    }
  }
  return end;
}
#endif // THRUST_DEVICE_LOWER_BOUND_WORKS
} // namespace cuda
} // namespace c10
| 1,066
| 30.382353
| 130
|
h
|
null |
pytorch-main/c10/cuda/CUDACachingAllocator.h
|
#pragma once
#include <c10/core/Allocator.h>
#include <c10/core/StorageImpl.h>
#include <c10/cuda/CUDAGraphsC10Utils.h>
#include <c10/cuda/CUDAMacros.h>
#include <c10/cuda/CUDAStream.h>
#include <c10/util/Registry.h>
#include <array>
#include <mutex>
#include <set>
#include <unordered_set>
namespace c10 {
// The caching allocator will execute every registered callback when it is
// unable to find a block inside the already-allocated area.
class C10_CUDA_API FreeMemoryCallback {
 public:
  virtual ~FreeMemoryCallback() = default;
  // NOTE(review): the bool presumably signals whether the callback managed
  // to release memory — confirm against the call sites in the allocator.
  virtual bool Execute() = 0;
};
C10_DECLARE_REGISTRY(FreeCudaMemoryCallbacksRegistry, FreeMemoryCallback);
#define REGISTER_FREE_MEMORY_CALLBACK(name, ...) \
C10_REGISTER_CLASS(FreeCudaMemoryCallbacksRegistry, name, __VA_ARGS__);
namespace cuda {
// TODO: Turn this into an honest to goodness class. I briefly attempted to do
// this, but it was a bit irritating to figure out how to also correctly
// apply pimpl pattern so I didn't have to leak any internal implementation
// details in the header (CUDACachingAllocator could be made a pimpl, but
// you also need to appropriately define a class which is a subclass
// of Allocator. Not impossible, but required a bit more surgery than
// I wanted to do at the time.)
//
// Why is this using a namespace rather than old-style THCCachingAllocator_
// prefix? Mostly because it made the HIPify rules easier to write; _ is
// not counted as a word boundary, so you would otherwise have to list each
// of these functions.
namespace CUDACachingAllocator {
// One tracked statistic: its current value, the observed peak, and the
// running totals accumulated under `allocated` / `freed` (see the COUNT/SUM
// annotations on DeviceStats below for how each instance is interpreted).
struct Stat {
  int64_t current = 0;
  int64_t peak = 0;
  int64_t allocated = 0;
  int64_t freed = 0;
};

// Breakdown axis for a StatArray; AGGREGATE covers both pools combined.
enum struct StatType : uint64_t {
  AGGREGATE = 0,
  SMALL_POOL = 1,
  LARGE_POOL = 2,
  NUM_TYPES = 3 // remember to update this whenever a new stat type is added
};

// One Stat per StatType, indexed by static_cast<size_t>(StatType).
typedef std::array<Stat, static_cast<size_t>(StatType::NUM_TYPES)> StatArray;
// Struct containing memory allocator summary statistics for a device.
struct DeviceStats {
  // COUNT: allocations requested by client code
  StatArray allocation;
  // COUNT: number of allocated segments from cudaMalloc().
  StatArray segment;
  // COUNT: number of active memory blocks (allocated or used by stream)
  StatArray active;
  // COUNT: number of inactive, split memory blocks (unallocated but can't be
  // released via cudaFree)
  StatArray inactive_split;
  // SUM: bytes allocated by this memory allocator
  StatArray allocated_bytes;
  // SUM: bytes reserved by this memory allocator (both free and used)
  StatArray reserved_bytes;
  // SUM: bytes within active memory blocks
  StatArray active_bytes;
  // SUM: bytes within inactive, split memory blocks
  StatArray inactive_split_bytes;
  // SUM: bytes requested by client code
  StatArray requested_bytes;
  // COUNT: total number of failed calls to CUDA malloc necessitating cache
  // flushes.
  int64_t num_alloc_retries = 0;
  // COUNT: total number of OOMs (i.e. failed calls to CUDA after cache flush)
  int64_t num_ooms = 0;
  // COUNT: total number of oversize blocks allocated from pool
  Stat oversize_allocations;
  // COUNT: total number of oversize blocks requiring malloc
  Stat oversize_segments;
  // SIZE: maximum block size that is allowed to be split.
  int64_t max_split_size = 0;
};
typedef std::shared_ptr<GatheredContext> (*CreateContextFn)(void);
// One recorded allocation event, attached to the block that served it
// (see BlockInfo::history).
struct History {
  void* addr; // address of the allocation this entry describes
  size_t real_size; // unrounded, actually requested size
  std::shared_ptr<GatheredContext> context; // per-watcher context
};
// Struct containing info of an allocation block (i.e. a fractional part of a
// cudaMalloc).
struct BlockInfo {
  int64_t size = 0; // size occupied by the block (cf. History::real_size)
  int64_t requested_size = 0; // size the client originally asked for
  int32_t gc_counter = 0;
  bool allocated = false; // currently handed out to a client
  bool active = false; // allocated or still in use by a stream
  std::vector<History> history; // allocation events recorded for this block
};
// Struct containing info of a memory segment (i.e. one contiguous cudaMalloc).
struct SegmentInfo {
  int64_t device = 0;
  int64_t address = 0;
  int64_t total_size = 0;
  int64_t requested_size = 0;
  int64_t allocated_size = 0;
  int64_t active_size = 0;
  cudaStream_t stream = 0;
  bool is_large = false;
  bool is_expandable = false; // uses cuMemMap-backed expandable segments
  MempoolId_t owner_private_pool_id = {0, 0};
  std::vector<BlockInfo> blocks; // the blocks that partition this segment
};
struct AllocatorState {
virtual ~AllocatorState() = default;
};
// One event in the allocator's per-device trace (see
// SnapshotInfo::device_traces); produced when history recording is enabled.
struct TraceEntry {
  enum Action {
    ALLOC, // API made to the caching allocator for new memory
    FREE_REQUESTED, // API call made to the caching allocator to free memory
    FREE_COMPLETED, // The allocator might have to delay a free because
                    // it is still in use on another stream via record_stream
                    // This event is generated when a free actually completes.
    SEGMENT_ALLOC, // a call to cudaMalloc to get more memory from the OS
    SEGMENT_FREE, // a call to cudaFree to return memory to the OS (e.g. to
                  // defragment or empty_caches)
    SEGMENT_MAP, // a call to cuMemMap (used with expandable_segments)
    SEGMENT_UNMAP, // unmap part of a segment (used with expandable segments)
    SNAPSHOT, // a call to snapshot, used to correlate memory snapshots to trace
              // events
    OOM // the allocator threw an OutOfMemoryError (addr_ is the amount of free
        // bytes reported by cuda)
  };
  TraceEntry(
      Action action,
      int64_t addr,
      size_t size,
      cudaStream_t stream,
      std::shared_ptr<GatheredContext> context = nullptr)
      : action_(action),
        addr_(addr),
        context_(std::move(context)),
        stream_(stream),
        size_(size) {}
  Action action_;
  int64_t addr_; // for OOM, this is the amount of free bytes reported by cuda
  std::shared_ptr<GatheredContext> context_; // optional (defaults to nullptr)
  cudaStream_t stream_;
  int64_t size_;
};
// Point-in-time view of allocator state: every segment plus the recorded
// per-device event traces.
struct SnapshotInfo {
  std::vector<SegmentInfo> segments;
  std::vector<std::vector<TraceEntry>> device_traces;
};
// returns the pointers freed in the pool
// and the pointers allocated. Note: a pointer
// may appear in both freed and allocated
struct CheckpointDelta {
std::vector<void*> ptrs_freed;
std::vector<at::DataPtr> dataptrs_allocd;
};
C10_CUDA_API void setAllocatorSettings(const std::string& env);
// Size pretty-printer
std::string format_size(uint64_t size);
using OutOfMemoryObserver = std::function<void(
int64_t device,
int64_t allocated,
int64_t device_total,
int64_t device_free)>;
// Abstract interface implemented by the concrete CUDA allocator backends;
// the registered instance is held in the `allocator` atomic below and the
// free functions in this namespace dispatch through it.
class CUDAAllocator : public Allocator {
 public:
  // Raw allocation entry points (no DataPtr wrapping).
  virtual void* raw_alloc(size_t nbytes) = 0;
  virtual void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) = 0;
  virtual void raw_delete(void* ptr) = 0;
  // Lifecycle and configuration.
  virtual void init(int device_count) = 0;
  virtual bool initialized() = 0;
  virtual void setMemoryFraction(double fraction, int device) = 0;
  virtual void emptyCache() = 0;
  virtual void cacheInfo(int dev_id, size_t* largestBlock) = 0;
  virtual void* getBaseAllocation(void* ptr, size_t* size) = 0;
  virtual void recordStream(const DataPtr&, CUDAStream stream) = 0;
  // Statistics and introspection.
  virtual DeviceStats getDeviceStats(int device) = 0;
  virtual void resetAccumulatedStats(int device) = 0;
  virtual void resetPeakStats(int device) = 0;
  virtual SnapshotInfo snapshot() = 0;
  // CUDA-graph / private-pool support.
  virtual void beginAllocateStreamToPool(
      int device,
      cudaStream_t stream,
      MempoolId_t mempool_id) = 0;
  virtual void endAllocateStreamToPool(int device, cudaStream_t stream) = 0;
  virtual void releasePool(int device, MempoolId_t mempool_id) = 0;
  // returns true if the allocated blocks are equal to expected live allocations
  // (optional: the default implementation rejects the call at runtime)
  virtual bool checkPoolLiveAllocations(
      int device,
      MempoolId_t mempool_id,
      const std::unordered_set<void*>& expected_live_allocations) {
    TORCH_CHECK(
        false,
        name(),
        " does not yet support checkPoolLiveAllocations. "
        "If you need it, please file an issue describing your use case.");
  }
  virtual std::shared_ptr<void> getIpcDevPtr(std::string handle) = 0;
  // Optional history recording (default implementation rejects at runtime).
  virtual bool isHistoryEnabled() {
    TORCH_CHECK(
        false,
        name(),
        " does not yet support recordHistory. "
        "If you need it, please file an issue describing your use case.");
  }
  virtual void recordHistory(
      bool enabled,
      CreateContextFn context_recorder,
      size_t alloc_trace_max_entries,
      bool alloc_trace_record_context) = 0;
  virtual void attachOutOfMemoryObserver(OutOfMemoryObserver observer) = 0;
  virtual void enablePeerAccess(int dev, int dev_to_access) = 0;
  // memory not allocated from cudaMalloc cannot be copied
  // across devices using cudaMemcpyAsync if peer to peer access is disabled.
  // instead it requires cudaMemcpyAsyncPeer
  //   with P2P Enabled, all combinations work
  //   with P2P Disabled:
  //               cudaMalloc cudaMallocAsync/cuMemMap
  // cudaMemcpyAsyncPeer   works         works
  //  cudaMemcpyAsync      works         error
  // This function chooses to use the Peer version of
  // memcpy if required, based on where dst/src were allocated.
  virtual cudaError_t memcpyAsync(
      void* dst,
      int dstDevice,
      const void* src,
      int srcDevice,
      size_t count,
      cudaStream_t stream,
      bool p2p_enabled) = 0;
  // Checkpointing: capture and later restore the state of a private pool.
  virtual std::shared_ptr<AllocatorState> getCheckpointState(
      int device,
      MempoolId_t id) = 0;
  virtual CheckpointDelta setCheckpointPoolState(
      int device,
      std::shared_ptr<AllocatorState> pps) = 0;
  // Human-readable backend name (used in the error messages above).
  virtual std::string name() = 0;
};
// Allocator object, statically initialized
// See BackendInitializer in CUDACachingAllocator.cpp.
// Atomic loads on x86 are just normal loads,
// (atomic stores are different), so reading this value
// is no different than loading a pointer.
C10_CUDA_API extern std::atomic<CUDAAllocator*> allocator;

// Fetches the active allocator backend; the inline free functions below
// all forward through this.
inline CUDAAllocator* get() {
  return allocator.load();
}
// Called directly by clients.
inline void* raw_alloc(size_t nbytes) {
return get()->raw_alloc(nbytes);
}
inline void* raw_alloc_with_stream(size_t nbytes, cudaStream_t stream) {
return get()->raw_alloc_with_stream(nbytes, stream);
}
inline void raw_delete(void* ptr) {
return get()->raw_delete(ptr);
}
inline void init(int device_count) {
return get()->init(device_count);
}
inline void setMemoryFraction(double fraction, int device) {
return get()->setMemoryFraction(fraction, device);
}
inline void emptyCache() {
return get()->emptyCache();
}
inline void cacheInfo(int dev_id, size_t* largestBlock) {
return get()->cacheInfo(dev_id, largestBlock);
}
inline void* getBaseAllocation(void* ptr, size_t* size) {
return get()->getBaseAllocation(ptr, size);
}
inline void recordStream(const DataPtr& dataPtr, CUDAStream stream) {
return get()->recordStream(dataPtr, stream);
}
inline DeviceStats getDeviceStats(int device) {
return get()->getDeviceStats(device);
}
inline void resetAccumulatedStats(int device) {
return get()->resetAccumulatedStats(device);
}
inline void resetPeakStats(int device) {
return get()->resetPeakStats(device);
}
inline SnapshotInfo snapshot() {
return get()->snapshot();
}
inline std::shared_ptr<AllocatorState> getCheckpointState(
int device,
MempoolId_t id) {
return get()->getCheckpointState(device, id);
}
inline CheckpointDelta setCheckpointPoolState(
int device,
std::shared_ptr<AllocatorState> pps) {
return get()->setCheckpointPoolState(device, pps);
}
// CUDAGraph interactions
inline void beginAllocateStreamToPool(
int device,
cudaStream_t stream,
MempoolId_t mempool_id) {
return get()->beginAllocateStreamToPool(device, stream, mempool_id);
}
inline void endAllocateStreamToPool(int device, cudaStream_t stream) {
return get()->endAllocateStreamToPool(device, stream);
}
inline void recordHistory(
bool enabled,
CreateContextFn context_recorder,
size_t alloc_trace_max_entries,
bool alloc_trace_record_context) {
return get()->recordHistory(
enabled,
context_recorder,
alloc_trace_max_entries,
alloc_trace_record_context);
}
inline bool isHistoryEnabled() {
return get()->isHistoryEnabled();
}
inline bool checkPoolLiveAllocations(
int device,
MempoolId_t mempool_id,
const std::unordered_set<void*>& expected_live_allocations) {
return get()->checkPoolLiveAllocations(
device, mempool_id, expected_live_allocations);
}
inline void attachOutOfMemoryObserver(OutOfMemoryObserver observer) {
return get()->attachOutOfMemoryObserver(observer);
}
inline void releasePool(int device, MempoolId_t mempool_id) {
return get()->releasePool(device, mempool_id);
}
// Not part of CUDA_ALLOCATOR_BACKEND_INTERFACE
inline std::shared_ptr<void> getIpcDevPtr(std::string handle) {
return get()->getIpcDevPtr(handle);
}
inline std::string name() {
return get()->name();
}
inline cudaError_t memcpyAsync(
void* dst,
int dstDevice,
const void* src,
int srcDevice,
size_t count,
cudaStream_t stream,
bool p2p_enabled) {
return get()->memcpyAsync(
dst, dstDevice, src, srcDevice, count, stream, p2p_enabled);
}
inline void enablePeerAccess(int dev, int dev_to_access) {
return get()->enablePeerAccess(dev, dev_to_access);
}
} // namespace CUDACachingAllocator
} // namespace cuda
} // namespace c10
| 13,193
| 30.117925
| 80
|
h
|
null |
pytorch-main/c10/cuda/CUDADeviceAssertion.h
|
#pragma once
#include <c10/cuda/CUDAException.h>
#include <c10/macros/Macros.h>
namespace c10 {
namespace cuda {
#ifdef TORCH_USE_CUDA_DSA
// Copy string from `src` to `dst`, truncating to at most
// C10_CUDA_DSA_MAX_STR_LEN - 1 characters. `dst` is always NUL-terminated,
// so it must provide C10_CUDA_DSA_MAX_STR_LEN bytes in the worst case.
static __device__ void dstrcpy(char* dst, const char* src) {
  int i = 0;
  // Copy string from source to destination, ensuring that it
  // isn't longer than `C10_CUDA_DSA_MAX_STR_LEN-1`
  while (*src != '\0' && i++ < C10_CUDA_DSA_MAX_STR_LEN - 1) {
    *dst++ = *src++;
  }
  *dst = '\0';
}
// Appends one assertion-failure record to `assertions_data` (no-op when
// recording is disabled at run-time). Safe for concurrent failures: each
// thread claims a slot via atomicAdd before writing; failures beyond the
// buffer capacity are silently dropped.
static __device__ void dsa_add_new_assertion_failure(
    DeviceAssertionsData* assertions_data,
    const char* assertion_msg,
    const char* filename,
    const char* function_name,
    const int line_number,
    const uint32_t caller,
    const dim3 block_id,
    const dim3 thread_id) {
  // `assertions_data` may be nullptr if device-side assertion checking
  // is disabled at run-time. If it is disabled at compile time this
  // function will never be called
  if (!assertions_data) {
    return;
  }
  // Atomically increment so other threads can fail at the same time
  // Note that incrementing this means that the CPU can observe that
  // a failure has happened and can begin to respond before we've
  // written information about that failure out to the buffer.
  const auto nid = atomicAdd(&(assertions_data->assertion_count), 1);
  if (nid >= C10_CUDA_DSA_ASSERTION_COUNT) {
    // At this point we've run out of assertion buffer space.
    // We could print a message about this, but that'd get
    // spammy if a lot of threads did it, so we just silently
    // ignore any other assertion failures. In most cases the
    // failures will all probably be analogous anyway.
    return;
  }
  // Write information about the assertion failure to memory.
  // Note that this occurs only after the `assertion_count`
  // increment broadcasts that there's been a problem.
  auto& self = assertions_data->assertions[nid];
  dstrcpy(self.assertion_msg, assertion_msg);
  dstrcpy(self.filename, filename);
  dstrcpy(self.function_name, function_name);
  self.line_number = line_number;
  self.caller = caller;
  self.block_id[0] = block_id.x;
  self.block_id[1] = block_id.y;
  self.block_id[2] = block_id.z;
  self.thread_id[0] = thread_id.x;
  self.thread_id[1] = thread_id.y;
  self.thread_id[2] = thread_id.z;
}
// Emulates a kernel assertion. The assertion won't stop the kernel's progress,
// so you should assume everything the kernel produces is garbage if there's an
// assertion failure.
// NOTE: This assumes that `assertions_data` and `assertion_caller_id` are
// arguments of the kernel and therefore accessible.
#define CUDA_KERNEL_ASSERT2(condition) \
do { \
if (C10_UNLIKELY(!(condition))) { \
/* Has an atomic element so threads can fail at the same time */ \
c10::cuda::dsa_add_new_assertion_failure( \
assertions_data, \
C10_STRINGIZE(condition), \
__FILE__, \
__FUNCTION__, \
__LINE__, \
assertion_caller_id, \
blockIdx, \
threadIdx); \
/* Now that the kernel has failed we early exit the kernel, but */ \
/* otherwise keep going and rely on the host to check UVM and */ \
/* determine we've had a problem */ \
return; \
} \
} while (false)
#else
#define CUDA_KERNEL_ASSERT2(condition) assert(condition)
#endif
} // namespace cuda
} // namespace c10
| 4,096
| 40.383838
| 79
|
h
|
null |
pytorch-main/c10/cuda/CUDADeviceAssertionHost.h
|
#pragma once
#include <c10/cuda/CUDAMacros.h>
#include <memory>
#include <mutex>
#include <string>
#include <vector>
#ifdef USE_CUDA
#define TORCH_USE_CUDA_DSA
#endif
/// Number of assertion failure messages we can store. If this is too small
/// threads will fail silently.
constexpr int C10_CUDA_DSA_ASSERTION_COUNT = 10;
constexpr int C10_CUDA_DSA_MAX_STR_LEN = 512;
namespace c10 {
namespace cuda {
/// Holds information about any device-side assertions that fail.
/// Held in managed memory and access by both the CPU and the GPU.
struct DeviceAssertionData {
/// Stringification of the assertion
char assertion_msg[C10_CUDA_DSA_MAX_STR_LEN];
/// File the assertion was in
char filename[C10_CUDA_DSA_MAX_STR_LEN];
/// Name of the function the assertion was in
char function_name[C10_CUDA_DSA_MAX_STR_LEN];
/// Line number the assertion was at
int line_number;
/// Number uniquely identifying the kernel launch that triggered the assertion
uint32_t caller;
/// block_id of the thread that failed the assertion
int32_t block_id[3];
/// third_id of the thread that failed the assertion
int32_t thread_id[3];
};
/// Used to hold assertions generated by the device
/// Held in managed memory and access by both the CPU and the GPU.
struct DeviceAssertionsData {
/// Total number of assertions found; a subset of thse will be recorded
/// in `assertions`
int32_t assertion_count;
/// An array of assertions that will be written to in a race-free manner
DeviceAssertionData assertions[C10_CUDA_DSA_ASSERTION_COUNT];
};
/// Use to hold info about kernel launches so that we can run kernels
/// asynchronously and still associate launches with device-side
/// assertion failures
struct CUDAKernelLaunchInfo {
/// Filename of the code where the kernel was launched from
const char* launch_filename;
/// Function from which the kernel was launched
const char* launch_function;
/// Line number of where the code was launched from
uint32_t launch_linenum;
/// Backtrace of where the kernel was launched from, only populated if
/// CUDAKernelLaunchRegistry::gather_launch_stacktrace is True
std::string launch_stacktrace;
/// Kernel that was launched
const char* kernel_name;
/// Device the kernel was launched on
int device;
/// Stream the kernel was launched on
int32_t stream;
/// A number that uniquely identifies the kernel launch
uint64_t generation_number;
};
/// Circular buffer used to hold information about kernel launches
/// this is later used to reconstruct how a device-side kernel assertion failure
/// occurred CUDAKernelLaunchRegistry is used as a singleton
class C10_CUDA_API CUDAKernelLaunchRegistry {
private:
/// Assume that this is the max number of kernel launches that might ever be
/// enqueued across all streams on a single device
static constexpr int max_kernel_launches = 1024;
/// How many kernel launch infos we've inserted. Used to ensure that circular
/// queue doesn't provide false information by always increasing, but also to
/// mark where we are inserting into the queue
#ifdef TORCH_USE_CUDA_DSA
uint64_t generation_number = 0;
#endif
/// Shared mutex between writer and accessor to ensure multi-threaded safety.
mutable std::mutex read_write_mutex;
/// Used to ensure prevent race conditions in GPU memory allocation
mutable std::mutex gpu_alloc_mutex;
/// Pointer to managed memory keeping track of device-side assertions. There
/// is one entry for each possible device the process might work with. Unused
/// entries are nullptrs. We could also use an unordered_set here, but this
/// vector design will be faster and the wasted memory is small since we
/// expect the number of GPUs per node will always be small
std::vector<
std::unique_ptr<DeviceAssertionsData, void (*)(DeviceAssertionsData*)>>
uvm_assertions;
/// A single circular buffer holds information about every kernel launch the
/// process makes across all devices.
std::vector<CUDAKernelLaunchInfo> kernel_launches;
bool check_env_for_enable_launch_stacktracing() const;
bool check_env_for_dsa_enabled() const;
public:
CUDAKernelLaunchRegistry();
/// Register a new kernel launch and obtain a generation number back to be
/// passed to the kernel
uint32_t insert(
const char* launch_filename,
const char* launch_function,
const uint32_t launch_linenum,
const char* kernel_name,
const int32_t stream_id);
/// Get copies of the kernel launch registry and each device's assertion
/// failure buffer so they can be inspected without raising race conditions
std::
pair<std::vector<DeviceAssertionsData>, std::vector<CUDAKernelLaunchInfo>>
snapshot() const;
/// Get a pointer to the current device's assertion failure buffer. If no such
/// buffer exists then one is created. This means that the first kernel launch
/// made on each device will be slightly slower because memory allocations are
/// required
DeviceAssertionsData* get_uvm_assertions_ptr_for_current_device();
/// Gets the global singleton of the registry
static CUDAKernelLaunchRegistry& get_singleton_ref();
/// If not all devices support DSA, we disable it
const bool do_all_devices_support_managed_memory = false;
/// Whether or not to gather stack traces when launching kernels
bool gather_launch_stacktrace = false;
/// Whether or not host-side DSA is enabled or disabled at run-time
/// Note: Device-side code cannot be enabled/disabled at run-time
bool enabled_at_runtime = false;
/// Whether or not a device has indicated a failure
bool has_failed() const;
#ifdef TORCH_USE_CUDA_DSA
const bool enabled_at_compile_time = true;
#else
const bool enabled_at_compile_time = false;
#endif
};
std::string c10_retrieve_device_side_assertion_info();
} // namespace cuda
} // namespace c10
// Each kernel launched with TORCH_DSA_KERNEL_LAUNCH
// requires the same input arguments. We introduce the following macro to
// standardize these.
#define TORCH_DSA_KERNEL_ARGS \
[[maybe_unused]] c10::cuda::DeviceAssertionsData *const assertions_data, \
[[maybe_unused]] uint32_t assertion_caller_id
// This macro can be used to pass the DSA arguments onward to another
// function
#define TORCH_DSA_KERNEL_ARGS_PASS assertions_data, assertion_caller_id
| 6,394
| 39.220126
| 80
|
h
|
null |
pytorch-main/c10/cuda/CUDAException.h
|
#pragma once
#include <c10/cuda/CUDADeviceAssertionHost.h>
#include <c10/cuda/CUDAMacros.h>
#include <c10/cuda/CUDAMiscFunctions.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>
#include <cuda.h>
// Note [CHECK macro]
// ~~~~~~~~~~~~~~~~~~
// This is a macro so that AT_ERROR can get accurate __LINE__
// and __FILE__ information. We could split this into a short
// macro and a function implementation if we pass along __LINE__
// and __FILE__, but no one has found this worth doing.
// Used to denote errors from CUDA framework.
// This needs to be declared here instead util/Exception.h for proper conversion
// during hipify.
namespace c10 {
class C10_CUDA_API CUDAError : public c10::Error {
using Error::Error;
};
} // namespace c10
#define C10_CUDA_CHECK(EXPR) \
do { \
const cudaError_t __err = EXPR; \
c10::cuda::c10_cuda_check_implementation( \
static_cast<int32_t>(__err), \
__FILE__, \
__func__, /* Line number data type not well-defined between \
compilers, so we perform an explicit cast */ \
static_cast<uint32_t>(__LINE__), \
true); \
} while (0)
#define C10_CUDA_CHECK_WARN(EXPR) \
do { \
const cudaError_t __err = EXPR; \
if (C10_UNLIKELY(__err != cudaSuccess)) { \
auto error_unused C10_UNUSED = cudaGetLastError(); \
(void)error_unused; \
TORCH_WARN("CUDA warning: ", cudaGetErrorString(__err)); \
} \
} while (0)
// Indicates that a CUDA error is handled in a non-standard way
#define C10_CUDA_ERROR_HANDLED(EXPR) EXPR
// Intentionally ignore a CUDA error
#define C10_CUDA_IGNORE_ERROR(EXPR) \
do { \
const cudaError_t __err = EXPR; \
if (C10_UNLIKELY(__err != cudaSuccess)) { \
cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
(void)error_unused; \
} \
} while (0)
// Clear the last CUDA error
#define C10_CUDA_CLEAR_ERROR() \
do { \
cudaError_t error_unused C10_UNUSED = cudaGetLastError(); \
(void)error_unused; \
} while (0)
// This should be used directly after every kernel launch to ensure
// the launch happened correctly and provide an early, close-to-source
// diagnostic if it didn't.
#define C10_CUDA_KERNEL_LAUNCH_CHECK() C10_CUDA_CHECK(cudaGetLastError())
/// Launches a CUDA kernel appending to it all the information need to handle
/// device-side assertion failures. Checks that the launch was successful.
#define TORCH_DSA_KERNEL_LAUNCH( \
kernel, blocks, threads, shared_mem, stream, ...) \
do { \
auto& launch_registry = \
c10::cuda::CUDAKernelLaunchRegistry::get_singleton_ref(); \
kernel<<<blocks, threads, shared_mem, stream>>>( \
__VA_ARGS__, \
launch_registry.get_uvm_assertions_ptr_for_current_device(), \
launch_registry.insert( \
__FILE__, __FUNCTION__, __LINE__, #kernel, stream.id())); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
} while (0)
namespace c10 {
namespace cuda {
/// In the event of a CUDA failure, formats a nice error message about that
/// failure and also checks for device-side assertion failures
C10_CUDA_API void c10_cuda_check_implementation(
const int32_t err,
const char* filename,
const char* function_name,
const int line_number,
const bool include_device_assertions);
} // namespace cuda
} // namespace c10
| 4,577
| 43.446602
| 80
|
h
|
null |
pytorch-main/c10/cuda/CUDAFunctions.h
|
#pragma once
// This header provides C++ wrappers around commonly used CUDA API functions.
// The benefit of using C++ here is that we can raise an exception in the
// event of an error, rather than explicitly pass around error codes. This
// leads to more natural APIs.
//
// The naming convention used here matches the naming convention of torch.cuda
#include <c10/core/Device.h>
#include <c10/core/impl/GPUTrace.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAMacros.h>
#include <cuda_runtime_api.h>
namespace c10 {
namespace cuda {
// NB: In the past, we were inconsistent about whether or not this reported
// an error if there were driver problems are not. Based on experience
// interacting with users, it seems that people basically ~never want this
// function to fail; it should just return zero if things are not working.
// Oblige them.
// It still might log a warning for user first time it's invoked
C10_CUDA_API DeviceIndex device_count() noexcept;
// Version of device_count that throws is no devices are detected
C10_CUDA_API DeviceIndex device_count_ensure_non_zero();
C10_CUDA_API DeviceIndex current_device();
C10_CUDA_API void set_device(DeviceIndex device);
C10_CUDA_API void device_synchronize();
C10_CUDA_API void warn_or_error_on_sync();
// Raw CUDA device management functions
C10_CUDA_API cudaError_t GetDeviceCount(int* dev_count);
C10_CUDA_API cudaError_t GetDevice(int* device);
C10_CUDA_API cudaError_t SetDevice(int device);
C10_CUDA_API cudaError_t MaybeSetDevice(int device);
C10_CUDA_API int ExchangeDevice(int device);
C10_CUDA_API int MaybeExchangeDevice(int device);
C10_CUDA_API void SetTargetDevice();
// Reaction applied when a synchronizing CUDA call is detected: do
// nothing, warn, or raise an error.
enum class SyncDebugMode { L_DISABLED = 0, L_WARN, L_ERROR };

// this is a holder for c10 global state (similar to at GlobalContext)
// currently it's used to store cuda synchronization warning state,
// but can be expanded to hold other related global state, e.g. to
// record stream usage
class WarningState {
 public:
  /// Sets the reaction to synchronizing CUDA calls.
  void set_sync_debug_mode(SyncDebugMode l) {
    sync_debug_mode = l;
  }

  /// Returns the current reaction to synchronizing CUDA calls.
  /// const: reading never mutates the state.
  SyncDebugMode get_sync_debug_mode() const {
    return sync_debug_mode;
  }

 private:
  // Default: synchronizations are not reported at all.
  SyncDebugMode sync_debug_mode = SyncDebugMode::L_DISABLED;
};
// Meyers-singleton accessor for the process-wide WarningState; the
// function-local static guarantees thread-safe, on-first-use init (C++11).
C10_CUDA_API __inline__ WarningState& warning_state() {
  static WarningState warning_state_;
  return warning_state_;
}
// the subsequent functions are defined in the header because for performance
// reasons we want them to be inline
// Copies `nbytes` from `src` to `dst` on `stream`, then blocks the host
// until the stream has drained. Because this synchronizes, it first
// reports to the sync-debug machinery and the GPU trace hooks.
C10_CUDA_API void __inline__ memcpy_and_sync(
    void* dst,
    const void* src,
    int64_t nbytes,
    cudaMemcpyKind kind,
    cudaStream_t stream) {
  // Warn or error if the user asked to be notified about synchronizing calls.
  if (C10_UNLIKELY(
          warning_state().get_sync_debug_mode() != SyncDebugMode::L_DISABLED)) {
    warn_or_error_on_sync();
  }
  // Let an attached Python interpreter trace this synchronization.
  const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
  if (C10_UNLIKELY(interp)) {
    (*interp)->trace_gpu_stream_synchronization(
        reinterpret_cast<uintptr_t>(stream));
  }
#if defined(TORCH_HIP_VERSION) && (TORCH_HIP_VERSION >= 301)
  // ROCm >= 3.1 provides a fused copy-and-synchronize entry point.
  C10_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream));
#else
  C10_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
  C10_CUDA_CHECK(cudaStreamSynchronize(stream));
#endif
}
// Blocks the host until `stream` has finished all queued work, reporting
// the synchronization to the sync-debug machinery and GPU trace hooks first.
C10_CUDA_API void __inline__ stream_synchronize(cudaStream_t stream) {
  // Warn or error if the user asked to be notified about synchronizing calls.
  if (C10_UNLIKELY(
          warning_state().get_sync_debug_mode() != SyncDebugMode::L_DISABLED)) {
    warn_or_error_on_sync();
  }
  // Let an attached Python interpreter trace this synchronization.
  const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
  if (C10_UNLIKELY(interp)) {
    (*interp)->trace_gpu_stream_synchronization(
        reinterpret_cast<uintptr_t>(stream));
  }
  C10_CUDA_CHECK(cudaStreamSynchronize(stream));
}
C10_CUDA_API bool hasPrimaryContext(int64_t device_index);
C10_CUDA_API c10::optional<int64_t> getDeviceIndexWithPrimaryContext();
} // namespace cuda
} // namespace c10
| 3,848
| 31.344538
| 80
|
h
|
null |
pytorch-main/c10/cuda/CUDAGraphsC10Utils.h
|
#pragma once
#include <c10/cuda/CUDAStream.h>
#include <utility>
// CUDA Graphs utils used by c10 and aten.
// aten/cuda/CUDAGraphsUtils.cuh adds utils used by aten only.
namespace c10 {
namespace cuda {
using CaptureId_t = unsigned long long;
// first is set if the instance is created by CUDAGraph::capture_begin.
// second is set if the instance is created by at::cuda::graph_pool_handle.
using MempoolId_t = std::pair<CaptureId_t, CaptureId_t>;
// RAII guard for "cudaStreamCaptureMode", a thread-local value
// that controls the error-checking strictness of a capture.
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
struct C10_CUDA_API CUDAStreamCaptureModeGuard {
CUDAStreamCaptureModeGuard(cudaStreamCaptureMode desired) {
strictness_ = desired;
C10_CUDA_CHECK(cudaThreadExchangeStreamCaptureMode(&strictness_));
}
~CUDAStreamCaptureModeGuard() {
C10_CUDA_CHECK_WARN(cudaThreadExchangeStreamCaptureMode(&strictness_));
}
private:
cudaStreamCaptureMode strictness_;
};
#endif
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
// Protects against enum cudaStreamCaptureStatus implementation changes.
// Some compilers seem not to like static_assert without the messages.
static_assert(
int(cudaStreamCaptureStatus::cudaStreamCaptureStatusNone) == 0,
"unexpected int(cudaStreamCaptureStatusNone) value");
static_assert(
int(cudaStreamCaptureStatus::cudaStreamCaptureStatusActive) == 1,
"unexpected int(cudaStreamCaptureStatusActive) value");
static_assert(
int(cudaStreamCaptureStatus::cudaStreamCaptureStatusInvalidated) == 2,
"unexpected int(cudaStreamCaptureStatusInvalidated) value");
#endif
// Type-safe mirror of cudaStreamCaptureStatus. On ROCm builds older than
// 5.3 (no stream-capture support) it collapses to a single `None` state.
enum class CaptureStatus : int {
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
  None = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusNone),
  Active = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusActive),
  Invalidated = int(cudaStreamCaptureStatus::cudaStreamCaptureStatusInvalidated)
#else
  None = 0
#endif
};
// Streams the canonical CUDA-runtime name of a CaptureStatus enumerator.
// An unknown value trips an internal assert (should be unreachable).
inline std::ostream& operator<<(std::ostream& os, CaptureStatus status) {
  const char* label = nullptr;
  switch (status) {
    case CaptureStatus::None:
      label = "cudaStreamCaptureStatusNone";
      break;
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
    case CaptureStatus::Active:
      label = "cudaStreamCaptureStatusActive";
      break;
    case CaptureStatus::Invalidated:
      label = "cudaStreamCaptureStatusInvalidated";
      break;
#endif
    default:
      TORCH_INTERNAL_ASSERT(
          false, "Unknown CUDA graph CaptureStatus", int(status));
  }
  os << label;
  return os;
}
// Use this version where you're sure a CUDA context exists already.
inline CaptureStatus currentStreamCaptureStatusMayInitCtx() {
#if !defined(USE_ROCM) || ROCM_VERSION >= 50300
  // Ask the runtime whether the current thread's stream is being captured.
  // NOTE(review): the "MayInitCtx" suffix suggests this call can lazily
  // create a CUDA context — confirm against the callers that care.
  cudaStreamCaptureStatus is_capturing;
  C10_CUDA_CHECK(
      cudaStreamIsCapturing(c10::cuda::getCurrentCUDAStream(), &is_capturing));
  return CaptureStatus(is_capturing);
#else
  // No stream-capture support on this ROCm version, so never capturing.
  return CaptureStatus::None;
#endif
}
} // namespace cuda
} // namespace c10
| 2,979
| 31.043011
| 80
|
h
|
null |
pytorch-main/c10/cuda/CUDAGuard.h
|
#pragma once
#include <c10/core/DeviceType.h>
#include <c10/core/impl/InlineDeviceGuard.h>
#include <c10/core/impl/InlineStreamGuard.h>
#include <c10/cuda/CUDAMacros.h>
#include <c10/cuda/impl/CUDAGuardImpl.h>
#include <cstddef>
namespace c10 {
namespace cuda {
// This code is kind of boilerplatey. See Note [Whither the DeviceGuard
// boilerplate]
/// A variant of DeviceGuard that is specialized for CUDA. It accepts
/// integer indices (interpreting them as CUDA devices) and is a little
/// more efficient than DeviceGuard (it compiles to straight line
/// cudaSetDevice/cudaGetDevice calls); however, it can only be used
/// from code that links against CUDA directly.
struct CUDAGuard {
  /// No default constructor; see Note [Omitted default constructor from RAII]
  explicit CUDAGuard() = delete;
  /// Set the current CUDA device to the passed device index.
  explicit CUDAGuard(DeviceIndex device_index) : guard_(device_index) {}
  /// Sets the current CUDA device to the passed device. Errors if the passed
  /// device is not a CUDA device.
  explicit CUDAGuard(Device device) : guard_(device) {}
  // Copy is not allowed
  CUDAGuard(const CUDAGuard&) = delete;
  CUDAGuard& operator=(const CUDAGuard&) = delete;
  // Move is not allowed (there is no uninitialized state)
  CUDAGuard(CUDAGuard&& other) = delete;
  CUDAGuard& operator=(CUDAGuard&& other) = delete;
  /// Sets the CUDA device to the given device. Errors if the given device
  /// is not a CUDA device.
  void set_device(Device device) {
    guard_.set_device(device);
  }
  /// Sets the CUDA device to the given device. Errors if the given device
  /// is not a CUDA device. (This method is provided for uniformity with
  /// DeviceGuard).
  void reset_device(Device device) {
    guard_.reset_device(device);
  }
  /// Sets the CUDA device to the given device index.
  void set_index(DeviceIndex device_index) {
    guard_.set_index(device_index);
  }
  /// Returns the device that was set upon construction of the guard
  Device original_device() const {
    return guard_.original_device();
  }
  /// Returns the last device that was set via `set_device`, if any, otherwise
  /// the device passed during construction.
  Device current_device() const {
    return guard_.current_device();
  }
 private:
  /// The guard for the current device; all public methods forward here, and
  /// its destructor restores the original device when this guard dies.
  c10::impl::InlineDeviceGuard<impl::CUDAGuardImpl> guard_;
};
/// A variant of OptionalDeviceGuard that is specialized for CUDA. See
/// CUDAGuard for when you can use this.
struct OptionalCUDAGuard {
  /// Create an uninitialized OptionalCUDAGuard.
  explicit OptionalCUDAGuard() : guard_() {}
  /// Set the current CUDA device to the passed Device, if it is not nullopt.
  explicit OptionalCUDAGuard(optional<Device> device_opt)
      : guard_(device_opt) {}
  /// Set the current CUDA device to the passed device index, if it is not
  /// nullopt
  explicit OptionalCUDAGuard(optional<DeviceIndex> device_index_opt)
      : guard_(device_index_opt) {}
  // Copy is not allowed
  OptionalCUDAGuard(const OptionalCUDAGuard&) = delete;
  OptionalCUDAGuard& operator=(const OptionalCUDAGuard&) = delete;
  // See Note [Move construction for RAII guards is tricky]
  OptionalCUDAGuard(OptionalCUDAGuard&& other) = delete;
  // See Note [Move assignment for RAII guards is tricky]
  OptionalCUDAGuard& operator=(OptionalCUDAGuard&& other) = delete;
  /// Sets the CUDA device to the given device, initializing the guard if it
  /// is not already initialized. Errors if the given device is not a CUDA
  /// device.
  void set_device(Device device) {
    guard_.set_device(device);
  }
  /// Sets the CUDA device to the given device, initializing the guard if it is
  /// not already initialized. Errors if the given device is not a CUDA device.
  /// (This method is provided for uniformity with OptionalDeviceGuard).
  void reset_device(Device device) {
    guard_.reset_device(device);
  }
  /// Sets the CUDA device to the given device index, initializing the guard if
  /// it is not already initialized.
  void set_index(DeviceIndex device_index) {
    guard_.set_index(device_index);
  }
  /// Returns the device that was set immediately prior to initialization of the
  /// guard, or nullopt if the guard is uninitialized.
  optional<Device> original_device() const {
    return guard_.original_device();
  }
  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device, if the guard is initialized,
  /// or nullopt if the guard is uninitialized.
  optional<Device> current_device() const {
    return guard_.current_device();
  }
  /// Restore the original CUDA device, resetting this guard to uninitialized
  /// state.
  void reset() {
    guard_.reset();
  }
 private:
  // All methods forward to the generic inline optional guard; "optional"
  // means the guard may hold no device at all (no-op on destruction).
  c10::impl::InlineOptionalDeviceGuard<impl::CUDAGuardImpl> guard_;
};
/// A variant of StreamGuard that is specialized for CUDA. See CUDAGuard
/// for when you can use this.
struct CUDAStreamGuard {
  /// No default constructor, see Note [Omitted default constructor from RAII]
  explicit CUDAStreamGuard() = delete;
  /// Set the current CUDA device to the device associated with the passed
  /// stream, and set the current CUDA stream on that device to the passed
  /// stream. Errors if the Stream is not a CUDA stream.
  explicit CUDAStreamGuard(Stream stream) : guard_(stream) {}
  /// Copy is disallowed
  CUDAStreamGuard(const CUDAStreamGuard&) = delete;
  CUDAStreamGuard& operator=(const CUDAStreamGuard&) = delete;
  /// Move is disallowed, as CUDAStreamGuard does not have an uninitialized
  /// state, which is required for moves on types with nontrivial destructors.
  CUDAStreamGuard(CUDAStreamGuard&& other) = delete;
  CUDAStreamGuard& operator=(CUDAStreamGuard&& other) = delete;
  /// Resets the currently set stream to the original stream and
  /// the currently set device to the original device. Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  /// Errors if the stream passed is not a CUDA stream.
  ///
  /// NOTE: this implementation may skip some stream/device setting if
  /// it can prove that it is unnecessary.
  ///
  /// WARNING: reset_stream does NOT preserve previously set streams on
  /// different devices. If you need to set streams on multiple devices
  /// on CUDA, use CUDAMultiStreamGuard instead.
  void reset_stream(Stream stream) {
    guard_.reset_stream(stream);
  }
  /// Returns the CUDA stream that was set at the time the guard was
  /// constructed.
  CUDAStream original_stream() const {
    // UNCHECKED is safe: the guard only ever stores CUDA streams.
    return CUDAStream(CUDAStream::UNCHECKED, guard_.original_stream());
  }
  /// Returns the most recent CUDA stream that was set using this device guard,
  /// either from construction, or via set_stream.
  CUDAStream current_stream() const {
    // UNCHECKED is safe: the guard only ever stores CUDA streams.
    return CUDAStream(CUDAStream::UNCHECKED, guard_.current_stream());
  }
  /// Returns the most recent CUDA device that was set using this device guard,
  /// either from construction, or via set_device/reset_device/set_index.
  Device current_device() const {
    return guard_.current_device();
  }
  /// Returns the CUDA device that was set at the most recent reset_stream(),
  /// or otherwise the device at construction time.
  Device original_device() const {
    return guard_.original_device();
  }
 private:
  // Generic stream guard parameterized on the CUDA backend implementation.
  c10::impl::InlineStreamGuard<impl::CUDAGuardImpl> guard_;
};
/// A variant of OptionalStreamGuard that is specialized for CUDA. See
/// CUDAGuard for when you can use this.
struct OptionalCUDAStreamGuard {
  /// Create an uninitialized guard.
  explicit OptionalCUDAStreamGuard() : guard_() {}
  /// Set the current CUDA device to the device associated with the passed
  /// stream, and set the current CUDA stream on that device to the passed
  /// stream. Errors if the Stream is not a CUDA stream.
  explicit OptionalCUDAStreamGuard(Stream stream) : guard_(stream) {}
  /// Set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream,
  /// if the passed stream is not nullopt.
  explicit OptionalCUDAStreamGuard(optional<Stream> stream_opt)
      : guard_(stream_opt) {}
  /// Copy is disallowed
  OptionalCUDAStreamGuard(const OptionalCUDAStreamGuard&) = delete;
  OptionalCUDAStreamGuard& operator=(const OptionalCUDAStreamGuard&) = delete;
  // See Note [Move construction for RAII guards is tricky]
  OptionalCUDAStreamGuard(OptionalCUDAStreamGuard&& other) = delete;
  // See Note [Move assignment for RAII guards is tricky]
  OptionalCUDAStreamGuard& operator=(OptionalCUDAStreamGuard&& other) = delete;
  /// Resets the currently set CUDA stream to the original stream and
  /// the currently set device to the original device. Then,
  /// set the current device to the device associated with the passed stream,
  /// and set the current stream on that device to the passed stream.
  /// Initializes the guard if it was not previously initialized.
  void reset_stream(Stream stream) {
    guard_.reset_stream(stream);
  }
  /// Returns the CUDA stream that was set at the time the guard was most
  /// recently initialized, or nullopt if the guard is uninitialized.
  optional<CUDAStream> original_stream() const {
    // Re-wrap the generic Stream as a CUDAStream; UNCHECKED is safe because
    // the guard only ever stores CUDA streams.
    auto r = guard_.original_stream();
    if (r.has_value()) {
      return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
    } else {
      return nullopt;
    }
  }
  /// Returns the most recent CUDA stream that was set using this stream guard,
  /// either from construction, or via reset_stream, if the guard is
  /// initialized, or nullopt if the guard is uninitialized.
  optional<CUDAStream> current_stream() const {
    auto r = guard_.current_stream();
    if (r.has_value()) {
      return make_optional(CUDAStream(CUDAStream::UNCHECKED, r.value()));
    } else {
      return nullopt;
    }
  }
  /// Restore the original CUDA device and stream, resetting this guard to
  /// uninitialized state.
  void reset() {
    guard_.reset();
  }
 private:
  // "Optional" variant: may hold no stream, in which case destruction and
  // reset() are no-ops.
  c10::impl::InlineOptionalStreamGuard<impl::CUDAGuardImpl> guard_;
};
/// A variant of MultiStreamGuard that is specialized for CUDA.
struct CUDAMultiStreamGuard {
explicit CUDAMultiStreamGuard(ArrayRef<CUDAStream> streams)
: guard_(unwrapStreams(streams)) {}
/// Copy is disallowed
CUDAMultiStreamGuard(const CUDAMultiStreamGuard&) = delete;
CUDAMultiStreamGuard& operator=(const CUDAMultiStreamGuard&) = delete;
// See Note [Move construction for RAII guards is tricky]
CUDAMultiStreamGuard(CUDAMultiStreamGuard&& other) = delete;
// See Note [Move assignment for RAII guards is tricky]
CUDAMultiStreamGuard& operator=(CUDAMultiStreamGuard&& other) = delete;
private:
c10::impl::InlineMultiStreamGuard<impl::CUDAGuardImpl> guard_;
static std::vector<Stream> unwrapStreams(ArrayRef<CUDAStream> cudaStreams) {
std::vector<Stream> streams;
streams.reserve(cudaStreams.size());
for (const CUDAStream& cudaStream : cudaStreams) {
streams.push_back(cudaStream);
}
return streams;
}
};
} // namespace cuda
} // namespace c10
| 11,212
| 35.643791
| 80
|
h
|
null |
pytorch-main/c10/cuda/CUDAMacros.h
|
#pragma once
#ifndef C10_USING_CUSTOM_GENERATED_MACROS
// We have not yet modified the AMD HIP build to generate this file so
// we add an extra option to specifically ignore it.
#ifndef C10_CUDA_NO_CMAKE_CONFIGURE_FILE
#include <c10/cuda/impl/cuda_cmake_macros.h>
#endif // C10_CUDA_NO_CMAKE_CONFIGURE_FILE
#endif
// See c10/macros/Export.h for a detailed explanation of what the function
// of these macros are. We need one set of macros for every separate library
// we build.
#ifdef _WIN32
#if defined(C10_CUDA_BUILD_SHARED_LIBS)
#define C10_CUDA_EXPORT __declspec(dllexport)
#define C10_CUDA_IMPORT __declspec(dllimport)
#else
#define C10_CUDA_EXPORT
#define C10_CUDA_IMPORT
#endif
#else // _WIN32
#if defined(__GNUC__)
#define C10_CUDA_EXPORT __attribute__((__visibility__("default")))
#else // defined(__GNUC__)
#define C10_CUDA_EXPORT
#endif // defined(__GNUC__)
#define C10_CUDA_IMPORT C10_CUDA_EXPORT
#endif // _WIN32
// This one is being used by libc10_cuda.so
#ifdef C10_CUDA_BUILD_MAIN_LIB
#define C10_CUDA_API C10_CUDA_EXPORT
#else
#define C10_CUDA_API C10_CUDA_IMPORT
#endif
/**
* The maximum number of GPUs that we recognizes.
*/
#define C10_COMPILE_TIME_MAX_GPUS 16
| 1,193
| 25.533333
| 77
|
h
|
null |
pytorch-main/c10/cuda/CUDAMathCompat.h
|
#pragma once
/* This file defines math functions compatible across different gpu
* platforms (currently CUDA and HIP).
*/
#if defined(__CUDACC__) || defined(__HIPCC__)
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#ifdef __HIPCC__
#define __MATH_FUNCTIONS_DECL__ inline C10_DEVICE
#else /* __HIPCC__ */
#ifdef __CUDACC_RTC__
#define __MATH_FUNCTIONS_DECL__ C10_HOST_DEVICE
#else /* __CUDACC_RTC__ */
#define __MATH_FUNCTIONS_DECL__ static inline C10_HOST_DEVICE
#endif /* __CUDACC_RTC__ */
#endif /* __HIPCC__ */
namespace c10 {
namespace cuda {
namespace compat {
// Each function below is a thin float/double overload pair forwarding to the
// corresponding global ("::") math routine, so templated kernels can write
// compat::exp(x) etc. without spelling the f-suffixed C names per type.
__MATH_FUNCTIONS_DECL__ float abs(float x) {
  return ::fabsf(x);
}
__MATH_FUNCTIONS_DECL__ double abs(double x) {
  return ::fabs(x);
}
__MATH_FUNCTIONS_DECL__ float exp(float x) {
  return ::expf(x);
}
__MATH_FUNCTIONS_DECL__ double exp(double x) {
  return ::exp(x);
}
__MATH_FUNCTIONS_DECL__ float ceil(float x) {
  return ::ceilf(x);
}
__MATH_FUNCTIONS_DECL__ double ceil(double x) {
  return ::ceil(x);
}
__MATH_FUNCTIONS_DECL__ float copysign(float x, float y) {
#if defined(__CUDA_ARCH__) || defined(__HIPCC__)
  return ::copysignf(x, y);
#else
  // std::copysign gets ICE/Segfaults with gcc 7.5/8 on arm64
  // (e.g. Jetson), see PyTorch PR #51834
  // This host function needs to be here for the compiler but is never used
  TORCH_INTERNAL_ASSERT(
      false, "CUDAMathCompat copysign should not run on the CPU");
#endif
}
__MATH_FUNCTIONS_DECL__ double copysign(double x, double y) {
#if defined(__CUDA_ARCH__) || defined(__HIPCC__)
  return ::copysign(x, y);
#else
  // see above
  TORCH_INTERNAL_ASSERT(
      false, "CUDAMathCompat copysign should not run on the CPU");
#endif
}
__MATH_FUNCTIONS_DECL__ float floor(float x) {
  return ::floorf(x);
}
__MATH_FUNCTIONS_DECL__ double floor(double x) {
  return ::floor(x);
}
__MATH_FUNCTIONS_DECL__ float log(float x) {
  return ::logf(x);
}
__MATH_FUNCTIONS_DECL__ double log(double x) {
  return ::log(x);
}
__MATH_FUNCTIONS_DECL__ float log1p(float x) {
  return ::log1pf(x);
}
__MATH_FUNCTIONS_DECL__ double log1p(double x) {
  return ::log1p(x);
}
__MATH_FUNCTIONS_DECL__ float max(float x, float y) {
  return ::fmaxf(x, y);
}
__MATH_FUNCTIONS_DECL__ double max(double x, double y) {
  return ::fmax(x, y);
}
__MATH_FUNCTIONS_DECL__ float min(float x, float y) {
  return ::fminf(x, y);
}
__MATH_FUNCTIONS_DECL__ double min(double x, double y) {
  return ::fmin(x, y);
}
__MATH_FUNCTIONS_DECL__ float pow(float x, float y) {
  return ::powf(x, y);
}
__MATH_FUNCTIONS_DECL__ double pow(double x, double y) {
  return ::pow(x, y);
}
// Computes sine and cosine simultaneously: *sptr = sin(x), *cptr = cos(x).
__MATH_FUNCTIONS_DECL__ void sincos(float x, float* sptr, float* cptr) {
  return ::sincosf(x, sptr, cptr);
}
__MATH_FUNCTIONS_DECL__ void sincos(double x, double* sptr, double* cptr) {
  return ::sincos(x, sptr, cptr);
}
__MATH_FUNCTIONS_DECL__ float sqrt(float x) {
  return ::sqrtf(x);
}
__MATH_FUNCTIONS_DECL__ double sqrt(double x) {
  return ::sqrt(x);
}
// Reciprocal square root (CUDA/HIP intrinsic).
__MATH_FUNCTIONS_DECL__ float rsqrt(float x) {
  return ::rsqrtf(x);
}
__MATH_FUNCTIONS_DECL__ double rsqrt(double x) {
  return ::rsqrt(x);
}
__MATH_FUNCTIONS_DECL__ float tan(float x) {
  return ::tanf(x);
}
__MATH_FUNCTIONS_DECL__ double tan(double x) {
  return ::tan(x);
}
__MATH_FUNCTIONS_DECL__ float tanh(float x) {
  return ::tanhf(x);
}
__MATH_FUNCTIONS_DECL__ double tanh(double x) {
  return ::tanh(x);
}
// Standard normal CDF (CUDA math library function).
__MATH_FUNCTIONS_DECL__ float normcdf(float x) {
  return ::normcdff(x);
}
__MATH_FUNCTIONS_DECL__ double normcdf(double x) {
  return ::normcdf(x);
}
} // namespace compat
} // namespace cuda
} // namespace c10
#endif
| 3,603
| 21.955414
| 75
|
h
|
null |
pytorch-main/c10/cuda/CUDAStream.h
|
#pragma once
#include <cstdint>
#include <utility>
#include <cuda_runtime_api.h>
#include <c10/core/DeviceGuard.h>
#include <c10/core/Stream.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/util/Exception.h>
/*
* Stream pool note.
*
* A CUDAStream is an abstraction of an actual cuStream on the GPU. CUDAStreams
* are backed by cuStreams, but they use several pools to minimize the costs
* associated with creating, retaining, and destroying cuStreams.
*
* There are three pools per device, and a device's pools are lazily created.
*
* The first pool contains only the default stream. When the default stream
* is requested it's returned.
*
* The second pool is the "low priority" or "default priority" streams. In
* HIP builds there is no distinction between streams in this pool and streams
* in the third pool (below). There are 32 of these streams per device, and
* when a stream is requested one of these streams is returned round-robin.
* That is, the first stream requested is at index 0, the second at index 1...
* to index 31, then index 0 again.
*
* This means that if 33 low priority streams are requested, the first and
* last streams requested are actually the same stream (under the covers)
* and kernels enqueued on them cannot run concurrently.
*
* The third pool is the "high priority" streams. The third pool acts like
* the second pool except the streams are created with a higher priority.
*
* These pools suggest that stream users should prefer many short-lived streams,
* as the cost of acquiring and releasing streams is effectively zero. If
* many longer-lived streams are required in performance critical scenarios
* then the functionality here may need to be extended to allow, for example,
* "reserving" a subset of the pool so that other streams do not accidentally
* overlap the performance critical streams.
*
* Note: although the notion of "current stream for device" is thread local
* (every OS thread has a separate current stream, as one might expect),
* the stream pool is global across all threads; stream 0 is always stream 0
* no matter which thread you use it on. Multiple threads can synchronize
* on the same stream. Although the CUDA documentation is not very clear
* on the matter, streams are thread safe; e.g., it is safe to enqueue
* a kernel on the same stream from two different threads.
*/
namespace c10 {
namespace cuda {
static constexpr int max_compile_time_stream_priorities = 4;
// Value object representing a CUDA stream. This is just a wrapper
// around c10::Stream, but it comes with a little extra CUDA-specific
// functionality (conversion to cudaStream_t), and a guarantee that
// the wrapped c10::Stream really is a CUDA stream.
class C10_CUDA_API CUDAStream {
 public:
  enum Unchecked { UNCHECKED };
  /// Construct a CUDAStream from a Stream. This construction is checked,
  /// and will raise an error if the Stream is not, in fact, a CUDA stream.
  explicit CUDAStream(Stream stream) : stream_(stream) {
    TORCH_CHECK(stream_.device_type() == DeviceType::CUDA);
  }
  /// Construct a CUDAStream from a Stream with no error checking.
  /// This constructor uses the "named" constructor idiom, and can
  /// be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream)
  explicit CUDAStream(Unchecked, Stream stream) : stream_(stream) {}
  bool operator==(const CUDAStream& other) const noexcept {
    return unwrap() == other.unwrap();
  }
  bool operator!=(const CUDAStream& other) const noexcept {
    return unwrap() != other.unwrap();
  }
  /// Implicit conversion to cudaStream_t.
  operator cudaStream_t() const {
    return stream();
  }
  /// Implicit conversion to Stream (a.k.a., forget that the stream is a
  /// CUDA stream).
  operator Stream() const {
    return unwrap();
  }
  /// Used to avoid baking in device type explicitly to Python-side API.
  DeviceType device_type() const {
    return DeviceType::CUDA;
  }
  /// Get the CUDA device index that this stream is associated with.
  DeviceIndex device_index() const {
    return stream_.device_index();
  }
  /// Get the full Device that this stream is associated with. The Device
  /// is guaranteed to be a CUDA device.
  Device device() const {
    return Device(DeviceType::CUDA, device_index());
  }
  /// Return the stream ID corresponding to this particular stream.
  StreamId id() const {
    return stream_.id();
  }
  /// Non-blocking check: true iff all work submitted to this stream has
  /// completed. cudaErrorNotReady is expected and cleared; any other error
  /// is raised.
  bool query() const {
    DeviceGuard guard{stream_.device()};
    cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaStreamQuery(stream()));
    if (err == cudaSuccess) {
      return true;
    } else if (err != cudaErrorNotReady) {
      C10_CUDA_CHECK(err);
    } else {
      // ignore and clear the error if not ready
      (void)cudaGetLastError();
    }
    return false;
  }
  /// Blocks the calling host thread until all work on this stream completes.
  void synchronize() const {
    DeviceGuard guard{stream_.device()};
    c10::cuda::stream_synchronize(stream());
  }
  /// Returns the CUDA priority of this stream, as reported by the runtime.
  int priority() const {
    DeviceGuard guard{stream_.device()};
    int priority = 0;
    C10_CUDA_CHECK(cudaStreamGetPriority(stream(), &priority));
    return priority;
  }
  /// Explicit conversion to cudaStream_t.
  cudaStream_t stream() const;
  /// Explicit conversion to Stream.
  Stream unwrap() const {
    return stream_;
  }
  /// Reversibly pack a CUDAStream into a struct representation.
  /// Previously the stream's data was packed into a single int64_t,
  /// as it was assumed the fields would not require more than
  /// 64 bits of storage in total.
  /// See https://github.com/pytorch/pytorch/issues/75854
  /// for more information regarding newer platforms that may violate
  /// this assumption.
  ///
  /// The CUDAStream can be unpacked using unpack().
  struct c10::StreamData3 pack3() const {
    return stream_.pack3();
  }
  // Unpack a CUDAStream from the 3 fields generated by pack().
  static CUDAStream unpack3(
      StreamId stream_id,
      DeviceIndex device_index,
      DeviceType device_type) {
    return CUDAStream(Stream::unpack3(stream_id, device_index, device_type));
  }
  static std::tuple<int, int> priority_range() {
    // Note: this returns the range of priority **supported by PyTorch**, not
    // the range of priority **supported by CUDA**. The former is a subset of
    // the latter.
    int least_priority, greatest_priority;
    C10_CUDA_CHECK(
        cudaDeviceGetStreamPriorityRange(&least_priority, &greatest_priority));
    TORCH_INTERNAL_ASSERT(
        least_priority == 0, "Unexpected CUDA stream priority range");
    TORCH_INTERNAL_ASSERT(
        greatest_priority <= -1, "Unexpected CUDA stream priority range");
    // Clamp to the number of priority levels PyTorch was compiled to support.
    greatest_priority = std::max(
        -c10::cuda::max_compile_time_stream_priorities + 1, greatest_priority);
    return std::make_tuple(least_priority, greatest_priority);
  }
  // Deleted for now; use CUDAEvent::block instead
  // void synchronize_with(const CUDAEvent& event) const;
 private:
  // The wrapped generic stream; this class is a zero-overhead view over it.
  Stream stream_;
};
/**
* Get a new stream from the CUDA stream pool. You can think of this
* as "creating" a new stream, but no such creation actually happens;
* instead, streams are preallocated from the pool and returned in a
* round-robin fashion.
*
* You can request a stream from the high priority pool by setting
* isHighPriority to true, or a stream for a specific device by setting device
* (defaulting to the current CUDA stream.)
*/
C10_API CUDAStream
getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
// no default priority to disambiguate overloads
C10_API CUDAStream
getStreamFromPool(const int priority, DeviceIndex device = -1);
/**
* Get a CUDAStream from a externally allocated one.
*
* This is mainly for interoperability with different libraries where we
* want to operate on a non-torch allocated stream for data exchange or similar
* purposes
*/
C10_API CUDAStream
getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index);
/**
* Get the default CUDA stream, for the passed CUDA device, or for the
* current device if no device index is passed. The default stream is
* where most computation occurs when you aren't explicitly using
* streams.
*/
C10_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
/**
* Get the current CUDA stream, for the passed CUDA device, or for the
* current device if no device index is passed. The current CUDA stream
* will usually be the default CUDA stream for the device, but it may
* be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard'
* or 'CUDAStreamGuard'.
*/
C10_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
/**
* Set the current stream on the device of the passed in stream to be
* the passed in stream. Yes, you read that right: this function
* has *nothing* to do with the current device: it toggles the current
* stream of the device of the passed stream.
*
* Confused? Avoid using this function; prefer using 'CUDAStreamGuard' instead
* (which will switch both your current device and current stream in the way you
* expect, and reset it back to its original state afterwards).
*/
C10_API void setCurrentCUDAStream(CUDAStream stream);
C10_API std::ostream& operator<<(std::ostream& stream, const CUDAStream& s);
} // namespace cuda
} // namespace c10
namespace std {
// Hash a CUDAStream by hashing the generic Stream it wraps, so a CUDAStream
// and its unwrapped Stream hash consistently.
template <>
struct hash<c10::cuda::CUDAStream> {
  size_t operator()(c10::cuda::CUDAStream s) const noexcept {
    const std::hash<c10::Stream> stream_hasher{};
    return stream_hasher(s.unwrap());
  }
};
} // namespace std
| 9,494
| 34.561798
| 80
|
h
|
null |
pytorch-main/c10/cuda/driver_api.h
|
#pragma once
#include <cuda.h>
#define NVML_NO_UNVERSIONED_FUNC_DEFS
#include <nvml.h>
#define C10_CUDA_DRIVER_CHECK(EXPR) \
do { \
CUresult __err = EXPR; \
if (__err != CUDA_SUCCESS) { \
const char* err_str; \
CUresult get_error_str_err C10_UNUSED = \
c10::cuda::DriverAPI::get()->cuGetErrorString_(__err, &err_str); \
if (get_error_str_err != CUDA_SUCCESS) { \
AT_ERROR("CUDA driver error: unknown error"); \
} else { \
AT_ERROR("CUDA driver error: ", err_str); \
} \
} \
} while (0)
#define C10_FORALL_DRIVER_LIBRARIES(_) \
_("libcuda.so", 0) \
_("libnvidia-ml.so.1", 1)
#define C10_FORALL_DRIVER_API(_) \
_(cuMemAddressReserve, 0) \
_(cuMemRelease, 0) \
_(cuMemMap, 0) \
_(cuMemAddressFree, 0) \
_(cuMemSetAccess, 0) \
_(cuMemUnmap, 0) \
_(cuMemCreate, 0) \
_(cuGetErrorString, 0) \
_(nvmlInit_v2, 1) \
_(nvmlDeviceGetHandleByPciBusId_v2, 1) \
_(nvmlDeviceGetComputeRunningProcesses, 1)
namespace c10 {
namespace cuda {
struct DriverAPI {
  // Expands to one function-pointer member per entry in C10_FORALL_DRIVER_API,
  // named after the API with a trailing underscore (e.g. cuMemCreate_).
#define CREATE_MEMBER(name, n) decltype(&name) name##_;
  C10_FORALL_DRIVER_API(CREATE_MEMBER)
#undef CREATE_MEMBER
  // Returns the shared instance with all members populated. NOTE(review):
  // presumably resolved via dlopen of the C10_FORALL_DRIVER_LIBRARIES list —
  // confirm in the .cpp that defines get().
  static DriverAPI* get();
};
} // namespace cuda
} // namespace c10
| 1,973
| 38.48
| 76
|
h
|
null |
pytorch-main/c10/cuda/impl/CUDAGuardImpl.h
|
#pragma once
#include <c10/core/DeviceGuard.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/core/impl/GPUTrace.h>
#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <c10/cuda/CUDAException.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDAStream.h>
#include <cuda_runtime_api.h>
namespace c10 {
namespace cuda {
namespace impl {
struct CUDAGuardImpl final : public c10::impl::DeviceGuardImplInterface {
static constexpr DeviceType static_type = DeviceType::CUDA;
CUDAGuardImpl() = default;
explicit CUDAGuardImpl(DeviceType t) {
TORCH_INTERNAL_ASSERT(t == DeviceType::CUDA);
}
DeviceType type() const override {
return DeviceType::CUDA;
}
Device exchangeDevice(Device d) const override {
TORCH_INTERNAL_ASSERT(d.is_cuda());
int old_device_index = c10::cuda::ExchangeDevice(d.index());
return Device(DeviceType::CUDA, old_device_index);
}
Device getDevice() const override {
int device;
C10_CUDA_CHECK(c10::cuda::GetDevice(&device));
return Device(DeviceType::CUDA, device);
}
c10::optional<Device> uncheckedGetDevice() const noexcept {
int device;
const auto err = C10_CUDA_ERROR_HANDLED(c10::cuda::GetDevice(&device));
C10_CUDA_CHECK_WARN(err);
if (err != cudaSuccess) {
return c10::nullopt;
}
return Device(DeviceType::CUDA, device);
}
void setDevice(Device d) const override {
TORCH_INTERNAL_ASSERT(d.is_cuda());
C10_CUDA_CHECK(c10::cuda::SetDevice(d.index()));
}
void uncheckedSetDevice(Device d) const noexcept override {
C10_CUDA_CHECK_WARN(c10::cuda::MaybeSetDevice(d.index()));
}
Stream getStream(Device d) const noexcept override {
return getCurrentCUDAStream(d.index()).unwrap();
}
Stream getDefaultStream(Device d) const override {
return getDefaultCUDAStream(d.index());
}
Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
const override {
return getStreamFromPool(isHighPriority, d.index());
}
// NB: These do NOT set the current device
Stream exchangeStream(Stream s) const noexcept override {
CUDAStream cs(s);
auto old_stream = getCurrentCUDAStream(s.device().index());
setCurrentCUDAStream(cs);
return old_stream.unwrap();
}
DeviceIndex deviceCount() const noexcept override {
return device_count();
}
// Event-related functions
void createEvent(cudaEvent_t* cuda_event, const EventFlag flag) const {
// Maps PyTorch's Event::Flag to CUDA flag
auto cuda_flag = cudaEventDefault;
switch (flag) {
case EventFlag::PYTORCH_DEFAULT:
case EventFlag::CUDA_EVENT_DISABLE_TIMING:
cuda_flag = cudaEventDisableTiming;
break;
case EventFlag::BACKEND_DEFAULT:
case EventFlag::CUDA_EVENT_DEFAULT:
cuda_flag = cudaEventDefault;
break;
default:
TORCH_CHECK(false, "CUDA event received unknown flag");
}
C10_CUDA_CHECK(cudaEventCreateWithFlags(cuda_event, cuda_flag));
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_creation(
reinterpret_cast<uintptr_t>(cuda_event));
}
}
void destroyEvent(void* event, const DeviceIndex device_index)
const noexcept override {
if (!event)
return;
auto cuda_event = static_cast<cudaEvent_t>(event);
int orig_device;
C10_CUDA_CHECK_WARN(c10::cuda::GetDevice(&orig_device));
C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(device_index));
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_deletion(
reinterpret_cast<uintptr_t>(cuda_event));
}
C10_CUDA_CHECK_WARN(cudaEventDestroy(cuda_event));
C10_CUDA_CHECK_WARN(c10::cuda::SetDevice(orig_device));
}
void record(
void** event,
const Stream& stream,
const DeviceIndex device_index,
const EventFlag flag) const override {
TORCH_CHECK(
device_index == -1 || device_index == stream.device_index(),
"Event device index ",
device_index,
" does not match recording stream's device index ",
stream.device_index(),
".");
cudaEvent_t cuda_event = static_cast<cudaEvent_t>(*event);
CUDAStream cuda_stream{stream};
// Moves to stream's device to record
const auto orig_device = getDevice();
setDevice(stream.device());
// Creates the event (lazily)
if (!cuda_event)
createEvent(&cuda_event, flag);
C10_CUDA_CHECK(cudaEventRecord(cuda_event, cuda_stream));
// Makes the void* point to the (possibly just allocated) CUDA event
*event = cuda_event;
const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
if (C10_UNLIKELY(interp)) {
(*interp)->trace_gpu_event_record(
reinterpret_cast<uintptr_t>(cuda_event),
reinterpret_cast<uintptr_t>(cuda_stream.stream()));
}
// Resets device
setDevice(orig_device);
}
  // Makes `stream` wait (asynchronously, on-device) until `event` has been
  // recorded; no-op for a null event. Does not block the host.
  void block(void* event, const Stream& stream) const override {
    if (!event)
      return;
    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
    CUDAStream cuda_stream{stream};
    // Switch to the stream's device for the wait call; restored below.
    const auto orig_device = getDevice();
    setDevice(stream.device());
    C10_CUDA_CHECK(cudaStreamWaitEvent(
        cuda_stream,
        cuda_event,
        /*flags (must be zero)=*/0));
    // Notify the Python-side GPU trace hook, if one is installed.
    const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace();
    if (C10_UNLIKELY(interp)) {
      (*interp)->trace_gpu_event_wait(
          reinterpret_cast<uintptr_t>(cuda_event),
          reinterpret_cast<uintptr_t>(cuda_stream.stream()));
    }
    setDevice(orig_device);
  }
  // May be called from any device
  // Returns true if the event has completed (or was never created, i.e. the
  // handle is null). cudaErrorNotReady simply means "still running", so it is
  // swallowed; any other error is (re-)raised via C10_CUDA_CHECK.
  bool queryEvent(void* event) const override {
    if (!event)
      return true;
    cudaEvent_t cuda_event = static_cast<cudaEvent_t>(event);
    const cudaError_t err = C10_CUDA_ERROR_HANDLED(cudaEventQuery(cuda_event));
    if (err != cudaErrorNotReady) {
      C10_CUDA_CHECK(err);
    } else {
      // ignore and clear the error if not ready
      (void)cudaGetLastError();
    }
    return (err == cudaSuccess);
  }
// Stream-related functions
bool queryStream(const Stream& stream) const override {
CUDAStream cuda_stream{stream};
return cuda_stream.query();
}
void synchronizeStream(const Stream& stream) const override {
CUDAStream cuda_stream{stream};
cuda_stream.synchronize();
}
void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
const override {
CUDAStream cuda_stream{stream};
CUDACachingAllocator::recordStream(data_ptr, cuda_stream);
}
};
} // namespace impl
} // namespace cuda
} // namespace c10
| 6,831
| 31.226415
| 80
|
h
|
null |
pytorch-main/c10/macros/Export.h
|
#ifndef C10_MACROS_EXPORT_H_
#define C10_MACROS_EXPORT_H_
/* Header file to define the common scaffolding for exported symbols.
*
* Export is by itself a quite tricky situation to deal with, and if you are
* hitting this file, make sure you start with the background here:
* - Linux: https://gcc.gnu.org/wiki/Visibility
* - Windows:
* https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017
*
* Do NOT include this file directly. Instead, use c10/macros/Macros.h
*/
// You do not need to edit this part of file unless you are changing the core
// pytorch export abstractions.
//
// This part defines the C10 core export and import macros. This is controlled
// by whether we are building shared libraries or not, which is determined
// during build time and codified in c10/core/cmake_macros.h.
// When the library is built as a shared lib, EXPORT and IMPORT will contain
// visibility attributes. If it is being built as a static lib, then EXPORT
// and IMPORT basically have no effect.
// As a rule of thumb, you should almost NEVER mix static and shared builds for
// libraries that depend on c10. AKA, if c10 is built as a static library, we
// recommend everything dependent on c10 to be built statically. If c10 is built
// as a shared library, everything dependent on it should be built as shared. In
// the PyTorch project, all native libraries shall use the macro
// C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static
// libraries.
// For build systems that do not directly depend on CMake and directly build
// from the source directory (such as Buck), one may not have a cmake_macros.h
// file at all. In this case, the build system is responsible for providing
// correct macro definitions corresponding to the cmake_macros.h.in file.
//
// In such scenarios, one should define the macro
// C10_USING_CUSTOM_GENERATED_MACROS
// to inform this header that it does not need to include the cmake_macros.h
// file.
#ifndef C10_USING_CUSTOM_GENERATED_MACROS
#include <c10/macros/cmake_macros.h>
#endif // C10_USING_CUSTOM_GENERATED_MACROS
#ifdef _WIN32
#define C10_HIDDEN
#if defined(C10_BUILD_SHARED_LIBS)
#define C10_EXPORT __declspec(dllexport)
#define C10_IMPORT __declspec(dllimport)
#else
#define C10_EXPORT
#define C10_IMPORT
#endif
#else // _WIN32
#if defined(__GNUC__)
#define C10_EXPORT __attribute__((__visibility__("default")))
#define C10_HIDDEN __attribute__((__visibility__("hidden")))
#else // defined(__GNUC__)
#define C10_EXPORT
#define C10_HIDDEN
#endif // defined(__GNUC__)
#define C10_IMPORT C10_EXPORT
#endif // _WIN32
#ifdef NO_EXPORT
#undef C10_EXPORT
#define C10_EXPORT
#endif
// Definition of an adaptive XX_API macro, that depends on whether you are
// building the library itself or not, routes to XX_EXPORT and XX_IMPORT.
// Basically, you will need to do this for each shared library that you are
// building, and the instruction is as follows: assuming that you are building
// a library called libawesome.so. You should:
// (1) for your cmake target (usually done by "add_library(awesome, ...)"),
// define a macro called AWESOME_BUILD_MAIN_LIB using
// target_compile_options.
// (2) define the AWESOME_API macro similar to the one below.
// And in the source file of your awesome library, use AWESOME_API to
// annotate public symbols.
// Here, for the C10 library, we will define the macro C10_API for both import
// and export.
// This one is being used by libc10.so
#ifdef C10_BUILD_MAIN_LIB
#define C10_API C10_EXPORT
#else
#define C10_API C10_IMPORT
#endif
// This one is being used by libtorch.so
#ifdef CAFFE2_BUILD_MAIN_LIB
#define TORCH_API C10_EXPORT
#else
#define TORCH_API C10_IMPORT
#endif
// You may be wondering: Whose brilliant idea was it to split torch_cuda into
// two pieces with confusing names?
// Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we
// tried to compile PyTorch for CUDA 11.1, which ran into relocation marker
// issues when linking big binaries.
// (https://github.com/pytorch/pytorch/issues/39968) We had two choices:
// (1) Stop supporting so many GPU architectures
// (2) Do something else
// We chose #2 and decided to split the behemoth that was torch_cuda into two
// smaller libraries, one with most of the core kernel functions (torch_cuda_cu)
// and the other that had..well..everything else (torch_cuda_cpp). The idea was
// this: instead of linking our static libraries (like the hefty
// libcudnn_static.a) with another huge library, torch_cuda, and run into pesky
// relocation marker issues, we could link our static libraries to a smaller
// part of torch_cuda (torch_cuda_cpp) and avoid the issues.
// libtorch_cuda_cu.so
#ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB
#define TORCH_CUDA_CU_API C10_EXPORT
#elif defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CU_API C10_IMPORT
#endif
// libtorch_cuda_cpp.so
#ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB
#define TORCH_CUDA_CPP_API C10_EXPORT
#elif defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CPP_API C10_IMPORT
#endif
// libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the
// same api)
#ifdef TORCH_CUDA_BUILD_MAIN_LIB
#define TORCH_CUDA_CPP_API C10_EXPORT
#define TORCH_CUDA_CU_API C10_EXPORT
#elif !defined(BUILD_SPLIT_CUDA)
#define TORCH_CUDA_CPP_API C10_IMPORT
#define TORCH_CUDA_CU_API C10_IMPORT
#endif
#if defined(TORCH_HIP_BUILD_MAIN_LIB)
#define TORCH_HIP_API C10_EXPORT
#else
#define TORCH_HIP_API C10_IMPORT
#endif
// Enums only need to be exported on windows for non-CUDA files
// NOTE(review): the comment above says "non-CUDA files", but the condition
// below fires only when __CUDACC__ *is* defined (i.e. nvcc compilation on
// Windows) -- confirm which of the two is intended.
#if defined(_WIN32) && defined(__CUDACC__)
#define C10_API_ENUM C10_API
#else
#define C10_API_ENUM
#endif
#endif // C10_MACROS_EXPORT_H_
| 5,684
| 35.677419
| 80
|
h
|
null |
pytorch-main/c10/macros/Macros.h
|
#ifndef C10_MACROS_MACROS_H_
#define C10_MACROS_MACROS_H_
#include <cassert>
/* Main entry for c10/macros.
*
* In your code, include c10/macros/Macros.h directly, instead of individual
* files in this folder.
*/
// For build systems that do not directly depend on CMake and directly build
// from the source directory (such as Buck), one may not have a cmake_macros.h
// file at all. In this case, the build system is responsible for providing
// correct macro definitions corresponding to the cmake_macros.h.in file.
//
// In such scenarios, one should define the macro
// C10_USING_CUSTOM_GENERATED_MACROS
// to inform this header that it does not need to include the cmake_macros.h
// file.
#ifndef C10_USING_CUSTOM_GENERATED_MACROS
#include <c10/macros/cmake_macros.h>
#endif // C10_USING_CUSTOM_GENERATED_MACROS
#include <c10/macros/Export.h>
#if defined(__clang__)
#define __ubsan_ignore_float_divide_by_zero__ \
__attribute__((no_sanitize("float-divide-by-zero")))
#define __ubsan_ignore_undefined__ __attribute__((no_sanitize("undefined")))
#define __ubsan_ignore_signed_int_overflow__ \
__attribute__((no_sanitize("signed-integer-overflow")))
#define __ubsan_ignore_function__ __attribute__((no_sanitize("function")))
#else
#define __ubsan_ignore_float_divide_by_zero__
#define __ubsan_ignore_undefined__
#define __ubsan_ignore_signed_int_overflow__
#define __ubsan_ignore_function__
#endif
// Detect address sanitizer as some stuff doesn't work with it
#undef C10_ASAN_ENABLED
// for clang
#if defined(__has_feature)
#if ((__has_feature(address_sanitizer)))
#define C10_ASAN_ENABLED 1
#endif
#endif
// for gcc
#if defined(__SANITIZE_ADDRESS__)
#if __SANITIZE_ADDRESS__
#if !defined(C10_ASAN_ENABLED)
#define C10_ASAN_ENABLED 1
#endif
#endif
#endif
#if !defined(C10_ASAN_ENABLED)
#define C10_ASAN_ENABLED 0
#endif
// Disable the copy and assignment operator for a class. Note that this will
// disable the usage of the class in std containers.
#define C10_DISABLE_COPY_AND_ASSIGN(classname) \
classname(const classname&) = delete; \
classname& operator=(const classname&) = delete
#define C10_CONCATENATE_IMPL(s1, s2) s1##s2
#define C10_CONCATENATE(s1, s2) C10_CONCATENATE_IMPL(s1, s2)
#define C10_MACRO_EXPAND(args) args
#define C10_STRINGIZE_IMPL(x) #x
#define C10_STRINGIZE(x) C10_STRINGIZE_IMPL(x)
/**
* C10_ANONYMOUS_VARIABLE(str) introduces an identifier starting with
* str and ending with a number that varies with the line.
*/
#ifdef __COUNTER__
#define C10_UID __COUNTER__
#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __COUNTER__)
#else
#define C10_UID __LINE__
#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __LINE__)
#endif
#ifdef __has_cpp_attribute
#define C10_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#define C10_HAS_CPP_ATTRIBUTE(x) (0)
#endif
/// C10_NODISCARD - Warn if a type or return value is discarded.
// Technically, we should check if __cplusplus > 201402L here, because
// [[nodiscard]] is only defined in C++17. However, some compilers
// we care about don't advertise being C++17 (e.g., clang), but
// support the attribute anyway. In fact, this is not just a good idea,
// it's the law: clang::warn_unused_result doesn't work on nvcc + clang
// and the best workaround for this case is to use [[nodiscard]]
// instead; see https://github.com/pytorch/pytorch/issues/13118
//
// Note to future editors: if you have noticed that a compiler is
// misbehaving (e.g., it advertises support, but the support doesn't
// actually work, or it is emitting warnings). Some compilers which
// are strict about the matter include MSVC, which will complain:
//
// error C2429: attribute 'nodiscard' requires compiler flag '/std:c++latest'
//
// Exhibits:
// - MSVC 19.14: https://godbolt.org/z/Dzd7gn (requires /std:c++latest)
// - Clang 8.0.0: https://godbolt.org/z/3PYL4Z (always advertises support)
// - gcc 8.3: https://godbolt.org/z/4tLMQS (always advertises support)
#if C10_HAS_CPP_ATTRIBUTE(nodiscard)
#define C10_NODISCARD [[nodiscard]]
// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious
// error when __has_cpp_attribute is given a scoped attribute in C mode.
#elif __cplusplus && C10_HAS_CPP_ATTRIBUTE(clang::warn_unused_result)
// TODO: It's possible this is still triggering
// https://github.com/pytorch/pytorch/issues/13118 on Windows; if it is, better
// fix it.
#define C10_NODISCARD [[clang::warn_unused_result]]
#else
#define C10_NODISCARD
#endif
// suppress an unused variable.
#if defined(_MSC_VER) && !defined(__clang__)
#define C10_UNUSED __pragma(warning(suppress : 4100 4101))
#else
#define C10_UNUSED __attribute__((__unused__))
#endif //_MSC_VER
// Direct port of LLVM_ATTRIBUTE_USED.
// __has_attribute is a Clang/GCC extension; provide a 0-returning fallback so
// the #if below still preprocesses on compilers (e.g. MSVC) that do not
// define it. Without this, `#if __has_attribute(used)` is an invalid constant
// expression on such compilers.
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#if __has_attribute(used)
#define C10_USED __attribute__((__used__))
#else
#define C10_USED
#endif
#define C10_RESTRICT __restrict
// Simply define the namespace, in case a dependent library want to refer to
// the c10 namespace but not any nontrivial files.
namespace c10 {} // namespace c10
namespace c10 {
namespace cuda {}
} // namespace c10
namespace c10 {
namespace hip {}
} // namespace c10
// Since C10 is the core library for caffe2 (and aten), we will simply reroute
// all abstractions defined in c10 to be available in caffe2 as well.
// This is only for backwards compatibility. Please use the symbols from the
// c10 namespace where possible.
namespace caffe2 {
using namespace c10;
}
namespace at {
using namespace c10;
}
namespace at {
namespace cuda {
using namespace c10::cuda;
}
} // namespace at
// WARNING!!! THIS IS A GIANT HACK!!!
// This line means you cannot simultaneously include c10/hip
// and c10/cuda and then use them from the at::cuda namespace.
// This is true in practice, because HIPIFY works inplace on
// files in ATen/cuda, so it assumes that c10::hip is available
// from at::cuda. This namespace makes that happen. When
// HIPIFY is no longer out-of-place, we can switch the cuda
// here to hip and everyone is happy.
namespace at {
namespace cuda {
using namespace c10::hip;
}
} // namespace at
// C10_LIKELY/C10_UNLIKELY
//
// These macros provide parentheses, so you can use these macros as:
//
// if C10_LIKELY(some_expr) {
// ...
// }
//
// NB: static_cast to boolean is mandatory in C++, because __builtin_expect
// takes a long argument, which means you may trigger the wrong conversion
// without it.
//
#if defined(__GNUC__) || defined(__ICL) || defined(__clang__)
#define C10_LIKELY(expr) (__builtin_expect(static_cast<bool>(expr), 1))
#define C10_UNLIKELY(expr) (__builtin_expect(static_cast<bool>(expr), 0))
#else
#define C10_LIKELY(expr) (expr)
#define C10_UNLIKELY(expr) (expr)
#endif
/// C10_NOINLINE - Functions whose declaration is annotated with this will not
/// be inlined.
#ifdef __GNUC__
#define C10_NOINLINE __attribute__((noinline))
#elif _MSC_VER
#define C10_NOINLINE __declspec(noinline)
#else
#define C10_NOINLINE
#endif
#if defined(_MSC_VER)
#define C10_ALWAYS_INLINE __forceinline
#elif __has_attribute(always_inline) || defined(__GNUC__)
#define C10_ALWAYS_INLINE __attribute__((__always_inline__)) inline
#else
#define C10_ALWAYS_INLINE inline
#endif
#if defined(_MSC_VER)
#define C10_ATTR_VISIBILITY_HIDDEN
#elif defined(__GNUC__)
#define C10_ATTR_VISIBILITY_HIDDEN __attribute__((__visibility__("hidden")))
#else
#define C10_ATTR_VISIBILITY_HIDDEN
#endif
#define C10_ERASE C10_ALWAYS_INLINE C10_ATTR_VISIBILITY_HIDDEN
// C10_FALLTHROUGH - Annotate fallthrough to the next case in a switch.
#if C10_HAS_CPP_ATTRIBUTE(fallthrough)
#define C10_FALLTHROUGH [[fallthrough]]
#else
#define C10_FALLTHROUGH
#endif
#include <cstdint>
#ifdef __HIPCC__
// Unlike CUDA, HIP requires a HIP header to be included for __host__ to work.
// We do this #include here so that C10_HOST_DEVICE and friends will Just Work.
// See https://github.com/ROCm-Developer-Tools/HIP/issues/441
#include <hip/hip_runtime.h>
#endif
#if defined(__CUDACC__) || defined(__HIPCC__)
// Designates functions callable from the host (CPU) and the device (GPU)
#define C10_HOST_DEVICE __host__ __device__
#define C10_DEVICE __device__
#define C10_HOST __host__
// constants from
// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications)
// The maximum number of threads per multiprocessor is 1024 for Turing
// architecture (7.5), 1536 for Geforce Ampere (8.6)/Jetson Orin (8.7), and
// 2048 for all other architectures. You'll get warnings if you exceed these
// constants. Hence, the following macros adjust the input values from the user
// to resolve potential warnings.
#if __CUDA_ARCH__ == 750
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024;
#elif __CUDA_ARCH__ == 860 || __CUDA_ARCH__ == 870 || __CUDA_ARCH__ == 890
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1536;
#else
constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048;
#endif
// CUDA_MAX_THREADS_PER_BLOCK is same for all architectures currently
constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024;
// CUDA_THREADS_PER_BLOCK_FALLBACK is the "canonical fallback" choice of block
// size. 256 is a good number for this fallback and should give good occupancy
// and versatility across all architectures.
constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256;
// NOTE: if you are thinking of constexpr-ify the inputs to launch bounds, it
// turns out that although __launch_bounds__ can take constexpr, it
// can't take a constexpr that has anything to do with templates.
// Currently we use launch_bounds that depend on template arguments in
// Loops.cuh, Reduce.cuh and LossCTC.cuh. Hence, C10_MAX_THREADS_PER_BLOCK
// and C10_MIN_BLOCKS_PER_SM are kept as macros.
// Suppose you were planning to write __launch_bounds__(a, b), based on your
// performance tuning on a modern GPU. Instead, you should write
// __launch_bounds__(C10_MAX_THREADS_PER_BLOCK(a), C10_MIN_BLOCKS_PER_SM(a, b)),
// which will also properly respect limits on old architectures.
#define C10_MAX_THREADS_PER_BLOCK(val) \
(((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \
: CUDA_THREADS_PER_BLOCK_FALLBACK)
#define C10_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \
((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \
? (blocks_per_sm) \
: ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \
(threads_per_block))))
// C10_LAUNCH_BOUNDS is analogous to __launch_bounds__
#define C10_LAUNCH_BOUNDS_0 \
__launch_bounds__( \
256, 4) // default launch bounds that should give good occupancy and
// versatility across all architectures.
#define C10_LAUNCH_BOUNDS_1(max_threads_per_block) \
__launch_bounds__((C10_MAX_THREADS_PER_BLOCK((max_threads_per_block))))
#define C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \
__launch_bounds__( \
(C10_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \
(C10_MIN_BLOCKS_PER_SM((max_threads_per_block), (min_blocks_per_sm))))
#else
#define C10_HOST_DEVICE
#define C10_HOST
#define C10_DEVICE
#endif
#if defined(USE_ROCM)
#define C10_HIP_HOST_DEVICE __host__ __device__
#else
#define C10_HIP_HOST_DEVICE
#endif
#if defined(USE_ROCM)
#define C10_WARP_SIZE warpSize // = 64 or 32 (Defined in hip_runtime.h)
#else
#define C10_WARP_SIZE 32
#endif
#if defined(_MSC_VER) && _MSC_VER <= 1900
#define __func__ __FUNCTION__
#endif
// CUDA_KERNEL_ASSERT checks the assertion
// even when NDEBUG is defined. This is useful for important assertions in CUDA
// code that would otherwise be suppressed when building Release.
#if defined(__ANDROID__) || defined(__APPLE__) || \
(defined(USE_ROCM) && ROCM_VERSION < 40100)
// Those platforms do not support assert()
#define CUDA_KERNEL_ASSERT(cond)
#define SYCL_KERNEL_ASSERT(cond)
#elif defined(_MSC_VER)
#if defined(NDEBUG)
extern "C" {
C10_IMPORT
#if defined(__SYCL_DEVICE_ONLY__)
extern SYCL_EXTERNAL void _wassert(
const wchar_t* wexpr,
const wchar_t* wfile,
unsigned line);
#else
#if defined(__CUDA_ARCH__)
__host__ __device__
#endif // __CUDA_ARCH__
void
_wassert(wchar_t const* _Message, wchar_t const* _File, unsigned _Line);
#endif // __SYCL_DEVICE_ONLY__
}
#endif // NDEBUG
#define CUDA_KERNEL_ASSERT(cond) \
if (C10_UNLIKELY(!(cond))) { \
(void)(_wassert(_CRT_WIDE(#cond), _CRT_WIDE(__FILE__), static_cast<unsigned>(__LINE__)), 0); \
}
#define SYCL_KERNEL_ASSERT(cond) \
if (C10_UNLIKELY(!(cond))) { \
(void)(_wassert(_CRT_WIDE(#cond), _CRT_WIDE(__FILE__), static_cast<unsigned>(__LINE__)), 0); \
}
#else // __APPLE__, _MSC_VER
#if defined(NDEBUG)
extern "C" {
#if defined(__SYCL_DEVICE_ONLY__)
extern SYCL_EXTERNAL void __assert_fail(
const char* expr,
const char* file,
unsigned int line,
const char* func);
#else // __SYCL_DEVICE_ONLY__
#if ( \
defined(__CUDA_ARCH__) && !(defined(__clang__) && defined(__CUDA__)) && \
!defined(TORCH_DISABLE_GPU_ASSERTS))
// CUDA supports __assert_fail function which are common for both device
// and host side code.
__host__ __device__
#endif
// This forward declaration matching the declaration of __assert_fail
// exactly how it is in glibc in case parts of the program are compiled with
// different NDEBUG settings. Otherwise we might get 'ambiguous declaration'
// error. Note: On ROCm - this declaration serves for host side compilation.
void
__assert_fail(
const char* assertion,
const char* file,
unsigned int line,
const char* function) noexcept __attribute__((__noreturn__));
#if (defined(__HIP_ARCH__) || defined(__HIP__)) && \
!defined(TORCH_DISABLE_GPU_ASSERTS)
// ROCm supports __assert_fail only as a device side function.
__device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail(
const char* assertion,
const char* file,
unsigned int line,
const char* function);
#endif // defined(__HIP_ARCH__) || defined(__HIP__)
#endif // __SYCL_DEVICE_ONLY__
}
#endif // NDEBUG
#define CUDA_KERNEL_ASSERT(cond) \
if (C10_UNLIKELY(!(cond))) { \
__assert_fail( \
#cond, __FILE__, static_cast<unsigned int>(__LINE__), __func__); \
}
#define SYCL_KERNEL_ASSERT(cond) \
if (C10_UNLIKELY(!(cond))) { \
__assert_fail( \
#cond, __FILE__, static_cast<unsigned int>(__LINE__), __func__); \
}
#endif // __APPLE__
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
#if defined(__ANDROID__)
#define C10_ANDROID 1
#define C10_MOBILE 1
#elif ( \
defined(__APPLE__) && \
(TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE))
#define C10_IOS 1
#define C10_MOBILE 1
#endif // ANDROID / IOS
#if defined(C10_MOBILE) && C10_MOBILE
#define C10_ALWAYS_INLINE_UNLESS_MOBILE inline
#else
#define C10_ALWAYS_INLINE_UNLESS_MOBILE C10_ALWAYS_INLINE
#endif
// Portable determination of whether type T is trivially copyable.
// Warning: __has_trivial_copy for GCC may not always detect the non-POD
// correctly. For example, T = std::unique_ptr may evaluate to true and be
// treated as POD. This can cause unexpected behavior.
#if defined(__GNUG__) && __GNUC__ < 5 && !defined(__clang__)
#define C10_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T)
#else
#define C10_IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable<T>::value
#endif
#if defined(__CUDA_ARCH__)
#if defined(_MSC_VER) && defined(__CUDACC__)
#define CONSTEXPR_EXCEPT_WIN_CUDA const
#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__
// Note [static constexpr char* members for windows NVCC]
// The Windows NVCC compiler doesn't handle static constexpr class members,
// although it's fixed in a later version.
// (see
// https://developercommunity.visualstudio.com/t/intellisense-error-c11-static-constexpr-member-ini/245425)
//
// If we want to ensure that our field is static under all builds, then we need
// to work around it specifically for windows NVCC by making it (a) const, (b)
// defined outside of the class definition We need to define it outside of the
// class definition because of the C++ standard; char* is not an integral type
// (see
// https://stackoverflow.com/questions/24278473/intellisense-a-member-of-type-const-char-const-cannot-have-an-in-class-in)
//
// So instead of this:
// struct Foo {
// static constexpr const char* name = "foo";
// }
// In Windows NVCC, we end up with this:
// struct Foo {
// static const char* name;
// }
// const char* Foo::name = "foo";
//
// This gives us a small perf hit for any code that wants to access these field
// members, but right now it isn't used in any perf-critical code paths.
#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \
static const char* field;
#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \
const char* cls::field = val;
#else
#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr
#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__
#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \
static constexpr const char* field = val;
#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val)
#endif
#else
#if defined(_MSC_VER) && defined(__CUDACC__)
#define CONSTEXPR_EXCEPT_WIN_CUDA const
#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA
#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \
static const char* field;
#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \
const char* cls::field = val;
#else
#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr
#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA constexpr
#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \
static constexpr const char* field = val;
#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val)
#endif
#endif
#ifndef HAS_DEMANGLE
#if defined(__ANDROID__) || defined(_WIN32) || defined(__EMSCRIPTEN__)
#define HAS_DEMANGLE 0
#elif defined(__APPLE__) && \
(TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE)
#define HAS_DEMANGLE 0
#else
#define HAS_DEMANGLE 1
#endif
#endif // HAS_DEMANGLE
#define _C10_PRAGMA__(string) _Pragma(#string)
#define _C10_PRAGMA_(string) _C10_PRAGMA__(string)
#ifdef __clang__
#define C10_CLANG_DIAGNOSTIC_PUSH() _Pragma("clang diagnostic push")
#define C10_CLANG_DIAGNOSTIC_POP() _Pragma("clang diagnostic pop")
#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) \
_C10_PRAGMA_(clang diagnostic ignored flag)
#define C10_CLANG_HAS_WARNING(flag) __has_warning(flag)
#else
#define C10_CLANG_DIAGNOSTIC_PUSH()
#define C10_CLANG_DIAGNOSTIC_POP()
#define C10_CLANG_DIAGNOSTIC_IGNORE(flag)
#define C10_CLANG_HAS_WARNING(flag) 0
#endif
#ifdef __clang__
#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \
_C10_PRAGMA_(clang diagnostic push) \
_C10_PRAGMA_(clang diagnostic ignored "-Wunknown-warning-option") \
_C10_PRAGMA_(clang diagnostic ignored warning)
#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(clang diagnostic pop)
#elif __GNUC__
#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \
_C10_PRAGMA_(GCC diagnostic push) \
_C10_PRAGMA_(GCC diagnostic ignored "-Wpragmas") \
_C10_PRAGMA_(GCC diagnostic ignored warning)
#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(GCC diagnostic pop)
#else
#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning)
#define C10_DIAGNOSTIC_POP()
#endif
#endif // C10_MACROS_MACROS_H_
| 20,133
| 35.147217
| 122
|
h
|
null |
pytorch-main/c10/mobile/CPUCachingAllocator.h
|
#pragma once
#include <mutex>
#include <c10/util/SmallVector.h>
#include <c10/util/flat_hash_map.h>
/*
* CPUCachingAllocator:
* DISCLAIMER:
* This is subject to change (beta) and only supported on mobile builds.
* If code snippet such as in 'Usage pattern' is used outside of mobile
* build you will not observe the intended behavior.
* See below for more information.
* Why?
* It has been observed that some mobile platforms, such as pixel 3, return
* memory aggressively to the system. This results in page faults in some
* cases and ends up hurting performance. This caching allocator aims to address
* that. Furthermore it also allows users to specify their own allocator by
* implementing allocate/free virtual interfaces. What are the cons? There are
* some cons that were observed where use of caching allocator led to worse
* performance on some platforms. Reason being that the caching mechanism used
* by this allocator left us worse off compared to the corresponding platform's
* tuned memory allocator. In that case it seemed better to not use this
* allocator. Note there are some ideas to fix this in the works.
*
* Usage:
* Usage pattern:
* Instantiate and own the caching allocator.
* std::unique_ptr<c10::CPUCachingAllocator> caching_allocator =
* std::make_unique<c10::CPUCachingAllocator>();
 * Use caching allocator with a scoped guard at inference time.
 * {
 *   WithCPUCachingAllocatorGuard guard(caching_allocator.get());
 *   ... model.forward(...);
 * }
*/
namespace c10 {
// Size-keyed caching allocator for mobile CPU inference. See the file-level
// comment above for motivation and the usage pattern.
class C10_API CPUCachingAllocator {
  /*
   * What it does:
   * Caches all the allocations carried out by this allocator.
   * Cache key is the size of the allocation.
   * If requested size is found in the cache returns the cached pointer.
   * What it does not do:
   * No speculative allocation for any future allocations.
   */
 private:
  // Allocates `bytes` fresh memory and records it in allocation_map_.
  inline void* allocate_and_cache(const size_t bytes);
  // Returns all cached (currently unused) memory to the OS.
  void free_cached();

 protected:
  // Invariants.
  // 1. If memory is ever allocated via this allocator then
  //    the pointer will exist in allocation_map_, unless the allocator
  //    returned the memory to OS via free_cached.
  //    1.1. Therefore even when the said memory is "freed" via this
  //         allocator (and thus cached), it will continue to stay
  //         in allocation_map_. Furthermore it will also exist in
  //         available_map_. Thus an allocated memory pointer can be in both
  //         allocation_map_ and available_map_ simultaneously.
  // 2. Memory pointer maybe removed from allocation_map_, when it
  //    is freed outside of the scope of this allocator, but was allocated
  //    by this allocator.
  // 3. Available map only contains that memory which was allocated
  //    by this allocator and subsequently freed by this allocator.
  // As a result of above invariants, allocated memory ptr cannot be in
  // available_map_ unless it is in allocation_map_ as well.
  // size -> list of cached (freed-but-retained) pointers of that size.
  ska::flat_hash_map<size_t, c10::SmallVector<void*, 16>> available_map_;
  // ptr -> size of every live allocation made through this allocator.
  static ska::flat_hash_map<void*, size_t> allocation_map_;
  // Since allocation_map, which is a global instance, is mutated/read via
  // all public APIs we need a global mutex.
  static std::mutex mutex_;

 public:
  // Records that `ptr` was freed outside this allocator (invariant 2 above).
  static void record_free(void* ptr);
  virtual ~CPUCachingAllocator();
  // Checks the cache to see if allocation of size bytes can be found.
  // If so return cached memory, else
  // allocates memory, records it for caching and returns.
  virtual void* allocate(const size_t bytes);
  // Checks if the memory being freed was marked for allocation by
  // an earlier call to allocate. If so cache the allocation.
  // Otherwise free.
  virtual void free(void* ptr);
};
// Process-wide default caching allocator instance.
CPUCachingAllocator* GetDefaultCPUCachingAllocator();
// True if the current thread has a caching allocator installed via the guard.
bool ThreadLocalCachingAllocatorEnabled();
// The caching allocator installed for the current thread (see guard below).
CPUCachingAllocator* GetThreadLocalCachingAllocator();
// RAII guard: installs `allocator` as the thread-local caching allocator for
// the guard's lifetime and restores the previous one on destruction.
class C10_API WithCPUCachingAllocatorGuard {
 public:
  WithCPUCachingAllocatorGuard(CPUCachingAllocator* allocator);
  ~WithCPUCachingAllocatorGuard();

 private:
  // Allocator that was active before this guard, restored in the destructor.
  CPUCachingAllocator* prev_caching_allocator_ptr_{nullptr};
};
} // namespace c10
| 4,115
| 38.2
| 80
|
h
|
null |
pytorch-main/c10/mobile/CPUProfilingAllocator.h
|
#pragma once
#include <c10/util/flat_hash_map.h>
#include <memory>
#include <vector>
namespace c10 {
/*
* Given a sequence of allocations in a thread, AllocationPlan records
* 1. size of each allocation
* 2. Lifetime of each allocation.
* 3. allocation offsets: Memory offset for each allocation in a single blob of
* memory
* 4. Total size of a blob of memory required to satisfy all the allocations.
*/
class C10_API AllocationPlan {
 private:
  // Records size of each allocation by their sequential allocation ids.
  std::vector<uint64_t> allocation_sizes;
  // This maps one allocation id (X) to another allocation id (Y).
  // Allocation X is alive until allocation Y. From allocation Y onwards
  // allocation X is not referenced.
  // Thus Y is the id of the first allocation after X is freed.
  // NB: When an allocation is recorded, along with recording its size,
  // we also set the lifetime to be numeric_limits::max()
  // This is to track allocations that are made during the scope of
  // profiling but were not freed until after the scope ended.
  // Such allocations are not managed by profiling allocator.
  std::vector<uint64_t> allocation_lifetimes;
  // Maps an allocation to some offset in a blob of memory.
  std::vector<uint64_t> allocation_offsets;
  // Size in bytes of the single blob needed to satisfy all allocations.
  uint64_t total_size{0};
  // Resets the plan to its empty state.
  void clear();
  // Only the planner (writer) and the profiling allocator (reader) may
  // access the plan's internals.
  friend class AllocationPlanner;
  friend class CPUProfilingAllocator;
};
/*
* Map of memory ptr to allocation id. This is auxiliary information only
* used to establish lifetime of allocations.
*/
// Records the allocation/free sequence of one run into an AllocationPlan,
// or (in validation mode) checks a run against an existing plan.
class C10_API AllocationPlanner {
 private:
  // Plan being built (or validated); not owned.
  AllocationPlan* allocation_plan_{nullptr};
  // Maps allocated ptr to its allocation id.
  // This is used when freeing the memory to look up the allocation id
  // in order to establish the lifetime of a particular allocation.
  ska::flat_hash_map<const void*, uint64_t> allocation_ptr_to_id_;
  // Next sequential allocation id to hand out.
  uint64_t allocation_id_{0};
  // When true, record_* validate against the plan instead of recording.
  bool validation_mode_{false};
  bool validate_allocation(const uint64_t size, const void* ptr);
  bool validate_free(const void* ptr);

 public:
  // Set to false if any validation check failed during this run.
  bool validation_success{true};
  AllocationPlanner() = delete;
  // NOTE(review): single-argument-capable ctor is not `explicit`; presumably
  // intentional for the guard classes below -- confirm.
  AllocationPlanner(AllocationPlan* plan, bool validate = false)
      : allocation_plan_(plan), validation_mode_(validate) {}
  void record_allocation(const uint64_t size, const void* ptr);
  void record_free(const void* ptr);
  // Computes allocation_offsets/total_size from the recorded sequence.
  void formulate_plan();
  void clear();
};
// NOT THREAD SAFE profiling allocator.
class C10_API CPUProfilingAllocator {
 private:
  // Plan being replayed; not owned.
  const AllocationPlan* plan_{nullptr};
  // Index of the next allocation in the recorded sequence.
  uint64_t allocation_id_{0};
  uint64_t current_size_{0};
  // Single blob of memory; allocations are served from offsets within it.
  void* blob_{nullptr};
  ska::flat_hash_map<const void*, uint64_t> allocation_ptr_to_id_;

 public:
  ~CPUProfilingAllocator();
  void set_plan(const AllocationPlan* plan);
  void unset_plan();
  // Returns the pointer the plan assigned to the next allocation id.
  void* allocate(const size_t bytes);
  void free(void* const ptr);
};
/*
* Usage: Profile allocations made by one run of the model.
* AllocationPlan plan;
* {
* WithProfileAllocationGuard profile_guard(&plan);
* module.forward(...);
* }
* plan now contains allocation plan.
*/
class C10_API WithProfileAllocationsGuard {
 public:
  // RAII: starts recording thread-local allocations into `plan`; recording
  // ends (and the plan is finalized) when the guard is destroyed.
  WithProfileAllocationsGuard(AllocationPlan* plan);
  ~WithProfileAllocationsGuard();

 private:
  std::unique_ptr<AllocationPlanner> planner_;
};
/*
* Usage: Validate allocation plan made with WithProfileAllocationGuard
* bool plan_validation_success, success = true;
* for (some number of representative inputs)
* {
* WithValidateAllocationPlanGuard(&plan, &plan_validation_success);
* module.forward(...);
* success = success && plan_validation_success;
* }
* success == true means allocations are according to plan
* else for some inputs allocation pattern changed.
*/
class C10_API WithValidateAllocationPlanGuard {
 public:
  // RAII: validates this scope's allocations against `plan`; the verdict is
  // reported through `*success` (see the usage note above).
  WithValidateAllocationPlanGuard(AllocationPlan* plan, bool* success);
  ~WithValidateAllocationPlanGuard();

 private:
  std::unique_ptr<AllocationPlanner> planner_;
  // Out-parameter; must outlive the guard.
  bool* success_;
};
// Accessor for the current thread's planner; presumably installed by the
// profiling/validation guards — confirm in the implementation file.
AllocationPlanner* GetThreadLocalAllocationPlanner();
/*
* Usage: Allocate tensors accordingly to allocation plan
* First make allocation plan.
* See WithProfileAllocationsGuard usage.
* Second validate allocation plan.
* See WithValidateAllocationPlanGuard usage.
* CPUProfilingAllocator profiling_allocator;
* {
* WithProfilingAllocatorGuard allocator_guard(&profiling_allocator, &plan);
* module.forward(...);
* }
*/
class C10_API WithProfilingAllocatorGuard {
 public:
  // RAII: routes this scope's allocations through `allocator`, which replays
  // the offsets recorded in `plan`. Neither pointer is owned.
  WithProfilingAllocatorGuard(
      CPUProfilingAllocator* allocator,
      const AllocationPlan* plan);
  ~WithProfilingAllocatorGuard();
};
// Accessor for the current thread's profiling allocator; presumably installed
// by WithProfilingAllocatorGuard — confirm in the implementation file.
CPUProfilingAllocator* GetThreadLocalProfilingAllocator();
} // namespace c10
| 4,667
| 30.12
| 79
|
h
|
null |
pytorch-main/c10/test/util/complex_math_test_common.h
|
// Warning: this file is included twice in
// aten/src/ATen/test/cuda_complex_math_test.cu
#include <c10/util/complex.h>
#include <gtest/gtest.h>
#ifndef PI
#define PI 3.141592653589793238463
#endif
#ifndef tol
#define tol 1e-6
#endif
// Exponential functions
C10_DEFINE_TEST(TestExponential, IPi) {
  // exp(i*pi) = -1
  // Checked for float and double, through both std:: and the unqualified
  // (ADL/global) overload.
  {
    c10::complex<float> e_i_pi = std::exp(c10::complex<float>(0, float(PI)));
    C10_ASSERT_NEAR(e_i_pi.real(), -1, tol);
    C10_ASSERT_NEAR(e_i_pi.imag(), 0, tol);
  }
  {
    c10::complex<float> e_i_pi = ::exp(c10::complex<float>(0, float(PI)));
    C10_ASSERT_NEAR(e_i_pi.real(), -1, tol);
    C10_ASSERT_NEAR(e_i_pi.imag(), 0, tol);
  }
  {
    c10::complex<double> e_i_pi = std::exp(c10::complex<double>(0, PI));
    C10_ASSERT_NEAR(e_i_pi.real(), -1, tol);
    C10_ASSERT_NEAR(e_i_pi.imag(), 0, tol);
  }
  {
    c10::complex<double> e_i_pi = ::exp(c10::complex<double>(0, PI));
    C10_ASSERT_NEAR(e_i_pi.real(), -1, tol);
    C10_ASSERT_NEAR(e_i_pi.imag(), 0, tol);
  }
}
C10_DEFINE_TEST(TestExponential, EulerFormula) {
  // exp(ix) = cos(x) + i * sin(x)
  // (scaled by exp(Re x)); checked for float/double and std::/ADL overloads.
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> e = std::exp(x);
    float expected_real = std::exp(x.real()) * std::cos(x.imag());
    float expected_imag = std::exp(x.real()) * std::sin(x.imag());
    C10_ASSERT_NEAR(e.real(), expected_real, tol);
    C10_ASSERT_NEAR(e.imag(), expected_imag, tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> e = ::exp(x);
    float expected_real = ::exp(x.real()) * ::cos(x.imag());
    float expected_imag = ::exp(x.real()) * ::sin(x.imag());
    C10_ASSERT_NEAR(e.real(), expected_real, tol);
    C10_ASSERT_NEAR(e.imag(), expected_imag, tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> e = std::exp(x);
    float expected_real = std::exp(x.real()) * std::cos(x.imag());
    float expected_imag = std::exp(x.real()) * std::sin(x.imag());
    C10_ASSERT_NEAR(e.real(), expected_real, tol);
    C10_ASSERT_NEAR(e.imag(), expected_imag, tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> e = ::exp(x);
    float expected_real = ::exp(x.real()) * ::cos(x.imag());
    float expected_imag = ::exp(x.real()) * ::sin(x.imag());
    C10_ASSERT_NEAR(e.real(), expected_real, tol);
    C10_ASSERT_NEAR(e.imag(), expected_imag, tol);
  }
}
C10_DEFINE_TEST(TestExpm1, Normal) {
  // Away from the origin, expm1(z) must agree with exp(z) - 1.
  {
    c10::complex<float> z(0.1, 1.2);
    c10::complex<float> actual = std::expm1(z);
    c10::complex<float> expected = std::exp(z) - 1.0f;
    C10_ASSERT_NEAR(actual.real(), expected.real(), tol);
    C10_ASSERT_NEAR(actual.imag(), expected.imag(), tol);
  }
  {
    c10::complex<double> z(0.1, 1.2);
    c10::complex<double> actual = std::expm1(z);
    c10::complex<double> expected = std::exp(z) - 1.0;
    C10_ASSERT_NEAR(actual.real(), expected.real(), tol);
    C10_ASSERT_NEAR(actual.imag(), expected.imag(), tol);
  }
}
C10_DEFINE_TEST(TestExpm1, Small) {
  // expm1(x) = exp(x) - 1
  // expm1(x) provides greater precision than exp(x) - 1 for small values of x,
  // so for |x| << 1 the result should equal x itself.
  {
    c10::complex<float> x(1e-30, 1e-30);
    c10::complex<float> l1 = std::expm1(x);
    C10_ASSERT_NEAR(l1.real(), 1e-30, tol);
    C10_ASSERT_NEAR(l1.imag(), 1e-30, tol);
  }
  {
    c10::complex<double> x(1e-100, 1e-100);
    c10::complex<double> l1 = std::expm1(x);
    // Fixed: the expected values must match the 1e-100 input (the original
    // asserted against 1e-30, left over from the float case above).
    C10_ASSERT_NEAR(l1.real(), 1e-100, tol);
    C10_ASSERT_NEAR(l1.imag(), 1e-100, tol);
  }
}
C10_DEFINE_TEST(TestLog, Definition) {
  // log(x) = log(r) + i*theta, where r = |x| and theta = arg(x).
  {
    c10::complex<float> x(1.2, 3.4);
    c10::complex<float> l = std::log(x);
    float expected_real = std::log(std::abs(x));
    float expected_imag = std::arg(x);
    C10_ASSERT_NEAR(l.real(), expected_real, tol);
    C10_ASSERT_NEAR(l.imag(), expected_imag, tol);
  }
  {
    c10::complex<float> x(1.2, 3.4);
    c10::complex<float> l = ::log(x);
    float expected_real = ::log(std::abs(x));
    float expected_imag = std::arg(x);
    C10_ASSERT_NEAR(l.real(), expected_real, tol);
    C10_ASSERT_NEAR(l.imag(), expected_imag, tol);
  }
  {
    c10::complex<double> x(1.2, 3.4);
    c10::complex<double> l = std::log(x);
    // Fixed: expected values for the double case are kept in double
    // (the original narrowed them to float, losing precision).
    double expected_real = std::log(std::abs(x));
    double expected_imag = std::arg(x);
    C10_ASSERT_NEAR(l.real(), expected_real, tol);
    C10_ASSERT_NEAR(l.imag(), expected_imag, tol);
  }
  {
    c10::complex<double> x(1.2, 3.4);
    c10::complex<double> l = ::log(x);
    double expected_real = ::log(std::abs(x));
    double expected_imag = std::arg(x);
    C10_ASSERT_NEAR(l.real(), expected_real, tol);
    C10_ASSERT_NEAR(l.imag(), expected_imag, tol);
  }
}
C10_DEFINE_TEST(TestLog10, Rev) {
  // log10(10^x) = x
  // Checked for float/double and both std:: and ADL overloads.
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> l = std::log10(std::pow(float(10), x));
    C10_ASSERT_NEAR(l.real(), float(0.1), tol);
    C10_ASSERT_NEAR(l.imag(), float(1.2), tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> l = ::log10(::pow(float(10), x));
    C10_ASSERT_NEAR(l.real(), float(0.1), tol);
    C10_ASSERT_NEAR(l.imag(), float(1.2), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> l = std::log10(std::pow(double(10), x));
    C10_ASSERT_NEAR(l.real(), double(0.1), tol);
    C10_ASSERT_NEAR(l.imag(), double(1.2), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> l = ::log10(::pow(double(10), x));
    C10_ASSERT_NEAR(l.real(), double(0.1), tol);
    C10_ASSERT_NEAR(l.imag(), double(1.2), tol);
  }
}
C10_DEFINE_TEST(TestLog2, Rev) {
  // log2(2^x) = x
  // Checked for float/double and both std:: and ADL overloads.
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> l = std::log2(std::pow(float(2), x));
    C10_ASSERT_NEAR(l.real(), float(0.1), tol);
    C10_ASSERT_NEAR(l.imag(), float(1.2), tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    // Fixed for consistency: the ADL scope now uses ::pow like every other
    // ADL scope in this file (was std::pow), so it actually exercises the
    // unqualified overload.
    c10::complex<float> l = ::log2(::pow(float(2), x));
    C10_ASSERT_NEAR(l.real(), float(0.1), tol);
    C10_ASSERT_NEAR(l.imag(), float(1.2), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> l = std::log2(std::pow(double(2), x));
    C10_ASSERT_NEAR(l.real(), double(0.1), tol);
    C10_ASSERT_NEAR(l.imag(), double(1.2), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> l = ::log2(::pow(double(2), x));
    C10_ASSERT_NEAR(l.real(), double(0.1), tol);
    C10_ASSERT_NEAR(l.imag(), double(1.2), tol);
  }
}
C10_DEFINE_TEST(TestLog1p, Normal) {
  // Away from the origin, log1p(z) must agree with log(1 + z).
  {
    c10::complex<float> z(0.1, 1.2);
    c10::complex<float> actual = std::log1p(z);
    c10::complex<float> expected = std::log(1.0f + z);
    C10_ASSERT_NEAR(actual.real(), expected.real(), tol);
    C10_ASSERT_NEAR(actual.imag(), expected.imag(), tol);
  }
  {
    c10::complex<double> z(0.1, 1.2);
    c10::complex<double> actual = std::log1p(z);
    c10::complex<double> expected = std::log(1.0 + z);
    C10_ASSERT_NEAR(actual.real(), expected.real(), tol);
    C10_ASSERT_NEAR(actual.imag(), expected.imag(), tol);
  }
}
C10_DEFINE_TEST(TestLog1p, Small) {
  // log(1 + x) ~ x for |x| << 1
  // The ratio check (rather than an absolute check) is what makes this a
  // meaningful precision test for tiny inputs.
  {
    c10::complex<float> x(1e-9, 2e-9);
    c10::complex<float> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real() / x.real(), 1, tol);
    C10_ASSERT_NEAR(l.imag() / x.imag(), 1, tol);
  }
  {
    c10::complex<double> x(1e-100, 2e-100);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real() / x.real(), 1, tol);
    C10_ASSERT_NEAR(l.imag() / x.imag(), 1, tol);
  }
}
C10_DEFINE_TEST(TestLog1p, Extreme) {
  // log(1 + x) ~ x for |x| << 1 and in the brink of overflow / underflow
  {
    c10::complex<float> x(-1, 1e-30);
    c10::complex<float> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), -69.07755278982137, tol);
    C10_ASSERT_NEAR(l.imag(), 1.5707963267948966, tol);
  }
  {
    c10::complex<float> x(-1, 1e30);
    c10::complex<float> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 69.07755278982137, tol);
    C10_ASSERT_NEAR(l.imag(), 1.5707963267948966, tol);
  }
  {
    c10::complex<float> x(1e30, 1);
    c10::complex<float> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 69.07755278982137, tol);
    C10_ASSERT_NEAR(l.imag(), 1e-30, tol);
  }
  {
    c10::complex<float> x(1e-30, 1);
    c10::complex<float> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 0.34657359027997264, tol);
    C10_ASSERT_NEAR(l.imag(), 0.7853981633974483, tol);
  }
  {
    c10::complex<float> x(1e30, 1e30);
    c10::complex<float> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 69.42412638010134, tol);
    C10_ASSERT_NEAR(l.imag(), 0.7853981633974483, tol);
  }
  {
    c10::complex<float> x(1e-38, 1e-38);
    c10::complex<float> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 1e-38, tol);
    C10_ASSERT_NEAR(l.imag(), 1e-38, tol);
  }
  {
    c10::complex<float> x(1e-38, 2e-30);
    c10::complex<float> l = std::log1p(x);
    // Fixed expected real part: Re(log1p(a + bi)) ~ a + b^2/2 = 1e-38 here
    // (the original asserted 1e-30, which only passed because tol swamps it).
    C10_ASSERT_NEAR(l.real(), 1e-38, tol);
    C10_ASSERT_NEAR(l.imag(), 2e-30, tol);
  }
  {
    c10::complex<double> x(-1, 1e-250);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), -575.6462732485114, tol);
    C10_ASSERT_NEAR(l.imag(), 1.5707963267948966, tol);
  }
  {
    c10::complex<double> x(-1, 1e250);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 575.6462732485114, tol);
    C10_ASSERT_NEAR(l.imag(), 1.5707963267948966, tol);
  }
  {
    c10::complex<double> x(1e250, 1);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 575.6462732485114, tol);
    C10_ASSERT_NEAR(l.imag(), 1e-250, tol);
  }
  {
    c10::complex<double> x(1e-250, 1);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 0.34657359027997264, tol);
    C10_ASSERT_NEAR(l.imag(), 0.7853981633974483, tol);
  }
  {
    c10::complex<double> x(1e250, 1e250);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 575.9928468387914, tol);
    C10_ASSERT_NEAR(l.imag(), 0.7853981633974483, tol);
  }
  {
    c10::complex<double> x(1e-250, 1e-250);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 1e-250, tol);
    C10_ASSERT_NEAR(l.imag(), 1e-250, tol);
  }
  {
    c10::complex<double> x(1e-250, 2e-250);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 1e-250, tol);
    C10_ASSERT_NEAR(l.imag(), 2e-250, tol);
  }
  {
    c10::complex<double> x(2e-308, 1.5e-250);
    c10::complex<double> l = std::log1p(x);
    C10_ASSERT_NEAR(l.real(), 2e-308, tol);
    // Fixed expected imag: Im(log1p(a + bi)) ~ atan2(b, 1 + a) = 1.5e-250
    // for this input (the original had a 1.5e-308 typo).
    C10_ASSERT_NEAR(l.imag(), 1.5e-250, tol);
  }
}
// Power functions
C10_DEFINE_TEST(TestPowSqrt, Equal) {
  // x^0.5 = sqrt(x)
  // Checked for float/double and both std:: and ADL overloads.
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = std::pow(x, float(0.5));
    c10::complex<float> z = std::sqrt(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = ::pow(x, float(0.5));
    c10::complex<float> z = ::sqrt(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = std::pow(x, double(0.5));
    c10::complex<double> z = std::sqrt(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = ::pow(x, double(0.5));
    c10::complex<double> z = ::sqrt(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
}
C10_DEFINE_TEST(TestPow, Square) {
  // Raising to the power 2 must match plain self-multiplication, for both
  // precisions and for both the std:: and ADL overloads.
  {
    c10::complex<float> base(0.1, 1.2);
    c10::complex<float> powered = std::pow(base, float(2));
    c10::complex<float> squared = base * base;
    C10_ASSERT_NEAR(powered.real(), squared.real(), tol);
    C10_ASSERT_NEAR(powered.imag(), squared.imag(), tol);
  }
  {
    c10::complex<float> base(0.1, 1.2);
    c10::complex<float> powered = ::pow(base, float(2));
    c10::complex<float> squared = base * base;
    C10_ASSERT_NEAR(powered.real(), squared.real(), tol);
    C10_ASSERT_NEAR(powered.imag(), squared.imag(), tol);
  }
  {
    c10::complex<double> base(0.1, 1.2);
    c10::complex<double> powered = std::pow(base, double(2));
    c10::complex<double> squared = base * base;
    C10_ASSERT_NEAR(powered.real(), squared.real(), tol);
    C10_ASSERT_NEAR(powered.imag(), squared.imag(), tol);
  }
  {
    c10::complex<double> base(0.1, 1.2);
    c10::complex<double> powered = ::pow(base, double(2));
    c10::complex<double> squared = base * base;
    C10_ASSERT_NEAR(powered.real(), squared.real(), tol);
    C10_ASSERT_NEAR(powered.imag(), squared.imag(), tol);
  }
}
// Trigonometric functions and hyperbolic functions
C10_DEFINE_TEST(TestSinCosSinhCosh, Identity) {
  // sin(x + i * y) = sin(x) * cosh(y) + i * cos(x) * sinh(y)
  // cos(x + i * y) = cos(x) * cosh(y) - i * sin(x) * sinh(y)
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = std::sin(x);
    float expected_real = std::sin(x.real()) * std::cosh(x.imag());
    float expected_imag = std::cos(x.real()) * std::sinh(x.imag());
    C10_ASSERT_NEAR(y.real(), expected_real, tol);
    C10_ASSERT_NEAR(y.imag(), expected_imag, tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = ::sin(x);
    float expected_real = ::sin(x.real()) * ::cosh(x.imag());
    float expected_imag = ::cos(x.real()) * ::sinh(x.imag());
    C10_ASSERT_NEAR(y.real(), expected_real, tol);
    C10_ASSERT_NEAR(y.imag(), expected_imag, tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = std::cos(x);
    float expected_real = std::cos(x.real()) * std::cosh(x.imag());
    float expected_imag = -std::sin(x.real()) * std::sinh(x.imag());
    C10_ASSERT_NEAR(y.real(), expected_real, tol);
    C10_ASSERT_NEAR(y.imag(), expected_imag, tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = ::cos(x);
    float expected_real = ::cos(x.real()) * ::cosh(x.imag());
    float expected_imag = -::sin(x.real()) * ::sinh(x.imag());
    C10_ASSERT_NEAR(y.real(), expected_real, tol);
    C10_ASSERT_NEAR(y.imag(), expected_imag, tol);
  }
  // Fixed: the double scopes below kept their expected values in `float`,
  // silently discarding precision; they are now `double`.
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = std::sin(x);
    double expected_real = std::sin(x.real()) * std::cosh(x.imag());
    double expected_imag = std::cos(x.real()) * std::sinh(x.imag());
    C10_ASSERT_NEAR(y.real(), expected_real, tol);
    C10_ASSERT_NEAR(y.imag(), expected_imag, tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = ::sin(x);
    double expected_real = ::sin(x.real()) * ::cosh(x.imag());
    double expected_imag = ::cos(x.real()) * ::sinh(x.imag());
    C10_ASSERT_NEAR(y.real(), expected_real, tol);
    C10_ASSERT_NEAR(y.imag(), expected_imag, tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = std::cos(x);
    double expected_real = std::cos(x.real()) * std::cosh(x.imag());
    double expected_imag = -std::sin(x.real()) * std::sinh(x.imag());
    C10_ASSERT_NEAR(y.real(), expected_real, tol);
    C10_ASSERT_NEAR(y.imag(), expected_imag, tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = ::cos(x);
    double expected_real = ::cos(x.real()) * ::cosh(x.imag());
    double expected_imag = -::sin(x.real()) * ::sinh(x.imag());
    C10_ASSERT_NEAR(y.real(), expected_real, tol);
    C10_ASSERT_NEAR(y.imag(), expected_imag, tol);
  }
}
C10_DEFINE_TEST(TestTan, Identity) {
  // tan(x) = sin(x) / cos(x)
  // Checked for float/double and both std:: and ADL overloads.
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = std::tan(x);
    c10::complex<float> z = std::sin(x) / std::cos(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = ::tan(x);
    c10::complex<float> z = ::sin(x) / ::cos(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = std::tan(x);
    c10::complex<double> z = std::sin(x) / std::cos(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = ::tan(x);
    c10::complex<double> z = ::sin(x) / ::cos(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
}
C10_DEFINE_TEST(TestTanh, Identity) {
  // tanh(x) = sinh(x) / cosh(x)
  // Checked for float/double and both std:: and ADL overloads.
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = std::tanh(x);
    c10::complex<float> z = std::sinh(x) / std::cosh(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<float> x(0.1, 1.2);
    c10::complex<float> y = ::tanh(x);
    c10::complex<float> z = ::sinh(x) / ::cosh(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = std::tanh(x);
    c10::complex<double> z = std::sinh(x) / std::cosh(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
  {
    c10::complex<double> x(0.1, 1.2);
    c10::complex<double> y = ::tanh(x);
    c10::complex<double> z = ::sinh(x) / ::cosh(x);
    C10_ASSERT_NEAR(y.real(), z.real(), tol);
    C10_ASSERT_NEAR(y.imag(), z.imag(), tol);
  }
}
// Rev trigonometric functions
C10_DEFINE_TEST(TestRevTrigonometric, Rev) {
  // asin(sin(x)) = x
  // acos(cos(x)) = x
  // atan(tan(x)) = x
  // (valid for this x because it lies inside the principal branch)
  {
    c10::complex<float> x(0.5, 0.6);
    c10::complex<float> s = std::sin(x);
    c10::complex<float> ss = std::asin(s);
    c10::complex<float> c = std::cos(x);
    c10::complex<float> cc = std::acos(c);
    c10::complex<float> t = std::tan(x);
    c10::complex<float> tt = std::atan(t);
    C10_ASSERT_NEAR(x.real(), ss.real(), tol);
    C10_ASSERT_NEAR(x.imag(), ss.imag(), tol);
    C10_ASSERT_NEAR(x.real(), cc.real(), tol);
    C10_ASSERT_NEAR(x.imag(), cc.imag(), tol);
    C10_ASSERT_NEAR(x.real(), tt.real(), tol);
    C10_ASSERT_NEAR(x.imag(), tt.imag(), tol);
  }
  {
    c10::complex<float> x(0.5, 0.6);
    c10::complex<float> s = ::sin(x);
    c10::complex<float> ss = ::asin(s);
    c10::complex<float> c = ::cos(x);
    c10::complex<float> cc = ::acos(c);
    c10::complex<float> t = ::tan(x);
    c10::complex<float> tt = ::atan(t);
    C10_ASSERT_NEAR(x.real(), ss.real(), tol);
    C10_ASSERT_NEAR(x.imag(), ss.imag(), tol);
    C10_ASSERT_NEAR(x.real(), cc.real(), tol);
    C10_ASSERT_NEAR(x.imag(), cc.imag(), tol);
    C10_ASSERT_NEAR(x.real(), tt.real(), tol);
    C10_ASSERT_NEAR(x.imag(), tt.imag(), tol);
  }
  {
    c10::complex<double> x(0.5, 0.6);
    c10::complex<double> s = std::sin(x);
    c10::complex<double> ss = std::asin(s);
    c10::complex<double> c = std::cos(x);
    c10::complex<double> cc = std::acos(c);
    c10::complex<double> t = std::tan(x);
    c10::complex<double> tt = std::atan(t);
    C10_ASSERT_NEAR(x.real(), ss.real(), tol);
    C10_ASSERT_NEAR(x.imag(), ss.imag(), tol);
    C10_ASSERT_NEAR(x.real(), cc.real(), tol);
    C10_ASSERT_NEAR(x.imag(), cc.imag(), tol);
    C10_ASSERT_NEAR(x.real(), tt.real(), tol);
    C10_ASSERT_NEAR(x.imag(), tt.imag(), tol);
  }
  {
    c10::complex<double> x(0.5, 0.6);
    c10::complex<double> s = ::sin(x);
    c10::complex<double> ss = ::asin(s);
    c10::complex<double> c = ::cos(x);
    c10::complex<double> cc = ::acos(c);
    c10::complex<double> t = ::tan(x);
    c10::complex<double> tt = ::atan(t);
    C10_ASSERT_NEAR(x.real(), ss.real(), tol);
    C10_ASSERT_NEAR(x.imag(), ss.imag(), tol);
    C10_ASSERT_NEAR(x.real(), cc.real(), tol);
    C10_ASSERT_NEAR(x.imag(), cc.imag(), tol);
    C10_ASSERT_NEAR(x.real(), tt.real(), tol);
    C10_ASSERT_NEAR(x.imag(), tt.imag(), tol);
  }
}
// Rev hyperbolic functions
C10_DEFINE_TEST(TestRevHyperbolic, Rev) {
  // asinh(sinh(x)) = x
  // acosh(cosh(x)) = x
  // atanh(tanh(x)) = x
  // (valid for this x because it lies inside the principal branch)
  {
    c10::complex<float> x(0.5, 0.6)
    c10::complex<float> s = std::sinh(x);
    c10::complex<float> ss = std::asinh(s);
    c10::complex<float> c = std::cosh(x);
    c10::complex<float> cc = std::acosh(c);
    c10::complex<float> t = std::tanh(x);
    c10::complex<float> tt = std::atanh(t);
    C10_ASSERT_NEAR(x.real(), ss.real(), tol);
    C10_ASSERT_NEAR(x.imag(), ss.imag(), tol);
    C10_ASSERT_NEAR(x.real(), cc.real(), tol);
    C10_ASSERT_NEAR(x.imag(), cc.imag(), tol);
    C10_ASSERT_NEAR(x.real(), tt.real(), tol);
    C10_ASSERT_NEAR(x.imag(), tt.imag(), tol);
  }
  {
    c10::complex<float> x(0.5, 0.6);
    c10::complex<float> s = ::sinh(x);
    c10::complex<float> ss = ::asinh(s);
    c10::complex<float> c = ::cosh(x);
    c10::complex<float> cc = ::acosh(c);
    c10::complex<float> t = ::tanh(x);
    c10::complex<float> tt = ::atanh(t);
    C10_ASSERT_NEAR(x.real(), ss.real(), tol);
    C10_ASSERT_NEAR(x.imag(), ss.imag(), tol);
    C10_ASSERT_NEAR(x.real(), cc.real(), tol);
    C10_ASSERT_NEAR(x.imag(), cc.imag(), tol);
    C10_ASSERT_NEAR(x.real(), tt.real(), tol);
    C10_ASSERT_NEAR(x.imag(), tt.imag(), tol);
  }
  {
    c10::complex<double> x(0.5, 0.6);
    c10::complex<double> s = std::sinh(x);
    c10::complex<double> ss = std::asinh(s);
    c10::complex<double> c = std::cosh(x);
    c10::complex<double> cc = std::acosh(c);
    c10::complex<double> t = std::tanh(x);
    c10::complex<double> tt = std::atanh(t);
    C10_ASSERT_NEAR(x.real(), ss.real(), tol);
    C10_ASSERT_NEAR(x.imag(), ss.imag(), tol);
    C10_ASSERT_NEAR(x.real(), cc.real(), tol);
    C10_ASSERT_NEAR(x.imag(), cc.imag(), tol);
    C10_ASSERT_NEAR(x.real(), tt.real(), tol);
    C10_ASSERT_NEAR(x.imag(), tt.imag(), tol);
  }
  {
    c10::complex<double> x(0.5, 0.6);
    c10::complex<double> s = ::sinh(x);
    c10::complex<double> ss = ::asinh(s);
    c10::complex<double> c = ::cosh(x);
    c10::complex<double> cc = ::acosh(c);
    c10::complex<double> t = ::tanh(x);
    c10::complex<double> tt = ::atanh(t);
    C10_ASSERT_NEAR(x.real(), ss.real(), tol);
    C10_ASSERT_NEAR(x.imag(), ss.imag(), tol);
    C10_ASSERT_NEAR(x.real(), cc.real(), tol);
    C10_ASSERT_NEAR(x.imag(), cc.imag(), tol);
    C10_ASSERT_NEAR(x.real(), tt.real(), tol);
    C10_ASSERT_NEAR(x.imag(), tt.imag(), tol);
  }
}
| 21,964
| 31.881737
| 78
|
h
|
null |
pytorch-main/c10/test/util/complex_test_common.h
|
#include <c10/macros/Macros.h>
#include <c10/util/complex.h>
#include <c10/util/hash.h>
#include <gtest/gtest.h>
#include <sstream>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#if (defined(__CUDACC__) || defined(__HIPCC__))
#define MAYBE_GLOBAL __global__
#else
#define MAYBE_GLOBAL
#endif
#define PI 3.141592653589793238463
namespace memory {
// c10::complex must occupy exactly two adjacent scalars (layout guarantee).
MAYBE_GLOBAL void test_size() {
  static_assert(sizeof(c10::complex<float>) == 2 * sizeof(float), "");
  static_assert(sizeof(c10::complex<double>) == 2 * sizeof(double), "");
}
// c10::complex is required to be aligned to its full (two-scalar) size.
MAYBE_GLOBAL void test_align() {
  static_assert(alignof(c10::complex<float>) == 2 * sizeof(float), "");
  static_assert(alignof(c10::complex<double>) == 2 * sizeof(double), "");
}
// Standard layout is what makes the reinterpret_cast round-trips below legal.
MAYBE_GLOBAL void test_pod() {
  static_assert(std::is_standard_layout<c10::complex<float>>::value, "");
  static_assert(std::is_standard_layout<c10::complex<double>>::value, "");
}
// c10::complex<T> and std::complex<T> share layout, so pointers may be
// reinterpret_cast between them in both directions.
TEST(TestMemory, ReinterpretCast) {
  {
    std::complex<float> z(1, 2);
    c10::complex<float> zz = *reinterpret_cast<c10::complex<float>*>(&z);
    ASSERT_EQ(zz.real(), float(1));
    ASSERT_EQ(zz.imag(), float(2));
  }
  {
    c10::complex<float> z(3, 4);
    std::complex<float> zz = *reinterpret_cast<std::complex<float>*>(&z);
    ASSERT_EQ(zz.real(), float(3));
    ASSERT_EQ(zz.imag(), float(4));
  }
  {
    std::complex<double> z(1, 2);
    c10::complex<double> zz = *reinterpret_cast<c10::complex<double>*>(&z);
    ASSERT_EQ(zz.real(), double(1));
    ASSERT_EQ(zz.imag(), double(2));
  }
  {
    c10::complex<double> z(3, 4);
    std::complex<double> zz = *reinterpret_cast<std::complex<double>*>(&z);
    ASSERT_EQ(zz.real(), double(3));
    ASSERT_EQ(zz.imag(), double(4));
  }
}
#if defined(__CUDACC__) || defined(__HIPCC__)
// Same layout-compatibility round-trip as above, against thrust::complex.
TEST(TestMemory, ThrustReinterpretCast) {
  {
    thrust::complex<float> z(1, 2);
    c10::complex<float> zz = *reinterpret_cast<c10::complex<float>*>(&z);
    ASSERT_EQ(zz.real(), float(1));
    ASSERT_EQ(zz.imag(), float(2));
  }
  {
    c10::complex<float> z(3, 4);
    thrust::complex<float> zz = *reinterpret_cast<thrust::complex<float>*>(&z);
    ASSERT_EQ(zz.real(), float(3));
    ASSERT_EQ(zz.imag(), float(4));
  }
  {
    thrust::complex<double> z(1, 2);
    c10::complex<double> zz = *reinterpret_cast<c10::complex<double>*>(&z);
    ASSERT_EQ(zz.real(), double(1));
    ASSERT_EQ(zz.imag(), double(2));
  }
  {
    c10::complex<double> z(3, 4);
    thrust::complex<double> zz =
        *reinterpret_cast<thrust::complex<double>*>(&z);
    ASSERT_EQ(zz.real(), double(3));
    ASSERT_EQ(zz.imag(), double(4));
  }
}
#endif
} // namespace memory
namespace constructors {
template <typename scalar_t>
C10_HOST_DEVICE void test_construct_from_scalar() {
  // Two-scalar, one-scalar, and default construction must set real/imag to
  // the given values or zero — all verified at compile time.
  constexpr scalar_t re = scalar_t(1.23);
  constexpr scalar_t im = scalar_t(4.56);
  constexpr scalar_t zero = scalar_t();
  static_assert(c10::complex<scalar_t>(re, im).real() == re, "");
  static_assert(c10::complex<scalar_t>(re, im).imag() == im, "");
  static_assert(c10::complex<scalar_t>(re).real() == re, "");
  static_assert(c10::complex<scalar_t>(re).imag() == zero, "");
  static_assert(c10::complex<scalar_t>().real() == zero, "");
  static_assert(c10::complex<scalar_t>().imag() == zero, "");
}
// Converting construction from c10::complex<other_t> must convert each
// component through scalar_t's own conversion.
template <typename scalar_t, typename other_t>
C10_HOST_DEVICE void test_construct_from_other() {
  constexpr other_t num1 = other_t(1.23);
  constexpr other_t num2 = other_t(4.56);
  constexpr scalar_t num3 = scalar_t(num1);
  constexpr scalar_t num4 = scalar_t(num2);
  static_assert(
      c10::complex<scalar_t>(c10::complex<other_t>(num1, num2)).real() == num3,
      "");
  static_assert(
      c10::complex<scalar_t>(c10::complex<other_t>(num1, num2)).imag() == num4,
      "");
}
// float -> double conversion must be implicit; the narrowing double -> float
// direction must be constructible but NOT implicitly convertible.
MAYBE_GLOBAL void test_convert_constructors() {
  test_construct_from_scalar<float>();
  test_construct_from_scalar<double>();
  static_assert(
      std::is_convertible<c10::complex<float>, c10::complex<float>>::value, "");
  static_assert(
      !std::is_convertible<c10::complex<double>, c10::complex<float>>::value,
      "");
  static_assert(
      std::is_convertible<c10::complex<float>, c10::complex<double>>::value,
      "");
  static_assert(
      std::is_convertible<c10::complex<double>, c10::complex<double>>::value,
      "");
  static_assert(
      std::is_constructible<c10::complex<float>, c10::complex<float>>::value,
      "");
  static_assert(
      std::is_constructible<c10::complex<double>, c10::complex<float>>::value,
      "");
  static_assert(
      std::is_constructible<c10::complex<float>, c10::complex<double>>::value,
      "");
  static_assert(
      std::is_constructible<c10::complex<double>, c10::complex<double>>::value,
      "");
  test_construct_from_other<float, float>();
  test_construct_from_other<float, double>();
  test_construct_from_other<double, float>();
  test_construct_from_other<double, double>();
}
// Construction from std::complex<scalar_t> must preserve both components.
template <typename scalar_t>
C10_HOST_DEVICE void test_construct_from_std() {
  constexpr scalar_t num1 = scalar_t(1.23);
  constexpr scalar_t num2 = scalar_t(4.56);
  static_assert(
      c10::complex<scalar_t>(std::complex<scalar_t>(num1, num2)).real() == num1,
      "");
  static_assert(
      c10::complex<scalar_t>(std::complex<scalar_t>(num1, num2)).imag() == num2,
      "");
}
// Driver: instantiate the std::complex construction checks for both scalars.
MAYBE_GLOBAL void test_std_conversion() {
  test_construct_from_std<float>();
  test_construct_from_std<double>();
}
#if defined(__CUDACC__) || defined(__HIPCC__)
// Construction from thrust::complex must preserve both components
// (runtime check: thrust construction is not constexpr).
template <typename scalar_t>
void test_construct_from_thrust() {
  constexpr scalar_t num1 = scalar_t(1.23);
  constexpr scalar_t num2 = scalar_t(4.56);
  ASSERT_EQ(
      c10::complex<scalar_t>(thrust::complex<scalar_t>(num1, num2)).real(),
      num1);
  ASSERT_EQ(
      c10::complex<scalar_t>(thrust::complex<scalar_t>(num1, num2)).imag(),
      num2);
}
// Driver: instantiate the thrust construction checks for both scalars.
TEST(TestConstructors, FromThrust) {
  test_construct_from_thrust<float>();
  test_construct_from_thrust<double>();
}
#endif
TEST(TestConstructors, UnorderedMap) {
  // c10::hash must make c10::complex usable as an unordered_map key.
  std::unordered_map<
      c10::complex<double>,
      c10::complex<double>,
      c10::hash<c10::complex<double>>>
      map;
  const auto k1 = c10::complex<double>(2.5, 3);
  const auto k2 = c10::complex<double>(2, 0);
  const auto v1 = c10::complex<double>(2, -3.2);
  const auto v2 = c10::complex<double>(0, -3);
  map[k1] = v1;
  map[k2] = v2;
  ASSERT_EQ(map[k1], v1);
  ASSERT_EQ(map[k2], v2);
}
} // namespace constructors
namespace assignment {
template <typename scalar_t>
constexpr c10::complex<scalar_t> one() {
  // Start from a nonzero value so the scalar assignment is observable.
  c10::complex<scalar_t> c(3, 4);
  c = scalar_t(1);
  return c;
}
// Assigning a real scalar must set real() and zero out imag().
MAYBE_GLOBAL void test_assign_real() {
  static_assert(one<float>().real() == float(1), "");
  static_assert(one<float>().imag() == float(), "");
  static_assert(one<double>().real() == double(1), "")
  static_assert(one<double>().imag() == double(), "");
}
// Chained assignment from c10::complex<float>(1, 2) into both precisions.
constexpr std::tuple<c10::complex<double>, c10::complex<float>> one_two() {
  constexpr c10::complex<float> src(1, 2);
  c10::complex<double> ret0;
  c10::complex<float> ret1;
  ret0 = ret1 = src;
  return std::make_tuple(ret0, ret1);
}
// Both targets of the chained assignment must end up as (1, 2).
MAYBE_GLOBAL void test_assign_other() {
  constexpr auto tup = one_two();
  static_assert(std::get<c10::complex<double>>(tup).real() == double(1), "");
  static_assert(std::get<c10::complex<double>>(tup).imag() == double(2), "");
  static_assert(std::get<c10::complex<float>>(tup).real() == float(1), "");
  static_assert(std::get<c10::complex<float>>(tup).imag() == float(2), "");
}
// Chained assignment from a std::complex<float> source into both precisions.
// NOTE(review): src is (1, 1) here while one_two() uses (1, 2) — confirm the
// imaginary part wasn't meant to be 2 for parity with the sibling helper.
constexpr std::tuple<c10::complex<double>, c10::complex<float>> one_two_std() {
  constexpr std::complex<float> src(1, 1);
  c10::complex<double> ret0;
  c10::complex<float> ret1;
  ret0 = ret1 = src;
  return std::make_tuple(ret0, ret1);
}
// Assignment FROM std::complex must propagate through a chained assignment
// into both c10::complex<double> and c10::complex<float>.
MAYBE_GLOBAL void test_assign_std() {
  // Fixed: the original called one_two() (assignment from c10::complex),
  // so assignment from std::complex was never actually exercised. The
  // expected values match one_two_std()'s src(1, 1).
  constexpr auto tup = one_two_std();
  static_assert(std::get<c10::complex<double>>(tup).real() == double(1), "");
  static_assert(std::get<c10::complex<double>>(tup).imag() == double(1), "");
  static_assert(std::get<c10::complex<float>>(tup).real() == float(1), "");
  static_assert(std::get<c10::complex<float>>(tup).imag() == float(1), "");
}
#if defined(__CUDACC__) || defined(__HIPCC__)
// Chained assignment from a thrust::complex<float>(1, 2) source (runtime:
// thrust assignment is not constexpr).
C10_HOST_DEVICE std::tuple<c10::complex<double>, c10::complex<float>>
one_two_thrust() {
  thrust::complex<float> src(1, 2);
  c10::complex<double> ret0;
  c10::complex<float> ret1;
  ret0 = ret1 = src;
  return std::make_tuple(ret0, ret1);
}
// Both targets of the thrust chained assignment must end up as (1, 2).
TEST(TestAssignment, FromThrust) {
  auto tup = one_two_thrust();
  ASSERT_EQ(std::get<c10::complex<double>>(tup).real(), double(1));
  ASSERT_EQ(std::get<c10::complex<double>>(tup).imag(), double(2));
  ASSERT_EQ(std::get<c10::complex<float>>(tup).real(), float(1));
  ASSERT_EQ(std::get<c10::complex<float>>(tup).imag(), float(2));
}
#endif
} // namespace assignment
namespace literals {
// _if / _id user-defined literals must yield purely-imaginary float/double
// complex values.
MAYBE_GLOBAL void test_complex_literals() {
  using namespace c10::complex_literals;
  static_assert(std::is_same<decltype(0.5_if), c10::complex<float>>::value, "");
  static_assert((0.5_if).real() == float(), "");
  static_assert((0.5_if).imag() == float(0.5), "");
  static_assert(
      std::is_same<decltype(0.5_id), c10::complex<double>>::value, "");
  // Fixed: the _id (double) literal is compared against double values
  // (the original compared against float()/float(0.5)).
  static_assert((0.5_id).real() == double(), "");
  static_assert((0.5_id).imag() == double(0.5), "");
  static_assert(std::is_same<decltype(1_if), c10::complex<float>>::value, "");
  static_assert((1_if).real() == float(), "");
  static_assert((1_if).imag() == float(1), "");
  static_assert(std::is_same<decltype(1_id), c10::complex<double>>::value, "");
  static_assert((1_id).real() == double(), "");
  static_assert((1_id).imag() == double(1), "");
}
} // namespace literals
namespace real_imag {
template <typename scalar_t>
constexpr c10::complex<scalar_t> zero_one() {
  // Default-construct (0, 0), then set only the imaginary component.
  c10::complex<scalar_t> c;
  c.imag(scalar_t(1));
  return c;
}
template <typename scalar_t>
constexpr c10::complex<scalar_t> one_zero() {
  // Default-construct (0, 0), then set only the real component.
  c10::complex<scalar_t> c;
  c.real(scalar_t(1));
  return c;
}
// The real()/imag() setters must change exactly one component each,
// and must be usable in constexpr context.
MAYBE_GLOBAL void test_real_imag_modify() {
  static_assert(zero_one<float>().real() == float(0), "");
  static_assert(zero_one<float>().imag() == float(1), "");
  static_assert(zero_one<double>().real() == double(0), "");
  static_assert(zero_one<double>().imag() == double(1), "");
  static_assert(one_zero<float>().real() == float(1), "");
  static_assert(one_zero<float>().imag() == float(0), "");
  static_assert(one_zero<double>().real() == double(1), "");
  static_assert(one_zero<double>().imag() == double(0), "");
}
} // namespace real_imag
namespace arithmetic_assign {
// The constexpr helpers below each apply one compound-assignment operator to
// a fixed (2+2i) starting value so the result can be validated with
// static_assert: p = +=, m = -=, t = *=, d = /=.
template <typename scalar_t>
constexpr c10::complex<scalar_t> p(scalar_t value) {
  c10::complex<scalar_t> result(scalar_t(2), scalar_t(2));
  result += value;
  return result;
}
template <typename scalar_t>
constexpr c10::complex<scalar_t> m(scalar_t value) {
  c10::complex<scalar_t> result(scalar_t(2), scalar_t(2));
  result -= value;
  return result;
}
template <typename scalar_t>
constexpr c10::complex<scalar_t> t(scalar_t value) {
  c10::complex<scalar_t> result(scalar_t(2), scalar_t(2));
  result *= value;
  return result;
}
template <typename scalar_t>
constexpr c10::complex<scalar_t> d(scalar_t value) {
  c10::complex<scalar_t> result(scalar_t(2), scalar_t(2));
  result /= value;
  return result;
}
// Compile-time checks of (2+2i) op real-scalar for each compound assignment.
template <typename scalar_t>
C10_HOST_DEVICE void test_arithmetic_assign_scalar() {
  constexpr c10::complex<scalar_t> x = p(scalar_t(1));
  static_assert(x.real() == scalar_t(3), "");
  static_assert(x.imag() == scalar_t(2), "");
  constexpr c10::complex<scalar_t> y = m(scalar_t(1));
  static_assert(y.real() == scalar_t(1), "");
  static_assert(y.imag() == scalar_t(2), "");
  constexpr c10::complex<scalar_t> z = t(scalar_t(2));
  static_assert(z.real() == scalar_t(4), "");
  static_assert(z.imag() == scalar_t(4), "");
  // Note: this local `t` shadows the t() helper template declared above.
  constexpr c10::complex<scalar_t> t = d(scalar_t(2));
  static_assert(t.real() == scalar_t(1), "");
  static_assert(t.imag() == scalar_t(1), "");
}
// Same four helpers, overloaded for a complex right-hand side (possibly of a
// different precision than the left-hand side).
template <typename scalar_t, typename rhs_t>
constexpr c10::complex<scalar_t> p(
    scalar_t real,
    scalar_t imag,
    c10::complex<rhs_t> rhs) {
  c10::complex<scalar_t> result(real, imag);
  result += rhs;
  return result;
}
template <typename scalar_t, typename rhs_t>
constexpr c10::complex<scalar_t> m(
    scalar_t real,
    scalar_t imag,
    c10::complex<rhs_t> rhs) {
  c10::complex<scalar_t> result(real, imag);
  result -= rhs;
  return result;
}
template <typename scalar_t, typename rhs_t>
constexpr c10::complex<scalar_t> t(
    scalar_t real,
    scalar_t imag,
    c10::complex<rhs_t> rhs) {
  c10::complex<scalar_t> result(real, imag);
  result *= rhs;
  return result;
}
template <typename scalar_t, typename rhs_t>
constexpr c10::complex<scalar_t> d(
    scalar_t real,
    scalar_t imag,
    c10::complex<rhs_t> rhs) {
  c10::complex<scalar_t> result(real, imag);
  result /= rhs;
  return result;
}
// Compile-time checks of compound assignment with a complex rhs; 1.0_if /
// 1.0_id are the imaginary unit in float / double precision, so this also
// covers mixed-precision (scalar_t vs rhs precision) assignment.
template <typename scalar_t>
C10_HOST_DEVICE void test_arithmetic_assign_complex() {
  using namespace c10::complex_literals;
  constexpr c10::complex<scalar_t> x2 = p(scalar_t(2), scalar_t(2), 1.0_if);
  static_assert(x2.real() == scalar_t(2), "");
  static_assert(x2.imag() == scalar_t(3), "");
  constexpr c10::complex<scalar_t> x3 = p(scalar_t(2), scalar_t(2), 1.0_id);
  static_assert(x3.real() == scalar_t(2), "");
  // this test is skipped due to a bug in constexpr evaluation
  // in nvcc. This bug has already been fixed since CUDA 11.2
#if !defined(__CUDACC__) || (defined(CUDA_VERSION) && CUDA_VERSION >= 11020)
  static_assert(x3.imag() == scalar_t(3), "");
#endif
  constexpr c10::complex<scalar_t> y2 = m(scalar_t(2), scalar_t(2), 1.0_if);
  static_assert(y2.real() == scalar_t(2), "");
  static_assert(y2.imag() == scalar_t(1), "");
  constexpr c10::complex<scalar_t> y3 = m(scalar_t(2), scalar_t(2), 1.0_id);
  static_assert(y3.real() == scalar_t(2), "");
  // this test is skipped due to a bug in constexpr evaluation
  // in nvcc. This bug has already been fixed since CUDA 11.2
#if !defined(__CUDACC__) || (defined(CUDA_VERSION) && CUDA_VERSION >= 11020)
  static_assert(y3.imag() == scalar_t(1), "");
#endif
  // (1-2i) *= i  ->  2+i
  constexpr c10::complex<scalar_t> z2 = t(scalar_t(1), scalar_t(-2), 1.0_if);
  static_assert(z2.real() == scalar_t(2), "");
  static_assert(z2.imag() == scalar_t(1), "");
  constexpr c10::complex<scalar_t> z3 = t(scalar_t(1), scalar_t(-2), 1.0_id);
  static_assert(z3.real() == scalar_t(2), "");
  static_assert(z3.imag() == scalar_t(1), "");
  // (-1+2i) /= i  ->  2+i
  constexpr c10::complex<scalar_t> t2 = d(scalar_t(-1), scalar_t(2), 1.0_if);
  static_assert(t2.real() == scalar_t(2), "");
  static_assert(t2.imag() == scalar_t(1), "");
  constexpr c10::complex<scalar_t> t3 = d(scalar_t(-1), scalar_t(2), 1.0_id);
  static_assert(t3.real() == scalar_t(2), "");
  static_assert(t3.imag() == scalar_t(1), "");
}
// Instantiates the compile-time checks for both precisions.
MAYBE_GLOBAL void test_arithmetic_assign() {
  test_arithmetic_assign_scalar<float>();
  test_arithmetic_assign_scalar<double>();
  test_arithmetic_assign_complex<float>();
  test_arithmetic_assign_complex<double>();
}
} // namespace arithmetic_assign
namespace arithmetic {
// Compile-time checks of unary +/- and binary +, -, *, / for complex/complex,
// complex/scalar and scalar/complex operand combinations.
template <typename scalar_t>
C10_HOST_DEVICE void test_arithmetic_() {
  static_assert(
      c10::complex<scalar_t>(1, 2) == +c10::complex<scalar_t>(1, 2), "");
  static_assert(
      c10::complex<scalar_t>(-1, -2) == -c10::complex<scalar_t>(1, 2), "");
  static_assert(
      c10::complex<scalar_t>(1, 2) + c10::complex<scalar_t>(3, 4) ==
          c10::complex<scalar_t>(4, 6),
      "");
  static_assert(
      c10::complex<scalar_t>(1, 2) + scalar_t(3) ==
          c10::complex<scalar_t>(4, 2),
      "");
  static_assert(
      scalar_t(3) + c10::complex<scalar_t>(1, 2) ==
          c10::complex<scalar_t>(4, 2),
      "");
  static_assert(
      c10::complex<scalar_t>(1, 2) - c10::complex<scalar_t>(3, 4) ==
          c10::complex<scalar_t>(-2, -2),
      "");
  static_assert(
      c10::complex<scalar_t>(1, 2) - scalar_t(3) ==
          c10::complex<scalar_t>(-2, 2),
      "");
  static_assert(
      scalar_t(3) - c10::complex<scalar_t>(1, 2) ==
          c10::complex<scalar_t>(2, -2),
      "");
  // (1+2i)(3+4i) = 3 + 4i + 6i - 8 = -5 + 10i
  static_assert(
      c10::complex<scalar_t>(1, 2) * c10::complex<scalar_t>(3, 4) ==
          c10::complex<scalar_t>(-5, 10),
      "");
  static_assert(
      c10::complex<scalar_t>(1, 2) * scalar_t(3) ==
          c10::complex<scalar_t>(3, 6),
      "");
  static_assert(
      scalar_t(3) * c10::complex<scalar_t>(1, 2) ==
          c10::complex<scalar_t>(3, 6),
      "");
  static_assert(
      c10::complex<scalar_t>(-5, 10) / c10::complex<scalar_t>(3, 4) ==
          c10::complex<scalar_t>(1, 2),
      "");
  static_assert(
      c10::complex<scalar_t>(5, 10) / scalar_t(5) ==
          c10::complex<scalar_t>(1, 2),
      "");
  // 25/(3+4i) = 25(3-4i)/|3+4i|^2 = 3-4i
  static_assert(
      scalar_t(25) / c10::complex<scalar_t>(3, 4) ==
          c10::complex<scalar_t>(3, -4),
      "");
}
MAYBE_GLOBAL void test_arithmetic() {
  test_arithmetic_<float>();
  test_arithmetic_<double>();
}
// Runtime checks of complex op integer-scalar for a single integer type.
// The expected values mirror the operator definitions; in particular
// num / c is computed via the conjugate over the squared norm.
template <typename T, typename int_t>
void test_binary_ops_for_int_type_(T real, T img, int_t num) {
  c10::complex<T> c(real, img);
  ASSERT_EQ(c + num, c10::complex<T>(real + num, img));
  ASSERT_EQ(num + c, c10::complex<T>(num + real, img));
  ASSERT_EQ(c - num, c10::complex<T>(real - num, img));
  ASSERT_EQ(num - c, c10::complex<T>(num - real, -img));
  ASSERT_EQ(c * num, c10::complex<T>(real * num, img * num));
  ASSERT_EQ(num * c, c10::complex<T>(num * real, num * img));
  ASSERT_EQ(c / num, c10::complex<T>(real / num, img / num));
  ASSERT_EQ(
      num / c,
      c10::complex<T>(num * real / std::norm(c), -num * img / std::norm(c)));
}
// Widens one int8_t sample to every signed integer width.
template <typename T>
void test_binary_ops_for_all_int_types_(T real, T img, int8_t i) {
  test_binary_ops_for_int_type_<T, int8_t>(real, img, i);
  test_binary_ops_for_int_type_<T, int16_t>(real, img, i);
  test_binary_ops_for_int_type_<T, int32_t>(real, img, i);
  test_binary_ops_for_int_type_<T, int64_t>(real, img, i);
}
TEST(TestArithmeticIntScalar, All) {
  test_binary_ops_for_all_int_types_<float>(1.0, 0.1, 1);
  test_binary_ops_for_all_int_types_<double>(-1.3, -0.2, -2);
}
} // namespace arithmetic
namespace equality {
// Compile-time coverage of operator== / operator!= between two complex values
// and between a complex value and a real scalar, in both operand orders.
template <typename T>
C10_HOST_DEVICE void test_equality_() {
  static_assert(c10::complex<T>(1, 2) == c10::complex<T>(1, 2), "");
  static_assert(c10::complex<T>(1, 0) == T(1), "");
  static_assert(T(1) == c10::complex<T>(1, 0), "");
  static_assert(c10::complex<T>(1, 2) != c10::complex<T>(3, 4), "");
  static_assert(c10::complex<T>(1, 2) != T(1), "");
  static_assert(T(1) != c10::complex<T>(1, 2), "");
}
// Instantiates the checks for both precisions.
MAYBE_GLOBAL void test_equality() {
  test_equality_<float>();
  test_equality_<double>();
}
} // namespace equality
namespace io {
template <typename scalar_t>
void test_io_() {
std::stringstream ss;
c10::complex<scalar_t> a(1, 2);
ss << a;
ASSERT_EQ(ss.str(), "(1,2)");
ss.str("(3,4)");
ss >> a;
ASSERT_TRUE(a == c10::complex<scalar_t>(3, 4));
}
TEST(TestIO, All) {
test_io_<float>();
test_io_<double>();
}
} // namespace io
namespace test_std {
// Verifies the std:: (and c10::polar) entry points accept c10::complex.
// real/imag/norm/conj are checked as constant expressions; abs/arg/polar are
// only checked to be callable here (values are tested below).
// NOTE(review): PI and MAYBE_GLOBAL are assumed to be defined earlier in this
// test file (outside this excerpt).
template <typename scalar_t>
C10_HOST_DEVICE void test_callable_() {
  static_assert(std::real(c10::complex<scalar_t>(1, 2)) == scalar_t(1), "");
  static_assert(std::imag(c10::complex<scalar_t>(1, 2)) == scalar_t(2), "");
  std::abs(c10::complex<scalar_t>(1, 2));
  std::arg(c10::complex<scalar_t>(1, 2));
  static_assert(std::norm(c10::complex<scalar_t>(3, 4)) == scalar_t(25), "");
  static_assert(
      std::conj(c10::complex<scalar_t>(3, 4)) == c10::complex<scalar_t>(3, -4),
      "");
  c10::polar(float(1), float(PI / 2));
  c10::polar(double(1), double(PI / 2));
}
MAYBE_GLOBAL void test_callable() {
  test_callable_<float>();
  test_callable_<double>();
}
// Runtime value checks: |3+4i| == 5, arg(i) == pi/2, polar(1, pi/2) == i.
template <typename scalar_t>
void test_values_() {
  ASSERT_EQ(std::abs(c10::complex<scalar_t>(3, 4)), scalar_t(5));
  ASSERT_LT(std::abs(std::arg(c10::complex<scalar_t>(0, 1)) - PI / 2), 1e-6);
  ASSERT_LT(
      std::abs(
          c10::polar(scalar_t(1), scalar_t(PI / 2)) -
          c10::complex<scalar_t>(0, 1)),
      1e-6);
}
TEST(TestStd, BasicFunctions) {
  test_values_<float>();
  test_values_<double>();
  // CSQRT edge cases: checks for overflows which are likely to occur
  // if square root is computed using polar form
  ASSERT_LT(
      std::abs(std::sqrt(c10::complex<float>(-1e20, -4988429.2)).real()), 3e-4);
  ASSERT_LT(
      std::abs(std::sqrt(c10::complex<double>(-1e60, -4988429.2)).real()),
      3e-4);
}
} // namespace test_std
| 20,546
| 30.179059
| 80
|
h
|
null |
pytorch-main/c10/util/AlignOf.h
|
//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the AlignedCharArray and AlignedCharArrayUnion classes.
//
//===----------------------------------------------------------------------===//
// ATen: modified from llvm::AlignOf
// replaced LLVM_ALIGNAS with alignas
#pragma once
#include <cstddef>
namespace c10 {
/// \struct AlignedCharArray
/// \brief Helper for building an aligned character array type.
///
/// This template is used to explicitly build up a collection of aligned
/// character array types. We have to build these up using a macro and explicit
/// specialization to cope with MSVC (at least till 2015) where only an
/// integer literal can be used to specify an alignment constraint. Once built
/// up here, we can then begin to indirect between these using normal C++
/// template parameters.
// MSVC requires special handling here.
#ifndef _MSC_VER
template <size_t Alignment, size_t Size>
struct AlignedCharArray {
  // Raw storage of `Size` bytes aligned to `Alignment`, suitable as a
  // placement-new target for objects with matching size/alignment.
  alignas(Alignment) char buffer[Size];
};
#else // _MSC_VER
/// \brief Create a type with an aligned char buffer.
template <size_t Alignment, size_t Size>
struct AlignedCharArray;
// We provide special variations of this template for the most common
// alignments because __declspec(align(...)) doesn't actually work when it is
// a member of a by-value function argument in MSVC, even if the alignment
// request is something reasonably like 8-byte or 16-byte. Note that we can't
// even include the declspec with the union that forces the alignment because
// MSVC warns on the existence of the declspec despite the union member forcing
// proper alignment.
// For alignments 1/2/4/8, a union with a built-in member of that alignment
// forces the buffer's alignment without any declspec.
template <size_t Size>
struct AlignedCharArray<1, Size> {
  union {
    char aligned;
    char buffer[Size];
  };
};
template <size_t Size>
struct AlignedCharArray<2, Size> {
  union {
    short aligned;
    char buffer[Size];
  };
};
template <size_t Size>
struct AlignedCharArray<4, Size> {
  union {
    int aligned;
    char buffer[Size];
  };
};
template <size_t Size>
struct AlignedCharArray<8, Size> {
  union {
    double aligned;
    char buffer[Size];
  };
};
// The rest of these are provided with a __declspec(align(...)) and we simply
// can't pass them by-value as function arguments on MSVC.
#define AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
  template <size_t Size>                          \
  struct AlignedCharArray<x, Size> {              \
    __declspec(align(x)) char buffer[Size];       \
  };
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)
#undef AT_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
#endif // _MSC_VER
namespace detail {
// Holds one member of each of T1..T10, so alignof(AlignerImpl<...>) is at
// least the strictest alignment among the ten types. Only used via alignof;
// never constructed (the default constructor is deleted).
template <
    typename T1,
    typename T2 = char,
    typename T3 = char,
    typename T4 = char,
    typename T5 = char,
    typename T6 = char,
    typename T7 = char,
    typename T8 = char,
    typename T9 = char,
    typename T10 = char>
class AlignerImpl {
  T1 t1;
  T2 t2;
  T3 t3;
  T4 t4;
  T5 t5;
  T6 t6;
  T7 t7;
  T8 t8;
  T9 t9;
  T10 t10;
 public:
  AlignerImpl() = delete;
};
// All char arrays share storage in the union, so sizeof(SizerImpl<...>) is
// the maximum of sizeof(T1)..sizeof(T10). Only used via sizeof.
template <
    typename T1,
    typename T2 = char,
    typename T3 = char,
    typename T4 = char,
    typename T5 = char,
    typename T6 = char,
    typename T7 = char,
    typename T8 = char,
    typename T9 = char,
    typename T10 = char>
union SizerImpl {
  char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)],
      arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)],
      arr9[sizeof(T9)], arr10[sizeof(T10)];
};
} // end namespace detail
/// \brief This union template exposes a suitably aligned and sized character
/// array member which can hold elements of any of up to ten types.
///
/// These types may be arrays, structs, or any other types. The goal is to
/// expose a char array buffer member which can be used as suitable storage for
/// a placement new of any of these types. Support for more than ten types can
/// be added at the cost of more boilerplate.
template <
    typename T1,
    typename T2 = char,
    typename T3 = char,
    typename T4 = char,
    typename T5 = char,
    typename T6 = char,
    typename T7 = char,
    typename T8 = char,
    typename T9 = char,
    typename T10 = char>
// Inherits a `buffer` member whose alignment comes from AlignerImpl (max
// alignment of T1..T10) and whose size comes from SizerImpl (max size of
// T1..T10), so any one of the ten types can be placement-new'ed into it.
struct AlignedCharArrayUnion
    : AlignedCharArray<
          alignof(detail::AlignerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>),
          sizeof(::c10::detail::
                     SizerImpl<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>)> {};
} // end namespace c10
| 4,844
| 26.685714
| 80
|
h
|
null |
pytorch-main/c10/util/ArrayRef.h
|
//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// ATen: modified from llvm::ArrayRef.
// removed llvm-specific functionality
// removed some implicit const -> non-const conversions that rely on
// complicated std::enable_if meta-programming
// removed a bunch of slice variants for simplicity...
#pragma once
#include <c10/util/C++17.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Exception.h>
#include <c10/util/SmallVector.h>
#include <array>
#include <iterator>
#include <vector>
namespace c10 {
/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
///
/// This class does not own the underlying data, it is expected to be used in
/// situations where the data resides in some other buffer, whose lifetime
/// extends past that of the ArrayRef. For this reason, it is not in general
/// safe to store an ArrayRef.
///
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template <typename T>
class ArrayRef final {
 public:
  using iterator = const T*;
  using const_iterator = const T*;
  using size_type = size_t;
  using value_type = T;
  using reverse_iterator = std::reverse_iterator<iterator>;
 private:
  /// The start of the array, in an external buffer.
  const T* Data;
  /// The number of elements.
  size_type Length;
  // Debug-build-only sanity check: a null Data pointer is legal only when
  // Length is zero.
  void debugCheckNullptrInvariant() {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
        Data != nullptr || Length == 0,
        "created ArrayRef with nullptr and non-zero length! c10::optional relies on this being illegal");
  }
 public:
  /// @name Constructors
  /// @{
  /// Construct an empty ArrayRef.
  /* implicit */ constexpr ArrayRef() : Data(nullptr), Length(0) {}
  /// Construct an ArrayRef from a single element.
  // TODO Make this explicit
  constexpr ArrayRef(const T& OneElt) : Data(&OneElt), Length(1) {}
  /// Construct an ArrayRef from a pointer and length.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* data, size_t length)
      : Data(data), Length(length) {
    debugCheckNullptrInvariant();
  }
  /// Construct an ArrayRef from a range.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef(const T* begin, const T* end)
      : Data(begin), Length(end - begin) {
    debugCheckNullptrInvariant();
  }
  /// Construct an ArrayRef from a SmallVector. This is templated in order to
  /// avoid instantiating SmallVectorTemplateCommon<T> whenever we
  /// copy-construct an ArrayRef.
  template <typename U>
  /* implicit */ ArrayRef(const SmallVectorTemplateCommon<T, U>& Vec)
      : Data(Vec.data()), Length(Vec.size()) {
    debugCheckNullptrInvariant();
  }
  /// Construct an ArrayRef from any container whose data() returns exactly
  /// T* (possibly const-qualified); the enable_if rejects everything else.
  template <
      typename Container,
      typename = std::enable_if_t<std::is_same<
          std::remove_const_t<decltype(std::declval<Container>().data())>,
          T*>::value>>
  /* implicit */ ArrayRef(const Container& container)
      : Data(container.data()), Length(container.size()) {
    debugCheckNullptrInvariant();
  }
  /// Construct an ArrayRef from a std::vector.
  // The enable_if stuff here makes sure that this isn't used for
  // std::vector<bool>, because ArrayRef can't work on a std::vector<bool>
  // bitfield.
  template <typename A>
  /* implicit */ ArrayRef(const std::vector<T, A>& Vec)
      : Data(Vec.data()), Length(Vec.size()) {
    static_assert(
        !std::is_same<T, bool>::value,
        "ArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield.");
  }
  /// Construct an ArrayRef from a std::array
  template <size_t N>
  /* implicit */ constexpr ArrayRef(const std::array<T, N>& Arr)
      : Data(Arr.data()), Length(N) {}
  /// Construct an ArrayRef from a C array.
  template <size_t N>
  /* implicit */ constexpr ArrayRef(const T (&Arr)[N]) : Data(Arr), Length(N) {}
  /// Construct an ArrayRef from a std::initializer_list.
  /// An empty list is normalized to a null Data pointer, preserving the
  /// nullptr/zero-length invariant checked above.
  /* implicit */ constexpr ArrayRef(const std::initializer_list<T>& Vec)
      : Data(
            std::begin(Vec) == std::end(Vec) ? static_cast<T*>(nullptr)
                                             : std::begin(Vec)),
        Length(Vec.size()) {}
  /// @}
  /// @name Simple Operations
  /// @{
  constexpr iterator begin() const {
    return Data;
  }
  constexpr iterator end() const {
    return Data + Length;
  }
  // These are actually the same as iterator, since ArrayRef only
  // gives you const iterators.
  constexpr const_iterator cbegin() const {
    return Data;
  }
  constexpr const_iterator cend() const {
    return Data + Length;
  }
  constexpr reverse_iterator rbegin() const {
    return reverse_iterator(end());
  }
  constexpr reverse_iterator rend() const {
    return reverse_iterator(begin());
  }
  /// empty - Check if the array is empty.
  constexpr bool empty() const {
    return Length == 0;
  }
  constexpr const T* data() const {
    return Data;
  }
  /// size - Get the array size.
  constexpr size_t size() const {
    return Length;
  }
  /// front - Get the first element. Throws (TORCH_CHECK) if empty.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& front() const {
    TORCH_CHECK(
        !empty(), "ArrayRef: attempted to access front() of empty list");
    return Data[0];
  }
  /// back - Get the last element. Throws (TORCH_CHECK) if empty.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& back() const {
    TORCH_CHECK(!empty(), "ArrayRef: attempted to access back() of empty list");
    return Data[Length - 1];
  }
  /// equals - Check for element-wise equality.
  constexpr bool equals(ArrayRef RHS) const {
    return Length == RHS.Length && std::equal(begin(), end(), RHS.begin());
  }
  /// slice(n, m) - Take M elements of the array starting at element N
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef<T> slice(size_t N, size_t M)
      const {
    TORCH_CHECK(
        N + M <= size(),
        "ArrayRef: invalid slice, N = ",
        N,
        "; M = ",
        M,
        "; size = ",
        size());
    return ArrayRef<T>(data() + N, M);
  }
  /// slice(n) - Chop off the first N elements of the array.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA ArrayRef<T> slice(size_t N) const {
    TORCH_CHECK(
        N <= size(), "ArrayRef: invalid slice, N = ", N, "; size = ", size());
    return slice(N, size() - N);
  }
  /// @}
  /// @name Operator Overloads
  /// @{
  /// Unchecked element access (no bounds check, unlike at()).
  constexpr const T& operator[](size_t Index) const {
    return Data[Index];
  }
  /// Vector compatibility: bounds-checked element access.
  C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA const T& at(size_t Index) const {
    TORCH_CHECK(
        Index < Length,
        "ArrayRef: invalid index Index = ",
        Index,
        "; Length = ",
        Length);
    return Data[Index];
  }
  /// Disallow accidental assignment from a temporary.
  ///
  /// The declaration here is extra complicated so that "arrayRef = {}"
  /// continues to select the move assignment operator.
  template <typename U>
  typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type&
  operator=(U&& Temporary) = delete;
  /// Disallow accidental assignment from a temporary.
  ///
  /// The declaration here is extra complicated so that "arrayRef = {}"
  /// continues to select the move assignment operator.
  template <typename U>
  typename std::enable_if<std::is_same<U, T>::value, ArrayRef<T>>::type&
  operator=(std::initializer_list<U>) = delete;
  /// @}
  /// @name Expensive Operations
  /// @{
  /// Copies the referenced elements into a new owning std::vector.
  std::vector<T> vec() const {
    return std::vector<T>(Data, Data + Length);
  }
  /// @}
};
// Streams an ArrayRef as "[a, b, c]": ", " between elements, none after the
// last, "[]" for an empty list.
template <typename T>
std::ostream& operator<<(std::ostream& out, ArrayRef<T> list) {
  out << "[";
  const char* sep = "";
  for (const auto& item : list) {
    out << sep << item;
    sep = ", ";
  }
  out << "]";
  return out;
}
/// @name ArrayRef Convenience constructors
/// @{
/// NOTE: all of these return a non-owning view; the result must not outlive
/// the argument it was constructed from.
/// Construct an ArrayRef from a single element.
template <typename T>
ArrayRef<T> makeArrayRef(const T& OneElt) {
  return OneElt;
}
/// Construct an ArrayRef from a pointer and length.
template <typename T>
ArrayRef<T> makeArrayRef(const T* data, size_t length) {
  return ArrayRef<T>(data, length);
}
/// Construct an ArrayRef from a range.
template <typename T>
ArrayRef<T> makeArrayRef(const T* begin, const T* end) {
  return ArrayRef<T>(begin, end);
}
/// Construct an ArrayRef from a SmallVector.
template <typename T>
ArrayRef<T> makeArrayRef(const SmallVectorImpl<T>& Vec) {
  return Vec;
}
/// Construct an ArrayRef from a SmallVector.
template <typename T, unsigned N>
ArrayRef<T> makeArrayRef(const SmallVector<T, N>& Vec) {
  return Vec;
}
/// Construct an ArrayRef from a std::vector.
template <typename T>
ArrayRef<T> makeArrayRef(const std::vector<T>& Vec) {
  return Vec;
}
/// Construct an ArrayRef from a std::array.
template <typename T, std::size_t N>
ArrayRef<T> makeArrayRef(const std::array<T, N>& Arr) {
  return Arr;
}
/// Construct an ArrayRef from an ArrayRef (no-op) (const)
template <typename T>
ArrayRef<T> makeArrayRef(const ArrayRef<T>& Vec) {
  return Vec;
}
/// Construct an ArrayRef from an ArrayRef (no-op)
template <typename T>
ArrayRef<T>& makeArrayRef(ArrayRef<T>& Vec) {
  return Vec;
}
/// Construct an ArrayRef from a C array.
template <typename T, size_t N>
ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
  return ArrayRef<T>(Arr);
}
// WARNING: Template instantiation will NOT be willing to do an implicit
// conversions to get you to an c10::ArrayRef, which is why we need so
// many overloads.
template <typename T>
bool operator==(c10::ArrayRef<T> a1, c10::ArrayRef<T> a2) {
return a1.equals(a2);
}
template <typename T>
bool operator!=(c10::ArrayRef<T> a1, c10::ArrayRef<T> a2) {
return !a1.equals(a2);
}
template <typename T>
bool operator==(const std::vector<T>& a1, c10::ArrayRef<T> a2) {
return c10::ArrayRef<T>(a1).equals(a2);
}
template <typename T>
bool operator!=(const std::vector<T>& a1, c10::ArrayRef<T> a2) {
return !c10::ArrayRef<T>(a1).equals(a2);
}
template <typename T>
bool operator==(c10::ArrayRef<T> a1, const std::vector<T>& a2) {
return a1.equals(c10::ArrayRef<T>(a2));
}
template <typename T>
bool operator!=(c10::ArrayRef<T> a1, const std::vector<T>& a2) {
return !a1.equals(c10::ArrayRef<T>(a2));
}
using IntArrayRef = ArrayRef<int64_t>;
// This alias is deprecated because it doesn't make ownership
// semantics obvious. Use IntArrayRef instead!
C10_DEFINE_DEPRECATED_USING(IntList, ArrayRef<int64_t>)
} // namespace c10
| 10,671
| 27.61126
| 105
|
h
|
null |
pytorch-main/c10/util/BFloat16-inl.h
|
#pragma once
#include <c10/macros/Macros.h>
#include <c10/util/bit_cast.h>
#include <limits>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion")
#endif
#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
#if defined(CL_SYCL_LANGUAGE_VERSION)
#include <CL/sycl.hpp> // for SYCL 1.2.1
#else
#include <sycl/sycl.hpp> // for SYCL 2020
#endif
#include <ext/oneapi/bfloat16.hpp>
#endif
namespace c10 {
/// Constructors
// float -> bfloat16 conversion constructor.
// - CUDA SM 8.0+: hardware __float2bfloat16 intrinsic.
// - SYCL device with the oneapi bfloat16 extension: sycl conversion.
// - Otherwise: software conversion via detail::round_to_nearest_even.
inline C10_HOST_DEVICE BFloat16::BFloat16(float value)
    :
#if defined(__CUDACC__) && !defined(USE_ROCM) && defined(__CUDA_ARCH__) && \
    __CUDA_ARCH__ >= 800
    x(__bfloat16_as_ushort(__float2bfloat16(value)))
#elif defined(__SYCL_DEVICE_ONLY__) && \
    defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
    x(c10::bit_cast<uint16_t>(sycl::ext::oneapi::bfloat16(value)))
#else
    // RNE by default
    x(detail::round_to_nearest_even(value))
#endif
{
}
/// Implicit conversions
// bfloat16 -> float widening; exact (every bfloat16 value is representable
// as a float). Dispatches to the CUDA/SYCL native types where available,
// otherwise to the software bit expansion.
inline C10_HOST_DEVICE BFloat16::operator float() const {
#if defined(__CUDACC__) && !defined(USE_ROCM)
  return __bfloat162float(*reinterpret_cast<const __nv_bfloat16*>(&x));
#elif defined(__SYCL_DEVICE_ONLY__) && \
    defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
  return float(*reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x));
#else
  return detail::f32_from_bits(x);
#endif
}
#if defined(__CUDACC__) && !defined(USE_ROCM)
// Bit-level interop with CUDA's native __nv_bfloat16: both types store the
// same 16-bit representation, so conversion is a reinterpretation, not a
// numeric conversion.
inline C10_HOST_DEVICE BFloat16::BFloat16(const __nv_bfloat16& value) {
  x = *reinterpret_cast<const unsigned short*>(&value);
}
inline C10_HOST_DEVICE BFloat16::operator __nv_bfloat16() const {
  return *reinterpret_cast<const __nv_bfloat16*>(&x);
}
#endif
#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
// Same bit-level interop for SYCL's oneapi bfloat16 type.
inline C10_HOST_DEVICE BFloat16::BFloat16(
    const sycl::ext::oneapi::bfloat16& value) {
  x = *reinterpret_cast<const unsigned short*>(&value);
}
inline C10_HOST_DEVICE BFloat16::operator sycl::ext::oneapi::bfloat16() const {
  return *reinterpret_cast<const sycl::ext::oneapi::bfloat16*>(&x);
}
#endif
// CUDA intrinsics
// Read-only-cache load: forwards to CUDA's __ldg on SM 8.0+ (non-ROCm);
// otherwise a plain dereference.
#if defined(__CUDACC__) || defined(__HIPCC__)
inline C10_DEVICE BFloat16 __ldg(const BFloat16* ptr) {
#if !defined(USE_ROCM) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
  return __ldg(reinterpret_cast<const __nv_bfloat16*>(ptr));
#else
  return *ptr;
#endif
}
#endif
/// Arithmetic
// Binary arithmetic is computed in float (both operands widened, result
// rounded back to bfloat16 by the converting constructor).
inline C10_HOST_DEVICE BFloat16
operator+(const BFloat16& a, const BFloat16& b) {
  return static_cast<float>(a) + static_cast<float>(b);
}
inline C10_HOST_DEVICE BFloat16
operator-(const BFloat16& a, const BFloat16& b) {
  return static_cast<float>(a) - static_cast<float>(b);
}
inline C10_HOST_DEVICE BFloat16
operator*(const BFloat16& a, const BFloat16& b) {
  return static_cast<float>(a) * static_cast<float>(b);
}
inline C10_HOST_DEVICE BFloat16 operator/(const BFloat16& a, const BFloat16& b)
    __ubsan_ignore_float_divide_by_zero__ {
  return static_cast<float>(a) / static_cast<float>(b);
}
inline C10_HOST_DEVICE BFloat16 operator-(const BFloat16& a) {
  return -static_cast<float>(a);
}
inline C10_HOST_DEVICE BFloat16& operator+=(BFloat16& a, const BFloat16& b) {
  a = a + b;
  return a;
}
inline C10_HOST_DEVICE BFloat16& operator-=(BFloat16& a, const BFloat16& b) {
  a = a - b;
  return a;
}
inline C10_HOST_DEVICE BFloat16& operator*=(BFloat16& a, const BFloat16& b) {
  a = a * b;
  return a;
}
inline C10_HOST_DEVICE BFloat16& operator/=(BFloat16& a, const BFloat16& b) {
  a = a / b;
  return a;
}
// NOTE(review): unlike the arithmetic operators above, these bitwise binary
// operators MUTATE their left operand and return a reference to it -- they
// behave like |=, ^=, &= on the raw 16-bit representation. Callers may rely
// on this signature, so the quirk is documented here rather than changed.
inline C10_HOST_DEVICE BFloat16& operator|(BFloat16& a, const BFloat16& b) {
  a.x = a.x | b.x;
  return a;
}
inline C10_HOST_DEVICE BFloat16& operator^(BFloat16& a, const BFloat16& b) {
  a.x = a.x ^ b.x;
  return a;
}
inline C10_HOST_DEVICE BFloat16& operator&(BFloat16& a, const BFloat16& b) {
  a.x = a.x & b.x;
  return a;
}
/// Arithmetic with floats
// Mixed BFloat16/float operations widen the BFloat16 operand and return
// float (no rounding back to bfloat16).
inline C10_HOST_DEVICE float operator+(BFloat16 a, float b) {
  return static_cast<float>(a) + b;
}
inline C10_HOST_DEVICE float operator-(BFloat16 a, float b) {
  return static_cast<float>(a) - b;
}
inline C10_HOST_DEVICE float operator*(BFloat16 a, float b) {
  return static_cast<float>(a) * b;
}
inline C10_HOST_DEVICE float operator/(BFloat16 a, float b) {
  return static_cast<float>(a) / b;
}
inline C10_HOST_DEVICE float operator+(float a, BFloat16 b) {
  return a + static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator-(float a, BFloat16 b) {
  return a - static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator*(float a, BFloat16 b) {
  return a * static_cast<float>(b);
}
inline C10_HOST_DEVICE float operator/(float a, BFloat16 b) {
  return a / static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator+=(float& a, const BFloat16& b) {
  return a += static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator-=(float& a, const BFloat16& b) {
  return a -= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator*=(float& a, const BFloat16& b) {
  return a *= static_cast<float>(b);
}
inline C10_HOST_DEVICE float& operator/=(float& a, const BFloat16& b) {
  return a /= static_cast<float>(b);
}
/// Arithmetic with doubles
// Mixed BFloat16/double operations widen to double and return double.
inline C10_HOST_DEVICE double operator+(BFloat16 a, double b) {
  return static_cast<double>(a) + b;
}
inline C10_HOST_DEVICE double operator-(BFloat16 a, double b) {
  return static_cast<double>(a) - b;
}
inline C10_HOST_DEVICE double operator*(BFloat16 a, double b) {
  return static_cast<double>(a) * b;
}
inline C10_HOST_DEVICE double operator/(BFloat16 a, double b) {
  return static_cast<double>(a) / b;
}
inline C10_HOST_DEVICE double operator+(double a, BFloat16 b) {
  return a + static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator-(double a, BFloat16 b) {
  return a - static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator*(double a, BFloat16 b) {
  return a * static_cast<double>(b);
}
inline C10_HOST_DEVICE double operator/(double a, BFloat16 b) {
  return a / static_cast<double>(b);
}
/// Arithmetic with ints
// The int operand is first converted to BFloat16 (potentially lossy for
// large magnitudes), then the BFloat16/BFloat16 operator above applies.
inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int b) {
  return a + static_cast<BFloat16>(b);
}
inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int b) {
  return a - static_cast<BFloat16>(b);
}
inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int b) {
  return a * static_cast<BFloat16>(b);
}
inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int b) {
  return a / static_cast<BFloat16>(b);
}
inline C10_HOST_DEVICE BFloat16 operator+(int a, BFloat16 b) {
  return static_cast<BFloat16>(a) + b;
}
inline C10_HOST_DEVICE BFloat16 operator-(int a, BFloat16 b) {
  return static_cast<BFloat16>(a) - b;
}
inline C10_HOST_DEVICE BFloat16 operator*(int a, BFloat16 b) {
  return static_cast<BFloat16>(a) * b;
}
inline C10_HOST_DEVICE BFloat16 operator/(int a, BFloat16 b) {
  return static_cast<BFloat16>(a) / b;
}
//// Arithmetic with int64_t
// Same convert-then-operate pattern as the int overloads above.
inline C10_HOST_DEVICE BFloat16 operator+(BFloat16 a, int64_t b) {
  return a + static_cast<BFloat16>(b);
}
inline C10_HOST_DEVICE BFloat16 operator-(BFloat16 a, int64_t b) {
  return a - static_cast<BFloat16>(b);
}
inline C10_HOST_DEVICE BFloat16 operator*(BFloat16 a, int64_t b) {
  return a * static_cast<BFloat16>(b);
}
inline C10_HOST_DEVICE BFloat16 operator/(BFloat16 a, int64_t b) {
  return a / static_cast<BFloat16>(b);
}
inline C10_HOST_DEVICE BFloat16 operator+(int64_t a, BFloat16 b) {
  return static_cast<BFloat16>(a) + b;
}
inline C10_HOST_DEVICE BFloat16 operator-(int64_t a, BFloat16 b) {
  return static_cast<BFloat16>(a) - b;
}
inline C10_HOST_DEVICE BFloat16 operator*(int64_t a, BFloat16 b) {
  return static_cast<BFloat16>(a) * b;
}
inline C10_HOST_DEVICE BFloat16 operator/(int64_t a, BFloat16 b) {
  return static_cast<BFloat16>(a) / b;
}
// Overloading < and > operators, because std::max and std::min use them.
// NOTE(review): the parameters are non-const references, so these overloads
// will not bind to const objects or temporaries -- confirm whether this is
// intentional before tightening the signatures.
inline C10_HOST_DEVICE bool operator>(BFloat16& lhs, BFloat16& rhs) {
  return float(lhs) > float(rhs);
}
inline C10_HOST_DEVICE bool operator<(BFloat16& lhs, BFloat16& rhs) {
  return float(lhs) < float(rhs);
}
} // namespace c10
namespace std {
// std::numeric_limits specialization for bfloat16.
// Layout: 1 sign bit, 8 exponent bits (same range as float), 7 mantissa bits.
template <>
class numeric_limits<c10::BFloat16> {
 public:
  static constexpr bool is_signed = true;
  static constexpr bool is_specialized = true;
  static constexpr bool is_integer = false;
  static constexpr bool is_exact = false;
  static constexpr bool has_infinity = true;
  static constexpr bool has_quiet_NaN = true;
  static constexpr bool has_signaling_NaN = true;
  static constexpr auto has_denorm = numeric_limits<float>::has_denorm;
  static constexpr auto has_denorm_loss =
      numeric_limits<float>::has_denorm_loss;
  static constexpr auto round_style = numeric_limits<float>::round_style;
  static constexpr bool is_iec559 = false;
  static constexpr bool is_bounded = true;
  static constexpr bool is_modulo = false;
  static constexpr int digits = 8;
  static constexpr int digits10 = 2;
  static constexpr int max_digits10 = 4;
  static constexpr int radix = 2;
  static constexpr int min_exponent = -125;
  static constexpr int min_exponent10 = -37;
  static constexpr int max_exponent = 128;
  static constexpr int max_exponent10 = 38;
  static constexpr auto traps = numeric_limits<float>::traps;
  static constexpr auto tinyness_before =
      numeric_limits<float>::tinyness_before;
  /// Smallest positive normal value: 2^-126.
  static constexpr c10::BFloat16 min() {
    return c10::BFloat16(0x0080, c10::BFloat16::from_bits());
  }
  /// Most negative finite value.
  static constexpr c10::BFloat16 lowest() {
    return c10::BFloat16(0xFF7F, c10::BFloat16::from_bits());
  }
  /// Largest finite value.
  static constexpr c10::BFloat16 max() {
    return c10::BFloat16(0x7F7F, c10::BFloat16::from_bits());
  }
  /// Machine epsilon for 7 mantissa bits: 2^-7 = 0.0078125.
  static constexpr c10::BFloat16 epsilon() {
    return c10::BFloat16(0x3C00, c10::BFloat16::from_bits());
  }
  /// Largest possible rounding error in ULPs: 0.5.
  static constexpr c10::BFloat16 round_error() {
    return c10::BFloat16(0x3F00, c10::BFloat16::from_bits());
  }
  /// +infinity: exponent all ones, zero mantissa.
  static constexpr c10::BFloat16 infinity() {
    return c10::BFloat16(0x7F80, c10::BFloat16::from_bits());
  }
  /// Quiet NaN: exponent all ones, quiet bit (0x0040) set.
  static constexpr c10::BFloat16 quiet_NaN() {
    return c10::BFloat16(0x7FC0, c10::BFloat16::from_bits());
  }
  /// Signaling NaN: exponent all ones, non-zero mantissa with the quiet bit
  /// (0x0040) clear. (Previously returned 0x7F80, which is the +infinity bit
  /// pattern and therefore not a NaN at all.)
  static constexpr c10::BFloat16 signaling_NaN() {
    return c10::BFloat16(0x7FB0, c10::BFloat16::from_bits());
  }
  /// Smallest positive subnormal value: 2^-133.
  static constexpr c10::BFloat16 denorm_min() {
    return c10::BFloat16(0x0001, c10::BFloat16::from_bits());
  }
};
} // namespace std
C10_CLANG_DIAGNOSTIC_POP()
| 10,329
| 29.02907
| 79
|
h
|
null |
pytorch-main/c10/util/BFloat16-math.h
|
#pragma once
#include <c10/util/BFloat16.h>
#include <c10/util/Half.h>
#include <c10/util/math_compat.h>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
#endif
namespace std {
// Trait that is true exactly for the reduced-precision floating-point
// types c10::Half and c10::BFloat16. It derives from std::true_type /
// std::false_type (via std::bool_constant), so it supports tag dispatch
// as well as the enable_if constraints used throughout this header.
template <typename T>
struct is_reduced_floating_point
    : std::bool_constant<
          std::is_same<T, c10::Half>::value ||
          std::is_same<T, c10::BFloat16>::value> {};

// Variable-template shorthand for the trait above.
template <typename T>
constexpr bool is_reduced_floating_point_v =
    is_reduced_floating_point<T>::value;
// Unary <cmath> wrappers for reduced-precision types (c10::Half and
// c10::BFloat16): each function widens its argument to float, evaluates
// the float overload of the corresponding standard function, and narrows
// the result back through the implicit float -> T conversion.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T acos(T a) {
  return std::acos(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T asin(T a) {
  return std::asin(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T atan(T a) {
  return std::atan(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T erf(T a) {
  return std::erf(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T erfc(T a) {
  return std::erfc(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T exp(T a) {
  return std::exp(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T expm1(T a) {
  return std::expm1(float(a));
}
// Logarithms.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T log(T a) {
  return std::log(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T log10(T a) {
  return std::log10(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T log1p(T a) {
  return std::log1p(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T log2(T a) {
  return std::log2(float(a));
}
// Rounding to integral values.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T ceil(T a) {
  return std::ceil(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T cos(T a) {
  return std::cos(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T floor(T a) {
  return std::floor(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T nearbyint(T a) {
  return std::nearbyint(float(a));
}
// Trigonometric and hyperbolic functions.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T sin(T a) {
  return std::sin(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T tan(T a) {
  return std::tan(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T sinh(T a) {
  return std::sinh(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T cosh(T a) {
  return std::cosh(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T tanh(T a) {
  return std::tanh(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T trunc(T a) {
  return std::trunc(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T lgamma(T a) {
  return std::lgamma(float(a));
}
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T sqrt(T a) {
  return std::sqrt(float(a));
}
// Reciprocal square root, evaluated in float precision.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T rsqrt(T a) {
  // Use a float literal: the previous `1.0` (a double) promoted the whole
  // expression to a double division before narrowing back to T, which is
  // pointless extra work for a reduced-precision operand and exactly the
  // kind of implicit float conversion this header otherwise avoids.
  return 1.0f / std::sqrt(float(a));
}
// Absolute value, evaluated in float precision.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T abs(T a) {
  return std::abs(float(a));
}
// pow with a double exponent.
#if defined(_MSC_VER) && defined(__CUDACC__)
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T pow(T a, double b) {
  // NOTE(review): the exponent is narrowed to float here, unlike the
  // generic branch below -- presumably a workaround for an MSVC + CUDA
  // overload-resolution/codegen issue; confirm before unifying the two.
  return std::pow(float(a), float(b));
}
#else
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T pow(T a, double b) {
  // Base widened to float, exponent kept at double precision; the mixed
  // std::pow overload evaluates in double.
  return std::pow(float(a), b);
}
#endif
// pow for two reduced-precision operands: both are widened to float and
// the float overload of std::pow is used, then the result is narrowed
// back to T.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T pow(T a, T b) {
  const float base = static_cast<float>(a);
  const float exponent = static_cast<float>(b);
  return std::pow(base, exponent);
}
// Floating-point remainder of a / b, evaluated in float precision.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
inline T fmod(T a, T b) {
  const float numerator = static_cast<float>(a);
  const float denominator = static_cast<float>(b);
  return std::fmod(numerator, denominator);
}
/*
The following function is inspired from the implementation in `musl`
Link to License: https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
----------------------------------------------------------------------
Copyright © 2005-2020 Rich Felker, et al.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
*/
// nextafter(from, to): the next representable reduced-precision value
// after `from` in the direction of `to`. Works by stepping the raw 16-bit
// representation by one: for IEEE-754-style formats, bit patterns of the
// same sign order the same way as their integer reinterpretations.
template <
    typename T,
    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
C10_HOST_DEVICE inline T nextafter(T from, T to) {
  // Reference:
  // https://git.musl-libc.org/cgit/musl/tree/src/math/nextafter.c
  using int_repr_t = uint16_t;
  using float_t = T;
  constexpr uint8_t bits = 16;
  // NOTE(review): union type punning is technically UB in ISO C++ but is
  // relied on here (as in musl); it works on the supported compilers.
  union {
    float_t f;
    int_repr_t i;
  } ufrom = {from}, uto = {to};
  // get a mask to get the sign bit i.e. MSB
  int_repr_t sign_mask = int_repr_t{1} << (bits - 1);
  // short-circuit: if either is NaN, return NaN
  if (from != from || to != to) {
    return from + to;
  }
  // short-circuit: if they are exactly the same.
  if (ufrom.i == uto.i) {
    return from;
  }
  // mask the sign-bit to zero i.e. positive
  // equivalent to abs(x)
  int_repr_t abs_from = ufrom.i & ~sign_mask;
  int_repr_t abs_to = uto.i & ~sign_mask;
  if (abs_from == 0) {
    // if both are zero but with different sign,
    // preserve the sign of `to`.
    if (abs_to == 0) {
      return to;
    }
    // smallest subnormal with sign of `to`.
    ufrom.i = (uto.i & sign_mask) | int_repr_t{1};
    return ufrom.f;
  }
  // if abs(from) > abs(to) or sign(from) != sign(to)
  // step toward zero (decrement magnitude); otherwise step away from zero.
  if (abs_from > abs_to || ((ufrom.i ^ uto.i) & sign_mask)) {
    ufrom.i--;
  } else {
    ufrom.i++;
  }
  return ufrom.f;
}
} // namespace std
C10_CLANG_DIAGNOSTIC_POP()
| 7,888
| 26.778169
| 72
|
h
|
null |
pytorch-main/c10/util/BFloat16.h
|
#pragma once
// Defines the bfloat16 type (brain floating-point). This representation uses
// 1 bit for the sign, 8 bits for the exponent and 7 bits for the mantissa.
#include <c10/macros/Macros.h>
#include <cmath>
#include <cstring>
#if defined(__CUDACC__) && !defined(USE_ROCM)
#include <cuda_bf16.h>
#endif
#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
#if defined(CL_SYCL_LANGUAGE_VERSION)
#include <CL/sycl.hpp> // for SYCL 1.2.1
#else
#include <sycl/sycl.hpp> // for SYCL 2020
#endif
#include <ext/oneapi/bfloat16.hpp>
#endif
namespace c10 {
namespace detail {
// Reinterpret a bfloat16 bit pattern as the float whose upper 16 bits are
// `src` and whose lower 16 mantissa bits are zero (bfloat16 is exactly
// the top half of an IEEE-754 binary32).
inline C10_HOST_DEVICE float f32_from_bits(uint16_t src) {
  float res = 0;
  uint32_t tmp = src;
  tmp <<= 16;
#if defined(USE_ROCM)
  float* tempRes;
  // We should be using memcpy in order to respect the strict aliasing rule
  // but it fails in the HIP environment.
  tempRes = reinterpret_cast<float*>(&tmp);
  res = *tempRes;
#else
  std::memcpy(&res, &tmp, sizeof(tmp));
#endif
  return res;
}
// Convert a float to bfloat16 by keeping only its upper 16 bits -- i.e.
// truncation of the mantissa, with no rounding bias applied here.
inline C10_HOST_DEVICE uint16_t bits_from_f32(float src) {
  uint32_t res = 0;
#if defined(USE_ROCM)
  // We should be using memcpy in order to respect the strict aliasing rule
  // but it fails in the HIP environment.
  uint32_t* tempRes = reinterpret_cast<uint32_t*>(&src);
  res = *tempRes;
#else
  std::memcpy(&res, &src, sizeof(res));
#endif
  return res >> 16;
}
// Convert a float to the nearest bfloat16 bit pattern using
// round-to-nearest, ties-to-even: add 0x7FFF plus the LSB of the kept
// half, then truncate. NaN inputs are canonicalized to 0x7FC0 (quiet NaN).
inline C10_HOST_DEVICE uint16_t round_to_nearest_even(float src) {
#if defined(USE_ROCM)
  if (src != src) {
#elif defined(_MSC_VER)
  if (isnan(src)) {
#else
  if (std::isnan(src)) {
#endif
    return UINT16_C(0x7FC0);
  } else {
    // Type punning through an anonymous union (see strict-aliasing note
    // above); F32 and U32 alias the same storage.
    union {
      uint32_t U32;
      float F32;
    };
    F32 = src;
    uint32_t rounding_bias = ((U32 >> 16) & 1) + UINT32_C(0x7FFF);
    return static_cast<uint16_t>((U32 + rounding_bias) >> 16);
  }
}
} // namespace detail
// 16-bit brain floating-point number: 1 sign bit, 8 exponent bits (the
// same dynamic range as float) and 7 explicit mantissa bits. Conversions
// to and from float are declared here and defined in BFloat16-inl.h.
struct alignas(2) BFloat16 {
  // Raw bit pattern of the value (upper 16 bits of the equivalent float).
  uint16_t x;
  // HIP wants __host__ __device__ tag, CUDA does not
#if defined(USE_ROCM)
  C10_HOST_DEVICE BFloat16() = default;
#else
  BFloat16() = default;
#endif
  // Tag type selecting the "construct from raw bits" constructor below.
  struct from_bits_t {};
  static constexpr C10_HOST_DEVICE from_bits_t from_bits() {
    return from_bits_t();
  }
  // Bit-pattern constructor: stores `bits` verbatim, no conversion.
  constexpr C10_HOST_DEVICE BFloat16(unsigned short bits, from_bits_t)
      : x(bits){};
  // Converting constructor and float conversion operator; presumably the
  // float ctor rounds via detail::round_to_nearest_even (defined in
  // BFloat16-inl.h) -- confirm there.
  inline C10_HOST_DEVICE BFloat16(float value);
  inline C10_HOST_DEVICE operator float() const;
  // Interop with CUDA's native __nv_bfloat16 type.
#if defined(__CUDACC__) && !defined(USE_ROCM)
  inline C10_HOST_DEVICE BFloat16(const __nv_bfloat16& value);
  explicit inline C10_HOST_DEVICE operator __nv_bfloat16() const;
#endif
  // Interop with SYCL's oneapi::bfloat16 type.
#if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)
  inline C10_HOST_DEVICE BFloat16(const sycl::ext::oneapi::bfloat16& value);
  explicit inline C10_HOST_DEVICE operator sycl::ext::oneapi::bfloat16() const;
#endif
};
} // namespace c10
#include <c10/util/BFloat16-inl.h> // IWYU pragma: keep
| 2,801
| 23.155172
| 79
|
h
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.