pytorch-main/aten/src/ATen/autocast_mode.h
#pragma once

#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Operators.h>
#include <torch/library.h>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <c10/util/intrusive_ptr.h>

namespace at {
namespace autocast {

TORCH_API bool is_enabled();
TORCH_API void set_enabled(bool enabled);
TORCH_API void clear_cache();
TORCH_API int increment_nesting();
TORCH_API int decrement_nesting();
TORCH_API bool is_cpu_enabled();
TORCH_API void set_cpu_enabled(bool enabled);
TORCH_API at::ScalarType get_autocast_gpu_dtype();
TORCH_API at::ScalarType get_autocast_cpu_dtype();
TORCH_API void set_autocast_gpu_dtype(at::ScalarType dtype);
TORCH_API void set_autocast_cpu_dtype(at::ScalarType dtype);
TORCH_API bool is_xpu_enabled();
TORCH_API void set_xpu_enabled(bool enabled);
TORCH_API at::ScalarType get_autocast_xpu_dtype();
TORCH_API void set_autocast_xpu_dtype(at::ScalarType dtype);
TORCH_API bool is_ipu_enabled();
TORCH_API void set_ipu_enabled(bool enabled);
TORCH_API at::ScalarType get_autocast_ipu_dtype();
TORCH_API void set_autocast_ipu_dtype(at::ScalarType dtype);
TORCH_API bool is_hpu_enabled();
TORCH_API void set_hpu_enabled(bool enabled);
TORCH_API at::ScalarType get_autocast_hpu_dtype();
TORCH_API void set_autocast_hpu_dtype(at::ScalarType dtype);
TORCH_API bool is_xla_enabled();
TORCH_API void set_xla_enabled(bool enabled);
TORCH_API at::ScalarType get_autocast_xla_dtype();
TORCH_API void set_autocast_xla_dtype(at::ScalarType dtype);
TORCH_API bool is_privateuseone_enabled();
TORCH_API void set_privateuseone_enabled(bool enabled);
TORCH_API at::ScalarType get_autocast_privateuseone_dtype();
TORCH_API void set_autocast_privateuseone_dtype(at::ScalarType dtype);
TORCH_API bool is_autocast_cache_enabled();
TORCH_API void set_autocast_cache_enabled(bool enabled);

namespace {
bool is_autocast_eligible(const Tensor& tensor, c10::DeviceType device_type) {
  switch (device_type) {
    case c10::DeviceType::CUDA:
      return (tensor.is_cuda() || tensor.is_xla()) &&
          tensor.is_floating_point();
    case c10::DeviceType::CPU:
      return (tensor.is_cpu() || tensor.is_mkldnn()) &&
          tensor.is_floating_point();
    case c10::DeviceType::XPU:
      return tensor.is_xpu() && tensor.is_floating_point();
    case c10::DeviceType::IPU:
      return tensor.is_ipu() && tensor.is_floating_point();
    case c10::DeviceType::HPU:
      return tensor.is_hpu() && tensor.is_floating_point();
    case c10::DeviceType::XLA:
      return tensor.is_xla() && tensor.is_floating_point();
    case c10::DeviceType::PrivateUse1:
      return tensor.device().type() == c10::DeviceType::PrivateUse1 &&
          tensor.is_floating_point();
    default:
      return false;
  }
}
} // namespace

inline DispatchKey get_autocast_dispatch_key_from_device_type(
    c10::DeviceType device_type) {
  switch (device_type) {
    case c10::DeviceType::CUDA:
      return DispatchKey::Autocast;
    case c10::DeviceType::CPU:
      return DispatchKey::AutocastCPU;
    case c10::DeviceType::XPU:
      return DispatchKey::AutocastXPU;
    case c10::DeviceType::IPU:
      return DispatchKey::AutocastIPU;
    case c10::DeviceType::HPU:
      return DispatchKey::AutocastHPU;
    case c10::DeviceType::XLA:
      return DispatchKey::AutocastXLA;
    case c10::DeviceType::PrivateUse1:
      return DispatchKey::AutocastPrivateUse1;
    default:
      throw std::runtime_error(
          "unknown device type for autocast in get_autocast_dispatch_key_from_device_type");
  }
}

inline at::ScalarType get_lower_precision_fp_from_device_type(
    c10::DeviceType device_type) {
  switch (device_type) {
    case c10::DeviceType::CUDA:
      return get_autocast_gpu_dtype();
    case c10::DeviceType::CPU:
      return get_autocast_cpu_dtype();
    case c10::DeviceType::XPU:
      return get_autocast_xpu_dtype();
    case c10::DeviceType::IPU:
      return get_autocast_ipu_dtype();
    case c10::DeviceType::HPU:
      return get_autocast_hpu_dtype();
    case c10::DeviceType::XLA:
      return get_autocast_xla_dtype();
    case c10::DeviceType::PrivateUse1:
      return get_autocast_privateuseone_dtype();
    default:
      throw std::runtime_error(
          "unknown device type for autocast in get_lower_precision_fp_from_device_type");
  }
}

/********************************************************************
Logic to extract the promote type from any Tensor or TensorList args.
********************************************************************/

// Overload to catch Tensor args.
// If nextArg is floating-point, compare its scalar_type with our
// current best guess for the promote type, and update if necessary.
inline at::ScalarType prioritize(
    at::ScalarType current,
    const Tensor& nextArg,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  if (current == at::kDouble) {
    AT_ERROR("promote type is double in at::autocast::prioritize");
    return current;
  }
  at::ScalarType lower_precision_fp =
      get_lower_precision_fp_from_device_type(device_type);
  if (is_autocast_eligible(nextArg, device_type)) {
    auto next = nextArg.scalar_type();
    if (next == at::kDouble) {
      return current; // ignores double tensors
    } else if (current == at::kFloat || next == at::kFloat) {
      return at::kFloat; // prioritizes float over lower_precision_fp
    } else if (current == lower_precision_fp && next == lower_precision_fp) {
      return lower_precision_fp;
    } else {
      AT_ERROR("Unexpected floating ScalarType in at::autocast::prioritize");
      return current;
    }
  } else {
    return current;
  }
}

// Overload to catch TensorList args (e.g. for cat, stack).
// Reuses the overload above to process each Tensor in the list.
inline at::ScalarType prioritize(
    at::ScalarType current,
    const TensorList& list,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  for (const auto& tensor : list) {
    current = prioritize(current, tensor, device_type);
  }
  return current;
}

inline at::ScalarType prioritize(
    at::ScalarType current,
    const ITensorListRef& list,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  for (const auto& tensor : list) {
    current = prioritize(current, tensor, device_type);
  }
  return current;
}

// Template to catch non-Tensor args (no-op that returns current best guess)
template <typename T>
inline at::ScalarType prioritize(
    at::ScalarType current,
    T nextArg,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  return current;
}

// Overload for the tail case.
inline at::ScalarType promote_type(
    at::ScalarType current,
    c10::DeviceType device_type) {
  return current;
}

// Unpack args and determine if incoming lower_precision_fp tensors need to be
// promoted to float32. Non-Tensor arguments are ignored.
template <typename Arg0, typename... Args>
inline at::ScalarType promote_type(
    at::ScalarType current,
    c10::DeviceType device_type,
    Arg0 arg0,
    Args... args) {
  auto new_current = prioritize(current, arg0, device_type);
  return promote_type(new_current, device_type, args...);
}

/****************************************************
Logic to apply cached casting to any Tensor argument.
****************************************************/
inline bool is_eligible(
    const Tensor& arg,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  return (
      arg.defined() && is_autocast_eligible(arg, device_type) &&
      (arg.scalar_type() != at::kDouble));
}

// Overload to catch Tensor args
TORCH_API Tensor cached_cast(
    at::ScalarType to_type,
    const Tensor& arg,
    c10::DeviceType device_type = c10::DeviceType::CUDA);

// Overload to process optional<Tensor>
inline c10::optional<Tensor> cached_cast(
    at::ScalarType to_type,
    const c10::optional<Tensor>& arg,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  if (arg.has_value()) {
    return cached_cast(to_type, *arg, device_type);
  } else {
    return c10::nullopt;
  }
}

// Overload to process TensorLists
inline std::vector<Tensor> cached_cast(
    at::ScalarType to_type,
    const TensorList& arg,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  std::vector<Tensor> vec;
  vec.reserve(arg.size());
  for (const auto& t : arg) {
    vec.emplace_back(cached_cast(to_type, t, device_type));
  }
  return vec;
}

inline std::vector<Tensor> cached_cast(
    at::ScalarType to_type,
    const ITensorListRef& arg,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  std::vector<Tensor> vec;
  vec.reserve(arg.size());
  for (const auto& t : arg) {
    vec.emplace_back(cached_cast(to_type, t, device_type));
  }
  return vec;
}

// Template to catch non-Tensor args.
template <typename T>
inline T cached_cast(
    at::ScalarType to_type,
    T arg,
    c10::DeviceType device_type = c10::DeviceType::CUDA) {
  return arg;
}

/*******************************************************
Logic to flip an output dtype flag.
Keep it simple for now by assuming only one such flag is
present in the argument list. If I ever need a function
with more than one flag I'll figure out something else.
The policy is: if the user has explicitly specified a dtype,
respect it. Otherwise, set it to the autocast type.
********************************************************/

// Overload to catch dtype flags
c10::optional<ScalarType> inline set_opt_dtype(
    at::ScalarType to_type,
    const c10::optional<ScalarType>& dtype) {
  return dtype.has_value() ? dtype : to_type;
}

// Template to catch other args
template <typename T>
inline T set_opt_dtype(at::ScalarType to_type, T arg) {
  return arg;
}

template <typename... Args>
inline bool firstarg_is_eligible(
    c10::DeviceType device_type,
    const Tensor& arg,
    Args... args) {
  return is_eligible(arg, device_type);
}

template <typename... Args>
inline at::ScalarType type_from_firstarg(
    c10::DeviceType device_type,
    at::ScalarType to_type,
    const Tensor& arg,
    Args... args) {
  return (is_eligible(arg, device_type) ? to_type : arg.scalar_type());
}

// Policies correspond to op categories that need code-divergent handling.
// Wrapper templates below are specialized based on a policy template
// parameter.
enum class CastPolicy : uint8_t {
  lower_precision_fp = 0, // Cast all inputs to lower_precision_fp before
                          // running the op. Currently, lower_precision_fp is
                          // fp16 for AutocastCUDA, and is defined by user
                          // (default bf16) for AutocastCPU or other device.
  fp32, // Cast all inputs to at::kFloat before running the op.
  fp32_set_opt_dtype, // Treats functions (like softmax) that
                      //  1. we'd like to run in fp32 and
                      //  2. have a c10::optional<ScalarType> arg that controls
                      //     the output type.
                      // fp32_set_opt_dtype wrappers' policy is: if the output
                      // type is already set, don't touch it, otherwise, set
                      // it to at::kFloat.
  fp32_append_dtype, // Treats functions (like norm) that
                     //  1. we'd like to run in fp32 and
                     //  2. have some overloads that accept an output type and
                     //     other overloads that don't.
                     // fp32_append_dtype wrappers wrap the overloads that
                     // don't have an output dtype.
                     // The wrapper policy is: append at::kFloat to the args,
                     // and redispatch to the type-aware overload.
  promote, // Run in the widest dtype among several args.
};

/********************************************************************************************************
Templates to provide wrapper functions

I'm copying the pattern used in core/boxing/impl/WrapFunctionIntoFunctor.h
to extract args and return type. (see also
https://stackoverflow.com/questions/46533698/how-to-deduce-argument-list-from-function-pointer)

This strategy uses an exterior "WrapFunction" that extracts arguments on
behalf of (in my case several specializations of) an interior "WrapFunction_".
Interior WrapFunction_ specializations are defined for each CastPolicy.
********************************************************************************************************/

// Base template for WrapFunction_, which is specialized to contain a "call"
// method for each CastPolicy
template <
    CastPolicy policy,
    c10::DeviceType device_type,
    class Redispatch,
    Redispatch* F,
    class Ret,
    class ArgList>
struct WrapFunction_ {};

// CastPolicy::lower_precision_fp General_DeviceType
template <
    c10::DeviceType device_type,
    class Redispatch,
    Redispatch* F,
    class Ret,
    class... Args>
struct WrapFunction_<
    CastPolicy::lower_precision_fp,
    device_type,
    Redispatch,
    F,
    Ret,
    guts::typelist::typelist<Args...>> {
  static Ret call(Args... args) {
    c10::impl::ExcludeDispatchKeyGuard no_autocast(
        get_autocast_dispatch_key_from_device_type(device_type));
    return (*F)(cached_cast(
        get_lower_precision_fp_from_device_type(device_type),
        args,
        device_type)...);
  }
};

// CastPolicy::fp32 General_DeviceType
template <
    c10::DeviceType device_type,
    class Redispatch,
    Redispatch* F,
    class Ret,
    class... Args>
struct WrapFunction_<
    CastPolicy::fp32,
    device_type,
    Redispatch,
    F,
    Ret,
    guts::typelist::typelist<Args...>> {
  static Ret call(Args... args) {
    c10::impl::ExcludeDispatchKeyGuard no_autocast(
        get_autocast_dispatch_key_from_device_type(device_type));
    return (*F)(cached_cast(at::kFloat, args, device_type)...);
  }
};

// CastPolicy::fp32_set_opt_dtype General_DeviceType
template <
    c10::DeviceType device_type,
    class Redispatch,
    Redispatch* F,
    class Ret,
    class... Args>
struct WrapFunction_<
    CastPolicy::fp32_set_opt_dtype,
    device_type,
    Redispatch,
    F,
    Ret,
    guts::typelist::typelist<Args...>> {
  static Ret call(Args... args) {
    c10::impl::ExcludeDispatchKeyGuard no_autocast(
        get_autocast_dispatch_key_from_device_type(device_type));
    if (firstarg_is_eligible(device_type, args...)) {
      return (*F)(set_opt_dtype(at::kFloat, args)...);
    } else {
      // If ineligible, calls F with unaltered args. Does not set opt dtype,
      // because setting opt dtype explicitly may interfere with internal
      // implicit promotion decisions.
      return (*F)(args...);
    }
  }
};

// CastPolicy::fp32_append_dtype General_DeviceType
template <
    c10::DeviceType device_type,
    class Redispatch,
    Redispatch* F,
    class Ret,
    class... Args>
struct WrapFunction_<
    CastPolicy::fp32_append_dtype,
    device_type,
    Redispatch,
    F,
    Ret,
    guts::typelist::typelist<Args...>> {
  static Ret call(Args... args) {
    c10::impl::ExcludeDispatchKeyGuard no_autocast(
        get_autocast_dispatch_key_from_device_type(device_type));
    at::ScalarType out_type =
        type_from_firstarg(device_type, at::kFloat, args...);
    return (*F)(args..., out_type);
  }
};

// CastPolicy::promote General_DeviceType
template <
    c10::DeviceType device_type,
    class Redispatch,
    Redispatch* F,
    class Ret,
    class... Args>
struct WrapFunction_<
    CastPolicy::promote,
    device_type,
    Redispatch,
    F,
    Ret,
    guts::typelist::typelist<Args...>> {
  static Ret call(Args... args) {
    c10::impl::ExcludeDispatchKeyGuard no_autocast(
        get_autocast_dispatch_key_from_device_type(device_type));
    auto to_type = promote_type(
        get_lower_precision_fp_from_device_type(device_type),
        device_type,
        args...);
    return (*F)(cached_cast(to_type, args, device_type)...);
  }
};

// Wrapper to infer return_type and parameter_types for WrapFunction_
// (imitating core/boxing/impl/WrapFunctionIntoFunctor.h)
template <
    CastPolicy policy,
    c10::DeviceType device_type,
    class Registered, // The signature for which we're registering. The
                      // dispatcher's calling code invokes our registered
                      // functions with arguments matching Registered, so we
                      // register WrapFunction_::call methods with a matching
                      // signature to properly field those arguments.
                      // guts::function_traits below extracts return_type and
                      // parameter_types from Registered, which WrapFunction_
                      // templates above use to declare their call methods.
    class Redispatch, // The signature for the function we're redispatching
                      // to. In most cases this is the same as Registered, but
                      // for some ops (for example, ops where we append a
                      // dtype) it's useful to redispatch to a function with a
                      // different signature.
    Redispatch* F> // The actual function we're redispatching to.
struct WrapFunction final {
  using type = WrapFunction_<
      policy,
      device_type,
      Redispatch,
      F,
      typename guts::function_traits<Registered>::return_type,
      typename guts::function_traits<Registered>::parameter_types>;
};

/*****************************************************************************************************************
This section performs load-time registration for autocast wrappers.

It's debatable at what level operations should be patched. We'd like casts to
be autograd-exposed and precede autograd history recording, so that for
lower_precision_fp ops, input tensors are saved for backward in
lower_precision_fp rather than fp32. Saving inputs in lower_precision_fp can
significantly reduce a model's memory footprint.

Option 1 (strawman): Patch only at the level of explicit calls into
cudnn/cublas (cudnn_convolution, etc), because those are the code paths that
are guaranteed to use Tensor Cores, therefore they're the ones that will
benefit most from lower_precision_fp. Potential pitfall: convolutions (and
other ops) are wrapped in several layers of at::* calls. If one of those
happens to record autograd history, then we've lost the opportunity to save
inputs in lower_precision_fp.

Option 2: Patch the Python-exposed surface of calls, to make 100% sure
autograd history recording can't sneak in ahead of autocast. This mirrors
Apex most closely.

I think Option 2 is the right answer for all ops, not just convolutions.
Option 2 is what I implement here.
*****************************************************************************************************************/

/********************************************************************************************************************
Explicit registration for out-of-place ops

The stuff below could be codegenned. Ed said
> you are going to have to write the function definition at some point, I
> wouldn't try to get clever about it
Therefore, for the moment, this is all copy pasted in from
VariableTypeEverything.cpp with appropriate substitutions.
********************************************************************************************************************/

} // namespace autocast
} // namespace at

#define ADD_NS(RAW_OP) at::RAW_OP

// Common cases where registration signature matches redispatch signature
// (that's why SIGNATURE is repeated in the WrapFunction instantiation)
#define KERNEL(DISPATCHKEY, OP, POLICY)           \
  m.impl(                                         \
      TORCH_SELECTIVE_NAME("aten::" #OP),         \
      &::at::autocast::WrapFunction<              \
          ::at::autocast::CastPolicy::POLICY,     \
          DISPATCHKEY,                            \
          decltype(ATEN_FN(OP)),                  \
          decltype(ATEN_FN(OP)),                  \
          &ATEN_FN(OP)>::type::call);

#define KERNEL2(DISPATCHKEY, OP, OVERLOAD, POLICY)    \
  m.impl(                                             \
      TORCH_SELECTIVE_NAME("aten::" #OP "." #OVERLOAD), \
      &::at::autocast::WrapFunction<                  \
          ::at::autocast::CastPolicy::POLICY,         \
          DISPATCHKEY,                                \
          decltype(ATEN_FN2(OP, OVERLOAD)),           \
          decltype(ATEN_FN2(OP, OVERLOAD)),           \
          &ATEN_FN2(OP, OVERLOAD)>::type::call);

// Less-common but still useful case: redispatching to a function
// with a new signature (e.g. appending a dtype)
#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE(    \
    DISPATCHKEY,                                  \
    REDISPATCH_FUNC,                              \
    REGISTER_NAME,                                \
    REGISTER_SIGNATURE,                           \
    REDISPATCH_SIGNATURE,                         \
    POLICY)                                       \
  m.impl(                                         \
      TORCH_SELECTIVE_NAME("aten::" REGISTER_NAME), \
      &::at::autocast::WrapFunction<              \
          ::at::autocast::CastPolicy::POLICY,     \
          DISPATCHKEY,                            \
          REGISTER_SIGNATURE,                     \
          REDISPATCH_SIGNATURE,                   \
          &REDISPATCH_FUNC>::type::call);

// KERNEL_CPU/KERNEL_CPU2/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU
// registration for AutocastCPU
#define KERNEL_CPU(OP, POLICY) KERNEL(c10::DeviceType::CPU, OP, POLICY)
#define KERNEL_CPU2(OP, OVERLOAD, POLICY) \
  KERNEL2(c10::DeviceType::CPU, OP, OVERLOAD, POLICY)
#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CPU( \
    REDISPATCH_FUNC,                               \
    REGISTER_NAME,                                 \
    REGISTER_SIGNATURE,                            \
    REDISPATCH_SIGNATURE,                          \
    POLICY)                                        \
  KERNEL_DIFFERENT_REDISPATCH_SIGNATURE(           \
      c10::DeviceType::CPU,                        \
      REDISPATCH_FUNC,                             \
      REGISTER_NAME,                               \
      REGISTER_SIGNATURE,                          \
      REDISPATCH_SIGNATURE,                        \
      POLICY)

// KERNEL_CUDA/KERNEL_CUDA2/KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA
// registration for AutocastCUDA
#define KERNEL_CUDA(OP, POLICY) KERNEL(c10::DeviceType::CUDA, OP, POLICY)
#define KERNEL_CUDA2(OP, OVERLOAD, POLICY) \
  KERNEL2(c10::DeviceType::CUDA, OP, OVERLOAD, POLICY)
#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_CUDA( \
    REDISPATCH_FUNC,                                \
    REGISTER_NAME,                                  \
    REGISTER_SIGNATURE,                             \
    REDISPATCH_SIGNATURE,                           \
    POLICY)                                         \
  KERNEL_DIFFERENT_REDISPATCH_SIGNATURE(            \
      c10::DeviceType::CUDA,                        \
      REDISPATCH_FUNC,                              \
      REGISTER_NAME,                                \
      REGISTER_SIGNATURE,                           \
      REDISPATCH_SIGNATURE,                         \
      POLICY)

// KERNEL_PRIVATEUSEONE/KERNEL_PRIVATEUSEONE2/
// KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE
// registration for AutocastPrivateUse1
#define KERNEL_PRIVATEUSEONE(OP, POLICY) \
  KERNEL(c10::DeviceType::PrivateUse1, OP, POLICY)
#define KERNEL_PRIVATEUSEONE2(OP, OVERLOAD, POLICY) \
  KERNEL2(c10::DeviceType::PrivateUse1, OP, OVERLOAD, POLICY)
#define KERNEL_DIFFERENT_REDISPATCH_SIGNATURE_PRIVATEUSEONE( \
    REDISPATCH_FUNC,                                         \
    REGISTER_NAME,                                           \
    REGISTER_SIGNATURE,                                      \
    REDISPATCH_SIGNATURE,                                    \
    POLICY)                                                  \
  KERNEL_DIFFERENT_REDISPATCH_SIGNATURE(                     \
      c10::DeviceType::PrivateUse1,                          \
      REDISPATCH_FUNC,                                       \
      REGISTER_NAME,                                         \
      REGISTER_SIGNATURE,                                    \
      REDISPATCH_SIGNATURE,                                  \
      POLICY)
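For context, a minimal sketch of how these macros are meant to be consumed, mirroring the registration style in autocast_mode.cpp. The specific op/policy pairs below are illustrative examples, not an exhaustive or authoritative list:

// Hedged sketch: registering autocast wrappers for a few CUDA ops.
// Assumes this runs in a translation unit that includes autocast_mode.h.
TORCH_LIBRARY_IMPL(aten, Autocast, m) {
  KERNEL_CUDA(mm, lower_precision_fp)    // run matmul in fp16/bf16
  KERNEL_CUDA(addmm, lower_precision_fp) // run fused matmul-add likewise
  KERNEL_CUDA(acos, fp32)                // numerically sensitive -> fp32
  KERNEL_CUDA(cross, promote)            // run in the widest input dtype
}

Each line registers a WrapFunction_::call for the op under the Autocast dispatch key; the wrapper excludes the key, casts inputs per the policy, and redispatches.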
pytorch-main/aten/src/ATen/code_template.h
#pragma once

#include <c10/util/irange.h>

#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>

namespace at {
namespace jit {

// A template environment is a mapping from template variable names, e.g.,
// identifier (corresponding to $identifier) to their expansions.
//
// This template environment supports storing strings, numbers and lists
// of strings, and can be chained together (so that lookup proceeds in
// the top-level environment, and then recurses into a parent
// environment if the key is not found.)
struct TemplateEnv {
  TemplateEnv() = default;
  TemplateEnv(TemplateEnv& parent) : parent(&parent) {}

  using string_list = std::vector<std::string>;

  // Add a string 'v' to the map at key 'k'.
  void s(const std::string& k, const std::string& v) {
    strings_[k] = v;
    lists_.erase(k);
  }

  // Add a number 'v' to the map at key 'k'
  template <typename T>
  void d(const std::string& k, const T& v) {
    strings_[k] = c10::to_string(v);
    lists_.erase(k);
  }

  // Retrieve the string representation of the value stored at 'k' from the
  // map. Raises an exception if the key is not found.
  const std::string& s(const std::string& k) const {
    if (strings_.count(k) == 0) {
      if (parent) {
        return parent->s(k);
      }
      notFound(k);
    }
    return strings_.at(k);
  }

  // Store a list of strings 'v' in the map at 'k'.
  void v(const std::string& k, const string_list& v) {
    lists_[k] = v;
    strings_.erase(k);
  }

  // Retrieve a list of strings stored at 'k' from the map.
  // Raises an exception if the key is not found.
  const string_list& v(const std::string& k) const {
    if (lists_.count(k) == 0) {
      if (parent) {
        return parent->v(k);
      }
      notFound(k);
    }
    return lists_.at(k);
  }

  // Test if the value stored at 'k' is a string (as opposed to a list.)
  bool keyIsString(const std::string& k) const {
    if (strings_.count(k) > 0)
      return true;
    if (lists_.count(k) > 0)
      return false;
    if (parent)
      return parent->keyIsString(k);
    notFound(k);
  }

 private:
  [[noreturn]] void notFound(const std::string& k) const {
    std::stringstream ss;
    ss << "key not found: " << k;
    throw std::logic_error(ss.str());
  }

  std::unordered_map<std::string, std::string> strings_;
  std::unordered_map<std::string, string_list> lists_;
  TemplateEnv* parent{nullptr};
};

/*
# Match $identifier or ${identifier} and replace with the value in env.
# If this identifier is at the beginning of whitespace on a line
# and its value is a list then it is treated as
# block substitution by indenting all lines of all elements.
# If the identifier is on a line starting with non-whitespace and a list
# then it is comma separated. ${,foo} will insert a comma before the list
# if this list is not empty and ${foo,} will insert one after.
*/
struct CodeTemplate {
  /* implicit */ CodeTemplate(std::string t) : template_text(std::move(t)) {}

  std::string format(const TemplateEnv& env) const {
    std::stringstream out;
    size_t pos = 0;
    size_t indent = 0;
    bool all_whitespace = true;
    while (pos < template_text.size()) {
      char c = template_text[pos];
      if (c == '$') {
        std::stringstream kss;
        // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
        bool comma_before;
        // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
        bool comma_after;
        size_t new_pos = parseKey(pos, kss, comma_before, comma_after);
        std::string k = kss.str();
        bool is_string = env.keyIsString(k);
        if (all_whitespace) {
          if (is_string)
            emitStringWithIndents(out, indent, env.s(k));
          else
            emitLinesIndented(out, indent, env.v(k));
        } else {
          if (is_string)
            out << env.s(k);
          else
            emitCommaSeparatedList(out, env.v(k), comma_before, comma_after);
        }
        all_whitespace = false;
        pos = new_pos;
      } else {
        out << c;
        if (!isspace(c))
          all_whitespace = false;
        indent++;
        if (c == '\n') {
          indent = 0;
          all_whitespace = true;
        }
        pos++;
      }
    }
    return out.str();
  }

 private:
  using string_list = std::vector<std::string>;

  char charAt(size_t p) const {
    if (p >= template_text.size())
      throw std::logic_error("EOS found in key");
    return template_text[p];
  }

  size_t parseKey(
      size_t pos,
      std::ostream& k,
      bool& comma_before,
      bool& comma_after) const {
    comma_before = false;
    comma_after = false;
    pos++;
    if (charAt(pos) == '{') {
      pos++;
      if (charAt(pos) == ',') {
        comma_before = true;
        pos++;
      }
      pos = parseIdent(pos, k);
      if (charAt(pos) == ',') {
        comma_after = true;
        pos++;
      }
      if (charAt(pos) != '}')
        throw std::logic_error("missing terminating '}'");
      pos++;
      return pos;
    } else {
      return parseIdent(pos, k);
    }
  }

  size_t parseIdent(size_t pos, std::ostream& k) const {
    while (pos < template_text.size() &&
           (isalnum(template_text[pos]) || template_text[pos] == '_')) {
      k << template_text[pos];
      pos++;
    }
    return pos;
  }

  void emitCommaSeparatedList(
      std::ostream& out,
      const string_list& strings,
      bool comma_before,
      bool comma_after) const {
    if (comma_before && !strings.empty())
      out << ", ";
    for (const auto i : c10::irange(strings.size())) {
      if (i > 0)
        out << ", ";
      out << strings[i];
    }
    if (comma_after && !strings.empty())
      out << ", ";
  }

  // These indentation functions follow the convention that they never emit
  // leading or trailing newlines when the input string does not have leading
  // or trailing newlines. It's the responsibility of the calling function
  // to indent correctly in the context.
  void emitIndent(std::ostream& out, size_t indent) const {
    for (const auto i : c10::irange(indent)) {
      (void)i; // Suppress unused variable warning
      out << " ";
    }
  }

  void emitStringWithIndents(
      std::ostream& out,
      size_t indent,
      const std::string& str) const {
    for (auto c : str) {
      out << c;
      if (c == '\n') {
        emitIndent(out, indent);
      }
    }
  }

  void emitLinesIndented(
      std::stringstream& out,
      size_t indent,
      const string_list& strings) const {
    for (const auto i : c10::irange(strings.size())) {
      if (i > 0)
        emitIndent(out, indent);
      emitStringWithIndents(out, indent, strings[i]);
      if (i + 1 != strings.size())
        out << "\n";
    }
  }

  std::string template_text;
};

static inline std::string format(const std::string& fmt, TemplateEnv& env) {
  return CodeTemplate(fmt).format(env);
}

} // namespace jit
} // namespace at
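A minimal usage sketch of the API above; the template text and key names ("name", "n", "formals") are invented for the example:

// Hedged sketch: substitute a string, a number, and a comma-separated list.
at::jit::TemplateEnv env;
env.s("name", "my_kernel");                          // $name -> "my_kernel"
env.d("n", 4);                                       // numbers are stringified
env.v("formals", {"float* out", "const float* in"}); // list value

at::jit::CodeTemplate tpl(R"(
void ${name}(${formals}) {
  // body expects $n elements
})");
std::string src = tpl.format(env);
// ${formals} appears after non-whitespace on its line, so the list is
// emitted comma-separated: "void my_kernel(float* out, const float* in) {"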
pytorch-main/aten/src/ATen/cpp_custom_type_hack.h
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP

// YOU ARE IN THE WRONG PLACE! TURN BACK NOW!

// This code was a temporary hack to enable embedding arbitrary C++ structures
// into Tensors. THIS IS UNSAFE AND IS NOT SUPPORTED. IF YOU USE THIS CODE,
// IT __WILL__ BREAK.

// This code has been superseded by custom classes:
// https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html

// Please use custom classes and **DO NOT ADD MORE CALLSITES TO THINGS DEFINED
// IN THIS FILE**.

// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP
// STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP STOP

#include <ATen/TracerMode.h>
#include <ATen/core/Tensor.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty.h>
#endif

namespace at {
namespace cpp_custom_type_hack {

template <typename T>
[[deprecated(
    "Use custom classes instead: "
    "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] bool
isa(const Tensor& packed) {
  return (packed.scalar_type() == kByte) &&
      (packed.storage().data_ptr().get_deleter() ==
       caffe2::TypeMeta::Make<T>().deleteFn());
}

template <typename T>
[[deprecated(
    "Use custom classes instead: "
    "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] T&
cast(const Tensor& packed) {
  TORCH_CHECK(
      packed.scalar_type() == kByte, "Expected temporary cpp type wrapper");
  TORCH_CHECK(
      packed.storage().data_ptr().get_deleter() ==
          caffe2::TypeMeta::Make<T>().deleteFn(),
      "Expected temporary cpp type wrapper of type ",
      caffe2::TypeMeta::TypeName<T>());
  return *reinterpret_cast<T*>(packed.storage().data_ptr().get());
}

template <typename T>
[[deprecated(
    "Use custom classes instead: "
    "https://pytorch.org/tutorials/advanced/torch_script_custom_classes.html")]] Tensor
create(std::unique_ptr<T> ptr, TensorOptions options) {
  // None of this should trace, so turn off Tracer dispatching
  at::AutoDispatchBelowADInplaceOrView guard; // TODO: remove
  at::tracer::impl::NoTracerDispatchMode tracer_guard;

  // We store this instance away in a Tensor and register a deleter function
  // so that we do not leak memory. On the other side, we pull out the
  // storage's data_ptr and get the right typed pointer.
  void* raw_ptr = ptr.release();
  at::DataPtr at_ptr(
      raw_ptr, raw_ptr, caffe2::TypeMeta::Make<T>().deleteFn(), at::kCPU);

  // size doesn't really matter, but we can align it to the actual size;
  // returning variables because one likely wants to use this hack from python
  auto retval = at::empty({sizeof(T)}, options.device(kCPU).dtype(at::kByte));
  retval.storage().set_data_ptr_noswap(std::move(at_ptr));
  return retval;
}

} // namespace cpp_custom_type_hack
} // namespace at
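For illustration only (the header above explicitly forbids new callsites): a sketch of what a legacy callsite of this deprecated API looks like. PackedState, pack and unpack are hypothetical names:

// Hedged sketch of the deprecated pack/unpack pattern; do not copy into
// new code -- use torchbind custom classes instead.
struct PackedState {
  int64_t seed;
};

at::Tensor pack() {
  // Stores a heap-allocated PackedState inside a byte Tensor.
  return at::cpp_custom_type_hack::create(
      std::make_unique<PackedState>(PackedState{42}), at::TensorOptions());
}

int64_t unpack(const at::Tensor& t) {
  // Recovers a typed reference from the byte Tensor's storage.
  return at::cpp_custom_type_hack::cast<PackedState>(t).seed;
}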
pytorch-main/aten/src/ATen/dlpack.h
/*!
 *  Copyright (c) 2017 by Contributors
 * \file dlpack.h
 * \brief The common header of DLPack.
 */
#ifndef DLPACK_DLPACK_H_
#define DLPACK_DLPACK_H_

/**
 * \brief Compatibility with C++
 */
#ifdef __cplusplus
#define DLPACK_EXTERN_C extern "C"
#else
#define DLPACK_EXTERN_C
#endif

/*! \brief The current version of dlpack */
#define DLPACK_VERSION 70

/*! \brief The current ABI version of dlpack */
#define DLPACK_ABI_VERSION 1

/*! \brief DLPACK_DLL prefix for windows */
#ifdef _WIN32
#ifdef DLPACK_EXPORTS
#define DLPACK_DLL __declspec(dllexport)
#else
#define DLPACK_DLL __declspec(dllimport)
#endif
#else
#define DLPACK_DLL
#endif

#include <stddef.h>
#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

/*!
 * \brief The device type in DLDevice.
 */
#ifdef __cplusplus
typedef enum : int32_t {
#else
typedef enum {
#endif
  /*! \brief CPU device */
  kDLCPU = 1,
  /*! \brief CUDA GPU device */
  kDLCUDA = 2,
  /*!
   * \brief Pinned CUDA CPU memory by cudaMallocHost
   */
  kDLCUDAHost = 3,
  /*! \brief OpenCL devices. */
  kDLOpenCL = 4,
  /*! \brief Vulkan buffer for next generation graphics. */
  kDLVulkan = 7,
  /*! \brief Metal for Apple GPU. */
  kDLMetal = 8,
  /*! \brief Verilog simulator buffer */
  kDLVPI = 9,
  /*! \brief ROCm GPUs for AMD GPUs */
  kDLROCM = 10,
  /*!
   * \brief Pinned ROCm CPU memory allocated by hipMallocHost
   */
  kDLROCMHost = 11,
  /*!
   * \brief Reserved extension device type,
   * used for quickly testing extension devices.
   * The semantics can differ depending on the implementation.
   */
  kDLExtDev = 12,
  /*!
   * \brief CUDA managed/unified memory allocated by cudaMallocManaged
   */
  kDLCUDAManaged = 13,
  /*!
   * \brief Unified shared memory allocated on a oneAPI non-partitioned
   * device. Call to oneAPI runtime is required to determine the device
   * type, the USM allocation type and the sycl context it is bound to.
   */
  kDLOneAPI = 14,
  /*! \brief GPU support for next generation WebGPU standard. */
  kDLWebGPU = 15,
  /*! \brief Qualcomm Hexagon DSP */
  kDLHexagon = 16,
} DLDeviceType;

/*!
 * \brief A Device for Tensor and operator.
 */
// NB: This is the only difference from
// https://github.com/dmlc/dlpack/blob/v0.7/include/dlpack/dlpack.h Required to
// allow forward declaration of DLDevice.
typedef struct DLDevice_ {
  /*! \brief The device type used in the device. */
  DLDeviceType device_type;
  /*!
   * \brief The device index.
   * For vanilla CPU memory, pinned memory, or managed memory, this is set to
   * 0.
   */
  int32_t device_id;
} DLDevice;

/*!
 * \brief The type code options DLDataType.
 */
typedef enum {
  /*! \brief signed integer */
  kDLInt = 0U,
  /*! \brief unsigned integer */
  kDLUInt = 1U,
  /*! \brief IEEE floating point */
  kDLFloat = 2U,
  /*!
   * \brief Opaque handle type, reserved for testing purposes.
   * Frameworks need to agree on the handle data type for the exchange to be
   * well-defined.
   */
  kDLOpaqueHandle = 3U,
  /*! \brief bfloat16 */
  kDLBfloat = 4U,
  /*!
   * \brief complex number
   * (C/C++/Python layout: compact struct per complex number)
   */
  kDLComplex = 5U,
} DLDataTypeCode;

/*!
 * \brief The data type the tensor can hold. The data type is assumed to follow
 * the native endian-ness. An explicit error message should be raised when
 * attempting to export an array with non-native endianness.
 *
 *  Examples
 *   - float: type_code = 2, bits = 32, lanes=1
 *   - float4(vectorized 4 float): type_code = 2, bits = 32, lanes=4
 *   - int8: type_code = 0, bits = 8, lanes=1
 *   - std::complex<float>: type_code = 5, bits = 64, lanes = 1
 */
typedef struct {
  /*!
   * \brief Type code of base types.
   * We keep it uint8_t instead of DLDataTypeCode for minimal memory
   * footprint, but the value should be one of DLDataTypeCode enum values.
   */
  uint8_t code;
  /*!
   * \brief Number of bits, common choices are 8, 16, 32.
   */
  uint8_t bits;
  /*! \brief Number of lanes in the type, used for vector types. */
  uint16_t lanes;
} DLDataType;

/*!
 * \brief Plain C Tensor object, does not manage memory.
 */
typedef struct {
  /*!
   * \brief The data pointer points to the allocated data. This will be CUDA
   * device pointer or cl_mem handle in OpenCL. It may be opaque on some device
   * types. This pointer is always aligned to 256 bytes as in CUDA. The
   * `byte_offset` field should be used to point to the beginning of the data.
   *
   * Note that as of Nov 2021, multiple libraries (CuPy, PyTorch, TensorFlow,
   * TVM, perhaps others) do not adhere to this 256 byte alignment requirement
   * on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed
   * (after which this note will be updated); at the moment it is recommended
   * to not rely on the data pointer being correctly aligned.
   *
   * For given DLTensor, the size of memory required to store the contents of
   * data is calculated as follows:
   *
   * \code{.c}
   * static inline size_t GetDataSize(const DLTensor* t) {
   *   size_t size = 1;
   *   for (tvm_index_t i = 0; i < t->ndim; ++i) {
   *     size *= t->shape[i];
   *   }
   *   size *= (t->dtype.bits * t->dtype.lanes + 7) / 8;
   *   return size;
   * }
   * \endcode
   */
  void* data;
  /*! \brief The device of the tensor */
  DLDevice device;
  /*! \brief Number of dimensions */
  int32_t ndim;
  /*! \brief The data type of the pointer*/
  DLDataType dtype;
  /*! \brief The shape of the tensor */
  int64_t* shape;
  /*!
   * \brief strides of the tensor (in number of elements, not bytes)
   * can be NULL, indicating tensor is compact and row-major.
   */
  int64_t* strides;
  /*! \brief The offset in bytes to the beginning pointer to data */
  uint64_t byte_offset;
} DLTensor;

/*!
 * \brief C Tensor object, manage memory of DLTensor. This data structure is
 * intended to facilitate the borrowing of DLTensor by another framework. It is
 * not meant to transfer the tensor. When the borrowing framework doesn't need
 * the tensor, it should call the deleter to notify the host that the resource
 * is no longer needed.
 */
typedef struct DLManagedTensor {
  /*! \brief DLTensor which is being memory managed */
  DLTensor dl_tensor;
  /*! \brief the context of the original host framework of DLManagedTensor in
   * which DLManagedTensor is used in the framework. It can also be NULL.
   */
  void* manager_ctx;
  /*! \brief Destructor signature void (*)(void*) - this should be called
   * to destruct manager_ctx which holds the DLManagedTensor. It can be NULL
   * if there is no way for the caller to provide a reasonable destructor.
   * The destructor deletes the argument self as well.
   */
  void (*deleter)(struct DLManagedTensor* self);
} DLManagedTensor;

#ifdef __cplusplus
} // DLPACK_EXTERN_C
#endif
#endif // DLPACK_DLPACK_H_
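A small sketch of how these structs fit together: describing an existing 2x3 row-major float32 CPU buffer as a non-owning DLTensor (the variable names are local to the example):

// Hedged sketch: fill a DLTensor over a stack buffer. No ownership is
// transferred; for that, wrap it in a DLManagedTensor with a deleter.
float buf[6] = {1, 2, 3, 4, 5, 6};
int64_t shape[2] = {2, 3};

DLTensor t;
t.data = buf;
t.device.device_type = kDLCPU;
t.device.device_id = 0;
t.ndim = 2;
t.dtype.code = kDLFloat; // type_code = 2
t.dtype.bits = 32;
t.dtype.lanes = 1;
t.shape = shape;
t.strides = nullptr; // NULL means compact and row-major
t.byte_offset = 0;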
pytorch-main/aten/src/ATen/jiterator_macros.h
#pragma once
#include <c10/macros/Macros.h>
#include <string>

#define JITERATOR_HOST_DEVICE C10_HOST_DEVICE
#if defined(_MSC_VER) && defined(__CUDACC__)
// NVRTC on Windows errors if __host__ __device__ attribute is
// present on kernel.
// error: attribute "__host__" does not apply here
// error: attribute "__device__" does not apply here
#define JITERATOR_HOST_DEVICE
#endif

// jiterator_also_stringify_as macro is used to define code (for CPU/ROCm)
// and generate code string for `jiterator` (only when compiling for CUDA).
// Usage :
//      jiterator_also_stringify_as(
//          jiterator_code(template <typename T> T identity(T x) { return x; }),
//          identity_string);
// This will define the template `identity` as present in code and
// also define `std::string identity_string` with the code as the string
// if this is being compiled for CUDA.

// `jiterator_code` macro is to deal with `,` in the kernel code.
// These `,`s confuse the preprocessor into thinking we are passing
// multiple arguments to the macro.
#define jiterator_code(...) __VA_ARGS__

#if defined(__CUDACC__) || defined(__HIPCC__)
// CPU and CUDA and ROCm case
#define stringify_code(...) #__VA_ARGS__
#define jiterator_also_stringify_as(code, str_name) \
  code /* define the function */                    \
      const std::string str_name = std::string(stringify_code(code));
#else
// CPU only or CPU and ROCm case
// Only needs the function
#define jiterator_also_stringify_as(code, str_name) code
#endif
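A usage sketch beyond the identity example in the comments above; square_impl and square_impl_string are invented names:

// Hedged sketch: defines square_impl in every build; under __CUDACC__ or
// __HIPCC__ it additionally captures the source text as
// std::string square_impl_string for jiterator to JIT-compile.
// On CPU-only builds square_impl_string is not defined.
jiterator_also_stringify_as(
    jiterator_code(
        template <typename T>
        JITERATOR_HOST_DEVICE T square_impl(T x) {
          return x * x;
        }),
    square_impl_string);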
pytorch-main/aten/src/ATen/record_function.h
#pragma once #include <ATen/core/ivalue.h> #include <ATen/core/operator_name.h> #include <c10/macros/Export.h> #include <c10/util/Optional.h> #include <c10/util/SmallVector.h> #include <c10/util/variant.h> #include <array> #include <atomic> #include <functional> #include <memory> namespace c10 { class TORCH_API OperatorHandle; } namespace at { // Kind of record function scope; enum class C10_API_ENUM RecordScope : uint8_t { // c10/ATen ops, autograd nodes FUNCTION = 0, // Functions/nodes called from the autograd BACKWARD_FUNCTION, // TorchScript functions, methods TORCHSCRIPT_FUNCTION, // Kernel Function dtype Tag KERNEL_FUNCTION_DTYPE, // Torchbind custom class, CUSTOM_CLASS, // Generic Build Feature BUILD_FEATURE, // Kernel Function dtype Tag LITE_INTERPRETER, // User defined scope (e.g. with record_function()) USER_SCOPE, // Scopes for static runtime, a specialized TorchScript interpreter STATIC_RUNTIME_OP, STATIC_RUNTIME_MODEL, NUM_SCOPES, // must be the last in the list }; } // namespace at namespace std { template <> struct hash<at::RecordScope> { size_t operator()(const at::RecordScope& sc) const { return static_cast<std::size_t>(sc); } }; } // namespace std namespace at { struct TORCH_API StringView { StringView() : StringView(nullptr) {} explicit StringView(const char* str_ptr) : owned_str_ptr_(nullptr), str_ptr_(str_ptr) {} explicit StringView(std::string str) : owned_str_ptr_(std::make_shared<std::string>(std::move(str))), str_ptr_(owned_str_ptr_->c_str()) {} const char* str() const { return str_ptr_; } friend std::ostream& operator<<(std::ostream& os, const StringView& dt) { os << dt.str(); return os; } friend bool operator==(const StringView& lhs, const StringView& rhs) { return strcmp(lhs.str(), rhs.str()) == 0; } friend bool operator!=(const StringView& lhs, const StringView& rhs) { return !(lhs == rhs); } private: std::shared_ptr<std::string> owned_str_ptr_; const char* str_ptr_; }; // Soft limit on the number of callbacks to use; constexpr std::size_t kSoftLimitCallbacks = 4; // An abstract base class for various observer contexts that can be attached to // the RecordFunction. struct ObserverContext { virtual ~ObserverContext() = default; protected: ObserverContext() {} }; typedef c10::SmallVector<uint64_t, kSoftLimitCallbacks> CallbackHandles; typedef c10::SmallVector<std::unique_ptr<ObserverContext>, kSoftLimitCallbacks> ObserverContextList; typedef uint64_t RecordFunctionHandle; struct RecordFunction; // // PyTorch callbacks/observers API: // /** * RecordFunctionCallback represents a pair of callbacks to be used with * RecordFunction, members: * start, end - the callbacks to run when entering and exiting the scope; * optionally, the start callback may return an ObserverContext which will * be passed to the end callback, use appropriate constructor accordingly. 
* needs_inputs - whether the callbacks need the inputs passed from the * observed function/range; NOTE: passing the inputs incurs an additional * overhead; sampling_probability - if not 1.0, then the callback is * probabilistically sampled to run; NOTE: start and end callbacks always run as * a pair and are sampled together; scopes - types of scopes to execute the * callbacks on (see RecordScope); passing empty set means the callbacks will be * executed for all possible scope types should_run - optional function that * returns whether this callback should run; overwrites the effect of setting * sampling_probability */ class TORCH_API RecordFunctionCallback { public: using StartCallback = std::unique_ptr<ObserverContext> (*)(const RecordFunction&); using EndCallback = void (*)(const RecordFunction&, ObserverContext*); // This interface supports observers that require passing an ObserverContext // between start and end callbacks. explicit RecordFunctionCallback( StartCallback start, EndCallback end = nullptr) : start_(start), end_(end) { scopes_.fill(true); } RecordFunctionCallback& needsInputs(bool needs_inputs) { needs_inputs_ = needs_inputs; return *this; } RecordFunctionCallback& needsOutputs(bool needs_outputs) { needs_outputs_ = needs_outputs; return *this; } RecordFunctionCallback& needsIds(bool needs_ids) { needs_ids_ = needs_ids; return *this; } RecordFunctionCallback& samplingProb(double sampling_prob) { TORCH_CHECK( sampling_prob >= 0.0 && sampling_prob <= 1.0, "Invalid sampling probability"); sampling_prob_ = sampling_prob; return *this; } RecordFunctionCallback& scopes( const std::unordered_set<RecordScope, std::hash<RecordScope>>& scopes) { if (!scopes.empty()) { scopes_.fill(false); for (auto sc : scopes) { scopes_[static_cast<size_t>(sc)] = true; } } else { scopes_.fill(true); } return *this; } bool needsInputs() const { return needs_inputs_; } bool needsOutputs() const { return needs_outputs_; } bool needsIds() const { return needs_ids_; } double samplingProb() const { return sampling_prob_; } bool checkScope(RecordScope sc) const { return scopes_[(size_t)sc]; } StartCallback start() const { return start_; } EndCallback end() const { return end_; } private: StartCallback start_; EndCallback end_; double sampling_prob_ = 1.0; std::array<bool, static_cast<size_t>(RecordScope::NUM_SCOPES)> scopes_ = {}; bool needs_inputs_ = false; bool needs_outputs_ = false; bool needs_ids_ = false; }; // Notes: // - two types of callbacks are provided: thread local and global // - thread local callbacks are added/removed only for the given thread // and are stored locally for each thread and separately from the list // of the global callbacks // - global callbacks are stored in a single per process list and are // invoked by every RecordFunction, in addition to the thread local // callbacks specific to the given thread // - we allow the added callbacks to be sampled, by specifying a sampling // probability for each callback pair, if the start callback is // not picked to run, the corresponding end callback won't be called // - a typical use case for the global callbacks is passive monitoring // in the background (e.g. 
fleet-wide monitoring), without focusing on // the specific piece of code // - in contrast, thread local callbacks are enabled locally, on demand, // for the specific piece of code (range) and are not sampled // - a typical use case for thread local callbacks is profiler and code // execution tracer // - note, thread local callbacks are automatically propagated with // ThreadLocalState across JIT continuations and async tasks (at::launch) typedef uint64_t CallbackHandle; constexpr CallbackHandle INVALID_CALLBACK_HANDLE{0}; // It is unnecessary to use atomic operations for enabling // thread-local function callbacks. Moreover, it prevents saving to // ThreadLocalState because std::atomic is non-copyable. struct RecordFunctionCallbacksEntry { RecordFunctionCallbacksEntry(RecordFunctionCallback&& cb, CallbackHandle h) : callback_(cb), handle_(h) {} RecordFunctionCallback callback_; bool enabled_{true}; CallbackHandle handle_; }; // Holds pairs (callbacks, unique_id) using RecordFunctionCallbacks = std::vector<RecordFunctionCallbacksEntry>; // Generated by the callback managers to determine which functions to run. struct StepCallbacks { StepCallbacks() = default; StepCallbacks(uint64_t thread_id, RecordScope scope) : thread_id_{thread_id}, scope_{scope} {} bool empty() const { return callbacks_.empty(); } struct StartEndPair { RecordFunctionCallback::StartCallback start_; RecordFunctionCallback::EndCallback end_; }; using StartEndPairs = c10::SmallVector<StartEndPair, kSoftLimitCallbacks>; StartEndPairs callbacks_; uint64_t thread_id_{0}; RecordScope scope_{RecordScope::FUNCTION}; bool needs_inputs_{false}; bool needs_outputs_{false}; bool needs_ids_{false}; }; struct TORCH_API RecordFunction { // Default constructor is used with before function called afterwards: // scope - record scope that this function tracks // pre_sampled - whether this RecordFunction was already pre-sampled with // kLowProb probability explicit RecordFunction(RecordScope scope = RecordScope::FUNCTION); explicit RecordFunction(StepCallbacks&& step_callbacks); template <typename F> void before( F fn, c10::ArrayRef<const c10::IValue> args, int64_t current_sequence_nr = -1) { if (!isActive()) { return; } inputs_ = args; before(fn, current_sequence_nr); } template <typename F> void before( F fn, const std::vector<IValue>* args, int64_t current_sequence_nr = -1) { before( std::move(fn), c10::ArrayRef<const c10::IValue>(args->data(), args->size()), current_sequence_nr); } // Destructor calls end callbacks virtual ~RecordFunction(); RecordFunction(const RecordFunction&) = delete; RecordFunction& operator=(const RecordFunction&) = delete; const char* name() const; int64_t seqNr() const { return sequence_nr_; } c10::ArrayRef<const IValue> inputs() const { #ifndef NDEBUG TORCH_INTERNAL_ASSERT_DEBUG_ONLY( inputs_valid_, "Called inputs() outside RecordFunction start callback"); #endif return inputs_; } const std::vector<c10::IValue>& outputs() const { return outputs_; } void setOutputs(std::vector<c10::IValue>&& outputs) { outputs_ = std::move(outputs); } void setOutputs(c10::ArrayRef<c10::IValue> outputs) { outputs_ = outputs.vec(); } size_t num_inputs() const; size_t num_outputs() const; // Retrieves the thread_id that this RecordFunction ran start callbacks with. 
// Useful for writing thread safe end callbacks that may be potentially // executed in a different thread (async ops) uint64_t threadId() const { return step_callbacks_.thread_id_; } // For backward functions - thread id of the corresponding forward function, // or zero otherwise; // used alongside with sequence number to correlate backward functions with // the forward ones uint64_t forwardThreadId() const { return fwd_thread_id_; } void setForwardThreadId(uint64_t thread_id) { fwd_thread_id_ = thread_id; } RecordScope scope() const { return step_callbacks_.scope_; } // Returns logical thread_id for the current thread static uint64_t currentThreadId(); // Internal functions, do not use directly; // used in python's context manager // before functions initialize RecordFunction members and call // start callbacks using schema_ref_t = std::reference_wrapper<const c10::FunctionSchema>; void before(const char* name, int64_t sequence_nr = -1); void before(std::string name, int64_t sequence_nr = -1); void before(schema_ref_t schema, int64_t sequence_nr = -1); // Sets node ID for distributed profiling static void setDefaultNodeId(int64_t defaultNodeId); // Gets node ID for distributed profiling static int64_t getDefaultNodeId(); // Calls end callbacks. After end(), accessors will no longer provide useful // results. void end(); // Internal-only, used only force async event for distributed events // profiling. void _setAsync(); // Returns whether this RecordFunction corresponds to an async event orn ot. bool isAsync() const; // Internal-only, used to denote out variant used for Static Runtime execution void _setStaticRuntimeOutVariant(); bool isStaticRuntimeOutVariant() const; RecordFunctionHandle handle() const { return handle_; } c10::optional<OperatorName> operator_name() const; // This method returns a copy of the FunctionSchema and can be expensive. c10::optional<FunctionSchema> operator_schema() const; void setHandle(RecordFunctionHandle handle) { handle_ = handle; } // Whether this RecordFunction runs any callbacks. bool isActive() const { return !step_callbacks_.empty(); } bool needsInputs() const { return step_callbacks_.needs_inputs_; } bool needsOutputs() const { return step_callbacks_.needs_outputs_; } int64_t debugHandle() const { return debug_handle_; } void setDebugHandle(int64_t debug_handle) { debug_handle_ = debug_handle; } void invalidateInputs() { #ifndef NDEBUG inputs_valid_ = false; #endif } private: void runStartCallbacks(); StepCallbacks step_callbacks_; // In cases when RecordFunction might be active but we chose not to // use the observers (e.g. operator is not observed), this boolean // flag is used to check whether the start callbacks were called bool called_start_callbacks_ = false; #ifndef NDEBUG bool inputs_valid_ = false; #endif // Stores various ObserverContext objects with event metadata for callbacks. ObserverContextList ctx_; c10::variant<std::string, schema_ref_t> fn_; int64_t sequence_nr_ = -1; c10::ArrayRef<const IValue> inputs_; std::vector<c10::IValue> outputs_; // For backward functions - thread id of the the forward function uint64_t fwd_thread_id_ = 0; // Unique id for this RecordFunction, used in callbacks to track start // and end of ranges RecordFunctionHandle handle_{0}; // Whether this record_function corresponds to an async event or not. Async // events can complete in different threads or follow a future-like pattern // of use. bool is_async_{false}; // Debug handles are used for lazy annotation of module hierarchy // and callstack. 
// This is specifically is useful for mobile runtime, where generated // debug handles can be lazily symbolicated using debug information int64_t debug_handle_{-1}; // Whether this RecordFunction is used for an out variant run with // Static Runtime bool is_static_runtime_out_variant_{false}; }; TORCH_API StepCallbacks getStepCallbacks(RecordScope scope); TORCH_API c10::optional<StepCallbacks> getStepCallbacksUnlessEmpty( RecordScope scope); namespace detail { template <typename Inputs, typename F, typename... Args> void record_function_with_scope( RecordFunction& guard, F fn, const Inputs& inputs, Args&&... args) { if (guard.needsInputs()) { guard.before( fn, c10::ArrayRef<const c10::IValue>(inputs.data(), inputs.size()), std::forward<Args>(args)...); } else { guard.before(fn, std::forward<Args>(args)...); } } template <typename Inputs, typename F, typename... Args> void record_function_with_scope_and_debug_handle( RecordFunction& guard, F fn, int64_t debug_handle, const Inputs& inputs, Args&&... args) { guard.setDebugHandle(debug_handle); if (guard.needsInputs()) { guard.before( fn, c10::ArrayRef<const c10::IValue>(inputs.data(), inputs.size()), std::forward<Args>(args)...); } else { guard.before(fn, std::forward<Args>(args)...); } } template <typename F, typename... Args> void record_function_with_scope( RecordFunction& guard, F fn, c10::ArrayRef<const c10::IValue> inputs, Args&&... args) { return record_function_with_scope< c10::ArrayRef<const c10::IValue>, F, Args...>(guard, std::move(fn), inputs, std::forward<Args>(args)...); } template <typename F, typename... Args> void record_function_with_scope_and_debug_handle( RecordFunction& guard, F fn, int64_t debug_handle, c10::ArrayRef<const c10::IValue> inputs, Args&&... args) { return record_function_with_scope_and_debug_handle< c10::ArrayRef<const c10::IValue>, F, Args...>( guard, std::move(fn), debug_handle, inputs, std::forward<Args>(args)...); } } // namespace detail // optional argument - function's seq_no #define RECORD_FUNCTION_WITH_SCOPE(scope, fn, inputs, ...) \ at::RecordFunction guard(scope); \ if (guard.isActive()) { \ ::at::detail::record_function_with_scope( \ guard, fn, inputs, ##__VA_ARGS__); \ } #define RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( \ scope, fn, inputs, outputs, ...) \ at::RecordFunction guard(scope); \ if (guard.isActive()) { \ if (guard.needsInputs()) { \ guard.before(fn, inputs, ##__VA_ARGS__); \ } else { \ guard.before(fn, ##__VA_ARGS__); \ } \ if (guard.needsOutputs()) { \ guard.setOutputs(outputs); \ } \ } #define RECORD_FUNCTION(fn, inputs, ...) \ RECORD_FUNCTION_WITH_SCOPE( \ at::RecordScope::FUNCTION, fn, inputs, ##__VA_ARGS__) #define RECORD_TORCHSCRIPT_FUNCTION(mn, inputs) \ RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::TORCHSCRIPT_FUNCTION, mn, inputs) #define RECORD_FUNCTION_WITH_INPUTS_OUTPUTS(fn, inputs, outputs, ...) \ RECORD_FUNCTION_WITH_SCOPE_INPUTS_OUTPUTS( \ at::RecordScope::FUNCTION, fn, inputs, outputs, ##__VA_ARGS__) // Custom user scopes in C++; similar to Python's 'with record_function("..."):' #define RECORD_USER_SCOPE(fn) \ RECORD_FUNCTION_WITH_SCOPE( \ at::RecordScope::USER_SCOPE, fn, c10::ArrayRef<const c10::IValue>{}) // RECORD_USER_SCOPE with inputs #define RECORD_USER_SCOPE_WITH_INPUTS(fn, inputs) \ RECORD_FUNCTION_WITH_SCOPE(at::RecordScope::USER_SCOPE, fn, inputs) // Helper macro to pass in debug handle that is used to // post process events #define RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( \ scope, fn, debug_handle, inputs, ...) 
\ at::RecordFunction guard(scope); \ if (guard.isActive()) { \ ::at::detail::record_function_with_scope_and_debug_handle( \ guard, fn, debug_handle, inputs, ##__VA_ARGS__); \ } // Helper macros to record LITE INTERPETER scope events with debug handles #define RECORD_EDGE_SCOPE_WITH_DEBUG_HANDLE_AND_INPUTS( \ fn, debug_handle, inputs) \ RECORD_WITH_SCOPE_DEBUG_HANDLE_AND_INPUTS( \ at::RecordScope::LITE_INTERPRETER, fn, debug_handle, inputs) // Bookend to the RECORD_FUNCTION macros. Use this after the kernel // launch to let the profiler bind the outputs to the op that produced // them. Note that guard is declared by RECORD_FUNCTION so this macro // needs to be called from the same scope as RECORD_FUNCTION #define RECORD_OUTPUTS(outputs) \ if (guard.needsOutputs()) { \ guard.setOutputs( \ std::vector<c10::IValue>(outputs.begin(), outputs.end())); \ } /** * addThreadLocalCallback adds a thread local callback to run with * RecordFunction, returns handle to use with removeThreadLocalCallback */ TORCH_API CallbackHandle addThreadLocalCallback(RecordFunctionCallback cb); /** * hasThreadLocalCallbacks returns whether there're callbacks registered * with addThreadLocalCallback */ TORCH_API bool hasThreadLocalCallbacks(); /** * clearThreadLocalCallbacks removes all thread local callbacks */ TORCH_API void clearThreadLocalCallbacks(); /** * addGlobalCallback adds a global callback to run with RecordFunction: * * only during the program initialization */ TORCH_API CallbackHandle addGlobalCallback(RecordFunctionCallback cb); /** * removeCallback removes a callback given the handle returned by * addThreadLocalCallback or addGlobalCallback; * * no other code can run simultaneously */ TORCH_API void removeCallback(CallbackHandle handle); /** * Prevent the given callback from executing. If handle is invalid, * does nothing. */ TORCH_API void disableCallback(CallbackHandle handle); /** * Allow the given callback, previously disabled with disableCallback, to * execute again. If handle is invalid, does nothing. 
*/ TORCH_API void reenableCallback(CallbackHandle handle); /** * hasGlobalCallbacks returns whether there are global callbacks * registered with addGlobalCallback */ TORCH_API bool hasGlobalCallbacks(); /** * clearGlobalCallbacks removes all global callbacks */ TORCH_API void clearGlobalCallbacks(); // for both thread local and global callbacks TORCH_API bool hasCallbacks(); TORCH_API void clearCallbacks(); /** * enableRecordFunction enables RecordFunction thread locally */ TORCH_API void enableRecordFunction(bool enable = true); /** * isRecordFunctionEnabled returns whether RecordFunction * is enabled thread locally */ TORCH_API bool isRecordFunctionEnabled(); class TORCH_API RecordFunctionGuard { public: explicit RecordFunctionGuard(bool is_enabled = true) : prev_value_(isRecordFunctionEnabled()) { enableRecordFunction(is_enabled); } virtual ~RecordFunctionGuard() { enableRecordFunction(prev_value_); } private: bool prev_value_ = false; }; class TORCH_API DisableRecordFunctionGuard : public RecordFunctionGuard { public: DisableRecordFunctionGuard() : RecordFunctionGuard(false) {} ~DisableRecordFunctionGuard() override = default; }; struct TORCH_API RecordFunctionTLS { // Thread local vector of callbacks, holds pairs (callbacks, unique_id); // must be sorted in increasing handle order RecordFunctionCallbacks sorted_tls_callbacks_; bool tls_record_function_enabled_ = true; }; TORCH_API const RecordFunctionTLS& get_record_function_tls_(); TORCH_API void set_record_function_tls_(const RecordFunctionTLS& tls); TORCH_API void set_record_function_seed_for_testing(uint32_t seed); } // namespace at
22,083
29.252055
80
h
null
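A minimal usage sketch for the RecordFunction macros above, assuming a standard libtorch build where <ATen/record_function.h> provides them; the kernel name "my_fused_op" and its body are illustrative, not from the source:

#include <ATen/record_function.h>
#include <vector>

// Hypothetical custom kernel instrumented with a USER_SCOPE event so that
// any registered RecordFunction observer (e.g. the autograd profiler)
// sees its name and inputs.
at::Tensor my_fused_op(const at::Tensor& a, const at::Tensor& b) {
  std::vector<c10::IValue> inputs{a, b};
  RECORD_USER_SCOPE_WITH_INPUTS("my_fused_op", inputs);
  return a * b + a;
}

void run_unobserved(const at::Tensor& a, const at::Tensor& b) {
  // RAII guard declared above: suppresses RecordFunction on this thread.
  at::DisableRecordFunctionGuard no_record;
  my_fused_op(a, b);
}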
pytorch-main/aten/src/ATen/core/ATen_fwd.h
#pragma once #include <c10/core/QScheme.h> // Forward declarations of core ATen types used in dispatch functions namespace c10 { template<typename T> class optional; template<typename T> class List; template<typename T> class IListRef; class Stream; class Scalar; class SymInt; class SymIntList; struct Storage; struct TensorOptions; template <typename T> class ArrayRef; template <typename T> class OptionalArrayRef; } // namespace c10 namespace at { class Tensor; class OptionalTensorRef; struct Dimname; struct Generator; using TensorList = c10::ArrayRef<Tensor>; using ITensorListRef = c10::IListRef<Tensor>; using IOptTensorListRef = c10::IListRef<OptionalTensorRef>; using DimnameList = c10::ArrayRef<Dimname>; using IntArrayRef = c10::ArrayRef<int64_t>; using OptionalIntArrayRef = c10::OptionalArrayRef<int64_t>; using OptionalSymIntArrayRef = c10::OptionalArrayRef<c10::SymInt>; using c10::Stream; using c10::Storage; using c10::QScheme; using c10::Scalar; using c10::SymInt; using c10::SymIntList; using c10::TensorOptions; } // namespace at
1,061
20.673469
69
h
null
pytorch-main/aten/src/ATen/core/ATen_pch.h
// This global header must not depend on native_functions.yaml or // incremental builds will be next to useless #pragma push_macro("TORCH_ASSERT_NO_OPERATORS") #define TORCH_ASSERT_NO_OPERATORS // This macro doesn't work if defined after the first time inttypes.h // is included, so won't work anywhere if not defined here. #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <cinttypes> // This list of headers was generated using a script that finds // high-impact headers and then manually tweaked to remove OS specific // or duplicate headers (e.g. <cassert> and <assert.h>) and to remove // "impl" headers (e.g BFloat16-inl.h or complex_math.h in c10). // To generate the initial list: // 1. Build pytorch from scratch with all build caching disabled // 2. Generate a build trace with ninjatracing (https://github.com/nico/ninjatracing) // $ ninjatracing /path/to/pytorch/build/.ninja_log > trace_all.json // 3. Run pch_gen.py from https://github.com/peterbell10/build_analysis/ // $ python pch_gen.py --threshold .80 --target torch_cpu --build_dir /path/to/pytorch/build --trace trace_all.json // Where the threshold can be tweaked until c10 and some of ATen // core are included but TORCH_ASSERT_NO_OPERATORS still passes. #include <cassert> #include <cctype> #include <cerrno> #include <climits> #include <clocale> #include <cmath> #include <cstddef> #include <cstdint> #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> #include <cwchar> #include <cwctype> #include <algorithm> #include <array> #include <atomic> #include <chrono> #include <complex> #include <deque> #include <exception> #include <fstream> #include <functional> #include <initializer_list> #include <iomanip> #include <ios> #include <iosfwd> #include <istream> #include <iterator> #include <limits> #include <locale> #include <map> #include <memory> #include <mutex> #include <new> #include <numeric> #include <ostream> #include <ratio> #include <set> #include <sstream> #include <stdexcept> #include <streambuf> #include <string> #include <system_error> #include <tuple> #include <type_traits> #include <typeinfo> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> #include <c10/core/Allocator.h> #include <c10/core/AutogradState.h> #include <c10/core/Backend.h> #include <c10/core/CopyBytes.h> #include <c10/core/DefaultDtype.h> #include <c10/core/Device.h> #include <c10/core/DeviceGuard.h> #include <c10/core/DeviceType.h> #include <c10/core/DispatchKey.h> #include <c10/core/DispatchKeySet.h> #include <c10/core/GeneratorImpl.h> #include <c10/core/GradMode.h> #include <c10/core/InferenceMode.h> #include <c10/core/Layout.h> #include <c10/core/MemoryFormat.h> #include <c10/core/QScheme.h> #include <c10/core/ScalarType.h> #include <c10/core/ScalarTypeToTypeMeta.h> #include <c10/core/Storage.h> #include <c10/core/StorageImpl.h> #include <c10/core/Stream.h> #include <c10/core/SymInt.h> #include <c10/core/SymIntArrayRef.h> #include <c10/core/TensorImpl.h> #include <c10/core/TensorOptions.h> #include <c10/core/UndefinedTensorImpl.h> #include <c10/core/WrapDimMinimal.h> #include <c10/core/impl/DeviceGuardImplInterface.h> #include <c10/core/impl/InlineDeviceGuard.h> #include <c10/core/impl/LocalDispatchKeySet.h> #include <c10/core/impl/PyInterpreter.h> #include <c10/core/impl/SizesAndStrides.h> #include <c10/core/impl/VirtualGuardImpl.h> #include <c10/macros/Export.h> #include <c10/macros/Macros.h> #include <c10/util/AlignOf.h> #include <c10/util/Array.h> #include 
<c10/util/ArrayRef.h> #include <c10/util/BFloat16.h> #include <c10/util/Backtrace.h> #include <c10/util/C++17.h> #include <c10/util/ConstexprCrc.h> #include <c10/util/Deprecated.h> #include <c10/util/Exception.h> #include <c10/util/ExclusivelyOwned.h> #include <c10/util/Flags.h> #include <c10/util/Half.h> #include <c10/util/IdWrapper.h> #include <c10/util/Logging.h> #include <c10/util/MaybeOwned.h> #include <c10/util/Metaprogramming.h> #include <c10/util/Optional.h> #include <c10/util/Registry.h> #include <c10/util/SmallVector.h> #include <c10/util/StringUtil.h> #include <c10/util/ThreadLocalDebugInfo.h> #include <c10/util/Type.h> #include <c10/util/TypeCast.h> #include <c10/util/TypeIndex.h> #include <c10/util/TypeList.h> #include <c10/util/TypeSafeSignMath.h> #include <c10/util/TypeTraits.h> #include <c10/util/UniqueVoidPtr.h> #include <c10/util/accumulate.h> #include <c10/util/complex.h> #include <c10/util/flat_hash_map.h> #include <c10/util/in_place.h> #include <c10/util/intrusive_ptr.h> #include <c10/util/irange.h> #include <c10/util/llvmMathExtras.h> #include <c10/util/python_stub.h> #include <c10/util/qint32.h> #include <c10/util/qint8.h> #include <c10/util/quint2x4.h> #include <c10/util/quint4x2.h> #include <c10/util/quint8.h> #include <c10/util/reverse_iterator.h> #include <c10/util/safe_numerics.h> #include <c10/util/string_utils.h> #include <c10/util/string_view.h> #include <c10/util/typeid.h> #include <ATen/core/DeprecatedTypeProperties.h> #include <ATen/core/DeprecatedTypePropertiesRegistry.h> #include <ATen/core/DimVector.h> #include <ATen/core/Dimname.h> #include <ATen/core/Generator.h> #include <ATen/core/NamedTensor.h> #include <ATen/core/QuantizerBase.h> #include <ATen/core/TensorAccessor.h> #include <ATen/core/TensorBase.h> #include <ATen/core/symbol.h> #pragma pop_macro("TORCH_ASSERT_NO_OPERATORS")
5,407
29.727273
118
h
null
pytorch-main/aten/src/ATen/core/CheckMemoryFormat.h
#include <c10/core/TensorOptions.h> namespace c10 { namespace impl { inline c10::optional<MemoryFormat> check_tensor_options_and_extract_memory_format( const TensorOptions& options, c10::optional<MemoryFormat> memory_format) { TORCH_CHECK( options.requires_grad_opt() == c10::nullopt || options.requires_grad_opt().value() == false, "Operators taking TensorOptions cannot take a TensorOptions with " "options.requires_grad set as true. This isn't implemented yet."); TORCH_CHECK( !(options.has_memory_format() && memory_format.has_value()), "Cannot set memory_format both in TensorOptions and explicit argument; please delete " "the redundant setter."); if (memory_format.has_value()) { return memory_format; } else { return options.memory_format_opt(); } } }} // namespace impl namespace c10
864
32.269231
92
h
null
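A brief sketch of how a factory-style kernel might call the helper above; it uses only what this header declares plus c10::TensorOptions::memory_format:

#include <ATen/core/CheckMemoryFormat.h>

c10::optional<c10::MemoryFormat> resolve_format_example() {
  // The format may come from TensorOptions or from an explicit argument,
  // but not both; here only the options carry it.
  c10::TensorOptions opts =
      c10::TensorOptions().memory_format(c10::MemoryFormat::ChannelsLast);
  return c10::impl::check_tensor_options_and_extract_memory_format(
      opts, c10::nullopt); // -> ChannelsLast; setting both would fail the check
}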
pytorch-main/aten/src/ATen/core/DeprecatedTypeProperties.h
#pragma once #include <c10/core/Backend.h> #include <c10/core/ScalarType.h> #include <c10/core/Layout.h> #include <c10/core/TensorOptions.h> #include <c10/core/Storage.h> #include <ATen/core/DeprecatedTypePropertiesRegistry.h> #include <ATen/core/Generator.h> namespace at { class Tensor; // This class specifies a Backend and a ScalarType. Currently, it primarily // serves as a replacement return value for Tensor::type(). Previously, // Tensor::type() returned Type&, but we are changing Type to not be // dtype-specific. class TORCH_API DeprecatedTypeProperties { public: DeprecatedTypeProperties(Backend backend, ScalarType scalar_type) : backend_(backend), scalar_type_(scalar_type) {} Backend backend() const { return backend_; } Layout layout() const { return layout_from_backend(backend_); } bool is_sparse() const { return layout_from_backend(backend()) == kSparse; } bool is_sparse_csr() const { return layout_from_backend(backend()) == kSparseCsr; } c10::DeviceType device_type() const { return backendToDeviceType(backend_); } bool is_cuda() const { return backendToDeviceType(backend_) == kCUDA; } ScalarType scalarType() const { return scalar_type_; } caffe2::TypeMeta typeMeta() const { return scalarTypeToTypeMeta(scalar_type_); } bool operator==(const DeprecatedTypeProperties& other) const { return backend_ == other.backend() && scalar_type_ == other.scalarType(); } bool operator!=(const DeprecatedTypeProperties& other) const { return !(*this == other); } std::string toString() const { std::string base_str; if (backend_ == Backend::Undefined || scalar_type_ == ScalarType::Undefined) { base_str = "UndefinedType"; } else { base_str = std::string(at::toString(backend_)) + at::toString(scalar_type_) + "Type"; } return base_str; } DeprecatedTypeProperties & toBackend(Backend b) const { return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( b, scalar_type_); } DeprecatedTypeProperties & toScalarType(ScalarType s) const { return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties( backend_, s); } DeprecatedTypeProperties & cpu() const { return toBackend(Backend::CPU); } DeprecatedTypeProperties & cuda() const { return toBackend(Backend::CUDA); } DeprecatedTypeProperties & hip() const { return toBackend(Backend::HIP); } /// Constructs the `TensorOptions` from a type and a `device_index`. TensorOptions options(int16_t device_index = -1) const { return TensorOptions().dtype(typeMeta()) .device(device_type(), static_cast<c10::DeviceIndex>(device_index)) .layout(layout()); } /// Constructs the `TensorOptions` from a type and a Device. Asserts that /// the device type matches the device type of the type. TensorOptions options(c10::optional<Device> device_opt) const { if (!device_opt.has_value()) { return options(-1); } else { Device device = device_opt.value(); AT_ASSERT(device.type() == device_type()); return options(device.index()); } } operator TensorOptions() const { return options(); } int64_t id() const { return static_cast<int64_t>(backend()) * static_cast<int64_t>(ScalarType::NumOptions) + static_cast<int64_t>(scalarType()); } Tensor unsafeTensorFromTH(void * th_pointer, bool retain) const; Storage unsafeStorageFromTH(void * th_pointer, bool retain) const; Tensor copy(const Tensor & src, bool non_blocking=false, c10::optional<Device> to_device={}) const; private: Backend backend_; ScalarType scalar_type_; }; } // namespace at
3,778
26.786765
101
h
null
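A small sketch of the deprecated path this class serves, assuming a standard libtorch build; prefer Tensor::options() in new code:

#include <ATen/ATen.h>
#include <iostream>

void type_properties_example() {
  at::Tensor t = at::ones({2, 2});
  const at::DeprecatedTypeProperties& props = t.type(); // deprecated accessor
  std::cout << props.toString() << std::endl;           // e.g. "CPUFloatType"
  at::Tensor u = at::empty({4}, props.options());       // same backend/dtype
}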
pytorch-main/aten/src/ATen/core/DeprecatedTypePropertiesRegistry.h
#pragma once // In order to preserve bc, we make DeprecatedTypeProperties instances unique // just like they are for Type. #include <c10/core/Backend.h> #include <c10/core/ScalarType.h> namespace at { class DeprecatedTypeProperties; struct TORCH_API DeprecatedTypePropertiesDeleter { void operator()(DeprecatedTypeProperties * ptr); }; class TORCH_API DeprecatedTypePropertiesRegistry { public: DeprecatedTypePropertiesRegistry(); DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) const; private: std::unique_ptr<DeprecatedTypeProperties> registry [static_cast<int>(Backend::NumOptions)] [static_cast<int>(ScalarType::NumOptions)]; }; TORCH_API DeprecatedTypePropertiesRegistry& globalDeprecatedTypePropertiesRegistry(); } // namespace at
795
23.875
87
h
null
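A sketch showing the point of the registry: lookups are interned per (Backend, ScalarType) pair, so repeated queries return the same instance:

#include <ATen/core/DeprecatedTypeProperties.h>
#include <ATen/core/DeprecatedTypePropertiesRegistry.h>
#include <c10/util/Exception.h>

void registry_example() {
  auto& a = at::globalDeprecatedTypePropertiesRegistry()
                .getDeprecatedTypeProperties(
                    c10::Backend::CPU, c10::ScalarType::Float);
  auto& b = at::globalDeprecatedTypePropertiesRegistry()
                .getDeprecatedTypeProperties(
                    c10::Backend::CPU, c10::ScalarType::Float);
  TORCH_INTERNAL_ASSERT(&a == &b); // same cached instance both times
}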
pytorch-main/aten/src/ATen/core/Dimname.h
#pragma once #include <ATen/core/symbol.h> #include <c10/util/ArrayRef.h> #include <c10/util/Optional.h> #include <ostream> namespace at { enum class NameType: uint8_t { BASIC, WILDCARD }; struct TORCH_API Dimname { static Dimname fromSymbol(Symbol name); static Dimname wildcard(); static bool isValidName(const std::string& name); NameType type() const { return type_; } Symbol symbol() const { return name_; } bool isBasic() const { return type_ == NameType::BASIC; } bool isWildcard() const { return type_ == NameType::WILDCARD; } bool matches(Dimname other) const; c10::optional<Dimname> unify(Dimname other) const; private: Dimname(Symbol name) : name_(name), type_(NameType::BASIC) {} Dimname(Symbol name, NameType type) : name_(name), type_(type) {} Symbol name_; NameType type_; }; using DimnameList = c10::ArrayRef<Dimname>; TORCH_API std::ostream& operator<<(std::ostream& out, const Dimname& dimname); inline bool operator==(const Dimname& lhs, const Dimname& rhs) { return lhs.symbol() == rhs.symbol(); } inline bool operator!=(const Dimname& lhs, const Dimname& rhs) { return !(lhs == rhs); } } // namespace at
1,178
23.061224
78
h
null
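A usage sketch for Dimname; it assumes c10::Symbol::dimname is available from the interned-strings machinery pulled in via ATen/core/symbol.h:

#include <ATen/core/Dimname.h>
#include <c10/util/Exception.h>

void dimname_example() {
  at::Dimname n = at::Dimname::fromSymbol(c10::Symbol::dimname("channels"));
  at::Dimname w = at::Dimname::wildcard();
  TORCH_INTERNAL_ASSERT(n.matches(w));       // wildcards match any name
  c10::optional<at::Dimname> u = n.unify(w); // unification keeps "channels"
  TORCH_INTERNAL_ASSERT(u.has_value() && *u == n);
}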
pytorch-main/aten/src/ATen/core/DistributionsHelper.h
#pragma once #include <ATen/core/Array.h> #include <ATen/core/TransformationHelper.h> #include <c10/util/Half.h> #include <c10/util/BFloat16.h> #include <c10/util/MathConstants.h> #include <c10/util/Optional.h> #include <c10/macros/Macros.h> #include <type_traits> #include <limits> #include <cmath> /** * Distributions kernel adapted from THRandom.cpp * The kernels try to follow std::random distributions signature * For instance: in ATen * auto gen = at::detail::createCPUGenerator(); * at::uniform_real_distribution<double> uniform(0, 1); * auto sample = uniform(gen.get()); * * vs std::random * * std::mt19937 gen; * std::uniform_real_distribution uniform(0, 1); * auto sample = uniform(gen); */ namespace at { namespace { /** * Samples a discrete uniform distribution in the range [base, base+range) of type T */ template <typename T> struct uniform_int_from_to_distribution { C10_HOST_DEVICE inline uniform_int_from_to_distribution(uint64_t range, int64_t base) : range_(range), base_(base) {} template <typename RNG> C10_HOST_DEVICE inline T operator()(RNG generator) { if (( std::is_same<T, int64_t>::value || std::is_same<T, double>::value || std::is_same<T, float>::value || std::is_same<T, at::BFloat16>::value) && range_ >= 1ULL << 32) { return transformation::uniform_int_from_to<T>(generator->random64(), range_, base_); } else { return transformation::uniform_int_from_to<T>(generator->random(), range_, base_); } } private: uint64_t range_; int64_t base_; }; /** * Samples a discrete uniform distribution in the range [min_value(int64_t), max_value(int64_t)] */ template <typename T> struct uniform_int_full_range_distribution { template <typename RNG> C10_HOST_DEVICE inline T operator()(RNG generator) { return transformation::uniform_int_full_range<T>(generator->random64()); } }; /** * Samples a discrete uniform distribution in the range [0, max_value(T)] for integral types * and [0, 2^mantissa] for floating-point types. 
*/ template <typename T> struct uniform_int_distribution { template <typename RNG> C10_HOST_DEVICE inline T operator()(RNG generator) { if constexpr (std::is_same_v<T, double> || std::is_same_v<T, int64_t>) { return transformation::uniform_int<T>(generator->random64()); } else { return transformation::uniform_int<T>(generator->random()); } } }; /** * Samples a uniform distribution in the range [from, to) of type T */ template <typename T> struct uniform_real_distribution { C10_HOST_DEVICE inline uniform_real_distribution(T from, T to) { TORCH_CHECK_IF_NOT_ON_CUDA(from <= to); TORCH_CHECK_IF_NOT_ON_CUDA(to - from <= std::numeric_limits<T>::max()); from_ = from; to_ = to; } template <typename RNG> C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){ if constexpr (std::is_same_v<T, double>) { return transformation::uniform_real<T>(generator->random64(), from_, to_); } else { return transformation::uniform_real<T>(generator->random(), from_, to_); } } private: T from_; T to_; }; // The SFINAE checks introduced in #39816 look overcomplicated and must be revisited // https://github.com/pytorch/pytorch/issues/40052 #define DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(member) \ template <typename T> \ struct has_member_##member \ { \ typedef char yes; \ typedef long no; \ template <typename U> static yes test(decltype(&U::member)); \ template <typename U> static no test(...); \ static constexpr bool value = sizeof(test<T>(0)) == sizeof(yes); \ } DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_double_normal_sample); DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_double_normal_sample); DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(next_float_normal_sample); DISTRIBUTION_HELPER_GENERATE_HAS_MEMBER(set_next_float_normal_sample); #define DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(TYPE) \ \ template <typename RNG, typename ret_type, \ typename std::enable_if_t<( \ has_member_next_##TYPE##_normal_sample<RNG>::value && \ has_member_set_next_##TYPE##_normal_sample<RNG>::value \ ), int> = 0> \ C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* generator, ret_type* ret) { \ if (generator->next_##TYPE##_normal_sample()) { \ *ret = *(generator->next_##TYPE##_normal_sample()); \ generator->set_next_##TYPE##_normal_sample(c10::optional<TYPE>()); \ return true; \ } \ return false; \ } \ \ template <typename RNG, typename ret_type, \ typename std::enable_if_t<( \ !has_member_next_##TYPE##_normal_sample<RNG>::value || \ !has_member_set_next_##TYPE##_normal_sample<RNG>::value \ ), int> = 0> \ C10_HOST_DEVICE inline bool maybe_get_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type* /*ret*/) { \ return false; \ } \ \ template <typename RNG, typename ret_type, \ typename std::enable_if_t<( \ has_member_set_next_##TYPE##_normal_sample<RNG>::value \ ), int> = 0> \ C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* generator, ret_type cache) { \ generator->set_next_##TYPE##_normal_sample(cache); \ } \ \ template <typename RNG, typename ret_type, \ typename std::enable_if_t<( \ !has_member_set_next_##TYPE##_normal_sample<RNG>::value \ ), int> = 0> \ C10_HOST_DEVICE inline void maybe_set_next_##TYPE##_normal_sample(RNG* /*generator*/, ret_type /*cache*/) { \ } DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(double); DISTRIBUTION_HELPER_GENERATE_NEXT_NORMAL_METHODS(float); /** * Samples a normal distribution using the Box-Muller method * Takes mean and standard deviation as inputs * Note that the Box-Muller method returns two samples at a time.
* Hence, we cache the "next" sample in the CPUGeneratorImpl class. */ template <typename T> struct normal_distribution { C10_HOST_DEVICE inline normal_distribution(T mean_in, T stdv_in) { TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in >= 0, "stdv_in must be non-negative: ", stdv_in); mean = mean_in; stdv = stdv_in; } template <typename RNG> C10_HOST_DEVICE inline dist_acctype<T> operator()(RNG generator){ dist_acctype<T> ret; // return cached values if available if constexpr (std::is_same_v<T, double>) { if (maybe_get_next_double_normal_sample(generator, &ret)) { return transformation::normal(ret, mean, stdv); } } else { if (maybe_get_next_float_normal_sample(generator, &ret)) { return transformation::normal(ret, mean, stdv); } } // otherwise generate new normal values uniform_real_distribution<T> uniform(0.0, 1.0); const dist_acctype<T> u1 = uniform(generator); const dist_acctype<T> u2 = uniform(generator); const dist_acctype<T> r = ::sqrt(static_cast<T>(-2.0) * ::log1p(-u2)); const dist_acctype<T> theta = static_cast<T>(2.0) * c10::pi<T> * u1; if constexpr (std::is_same_v<T, double>) { maybe_set_next_double_normal_sample(generator, r * ::sin(theta)); } else { maybe_set_next_float_normal_sample(generator, r * ::sin(theta)); } ret = r * ::cos(theta); return transformation::normal(ret, mean, stdv); } private: T mean; T stdv; }; template <typename T> struct DiscreteDistributionType { using type = float; }; template <> struct DiscreteDistributionType<double> { using type = double; }; /** * Samples a Bernoulli distribution given a probability input */ template <typename T> struct bernoulli_distribution { C10_HOST_DEVICE inline bernoulli_distribution(T p_in) { TORCH_CHECK_IF_NOT_ON_CUDA(p_in >= 0 && p_in <= 1); p = p_in; } template <typename RNG> C10_HOST_DEVICE inline T operator()(RNG generator) { uniform_real_distribution<T> uniform(0.0, 1.0); return transformation::bernoulli<T>(uniform(generator), p); } private: T p; }; /** * Samples a geometric distribution given a probability input */ template <typename T> struct geometric_distribution { C10_HOST_DEVICE inline geometric_distribution(T p_in) { TORCH_CHECK_IF_NOT_ON_CUDA(p_in > 0 && p_in < 1); p = p_in; } template <typename RNG> C10_HOST_DEVICE inline T operator()(RNG generator) { uniform_real_distribution<T> uniform(0.0, 1.0); return transformation::geometric<T>(uniform(generator), p); } private: T p; }; /** * Samples an exponential distribution given a lambda input */ template <typename T> struct exponential_distribution { C10_HOST_DEVICE inline exponential_distribution(T lambda_in) : lambda(lambda_in) {} template <typename RNG> C10_HOST_DEVICE inline T operator()(RNG generator) { uniform_real_distribution<T> uniform(0.0, 1.0); return transformation::exponential<T>(uniform(generator), lambda); } private: T lambda; }; /** * Samples a Cauchy distribution given median and sigma as inputs */ template <typename T> struct cauchy_distribution { C10_HOST_DEVICE inline cauchy_distribution(T median_in, T sigma_in) : median(median_in), sigma(sigma_in) {} template <typename RNG> C10_HOST_DEVICE inline T operator()(RNG generator) { uniform_real_distribution<T> uniform(0.0, 1.0); return transformation::cauchy<T>(uniform(generator), median, sigma); } private: T median; T sigma; }; /** * Samples a lognormal distribution * Takes the mean and standard deviation of the underlying normal distribution as inputs */ template <typename T> struct lognormal_distribution { C10_HOST_DEVICE inline lognormal_distribution(T mean_in, T stdv_in) { TORCH_CHECK_IF_NOT_ON_CUDA(stdv_in > 0); mean =
mean_in; stdv = stdv_in; } template<typename RNG> C10_HOST_DEVICE inline T operator()(RNG generator){ normal_distribution<T> normal(mean, stdv); return transformation::log_normal<T>(normal(generator)); } private: T mean; T stdv; }; } } // namespace at
12,578
36.215976
119
h
null
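A sampling sketch following the header's own usage note; it assumes ATen's usual generator utilities (at::detail::createCPUGenerator, at::check_generator, and the per-generator mutex) are available:

#include <ATen/CPUGeneratorImpl.h>
#include <ATen/core/DistributionsHelper.h>
#include <mutex>

void sampling_example() {
  auto gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  // Generators are not thread safe; callers hold the mutex while sampling.
  std::lock_guard<std::mutex> lock(gen.mutex());
  auto* cpu_gen = at::check_generator<at::CPUGeneratorImpl>(gen);
  at::uniform_real_distribution<double> uniform(0, 1);
  double u = uniform(cpu_gen);                     // in [0, 1)
  at::normal_distribution<double> normal(0.0, 1.0);
  double n = normal(cpu_gen);                      // Box-Muller; caches a sample
  (void)u; (void)n;
}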
pytorch-main/aten/src/ATen/core/Formatting.h
#pragma once #include <ostream> #include <string> #include <c10/core/Scalar.h> #include <ATen/core/Tensor.h> namespace c10 { TORCH_API std::ostream& operator<<(std::ostream& out, Backend b); TORCH_API std::ostream& operator<<(std::ostream & out, const Scalar& s); TORCH_API std::string toString(const Scalar& s); } namespace at { TORCH_API std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t); TORCH_API std::ostream& print( std::ostream& stream, const Tensor& tensor, int64_t linesize); static inline std::ostream& operator<<(std::ostream & out, const Tensor & t) { return print(out,t,80); } TORCH_API void print(const Tensor & t, int64_t linesize=80); }
700
25.961538
89
h
null
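A printing sketch using the declarations above, assuming a standard libtorch build:

#include <ATen/ATen.h>
#include <ATen/core/Formatting.h>
#include <iostream>

void print_example() {
  at::Tensor t = at::rand({2, 3});
  std::cout << t << std::endl;    // streams through print(out, t, 80)
  at::print(t, /*linesize=*/120); // explicit call with a wider line budget
}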
pytorch-main/aten/src/ATen/core/GeneratorForPrivateuseone.h
#pragma once #include <ATen/core/Generator.h> #include <c10/util/intrusive_ptr.h> namespace at { using GeneratorFuncType = std::function<at::Generator(c10::DeviceIndex)>; c10::optional<GeneratorFuncType>& GetGeneratorPrivate(); class TORCH_API _GeneratorRegister { public: explicit _GeneratorRegister(GeneratorFuncType func); }; TORCH_API at::Generator GetGeneratorForPrivateuse1( c10::DeviceIndex device_index); /** * This is used to register a Generator with PyTorch for the `privateuse1` key. * * Usage: REGISTER_GENERATOR_PRIVATEUSE1(MakeGeneratorForPrivateuse1) * * class CustomGeneratorImpl : public c10::GeneratorImpl { * CustomGeneratorImpl(DeviceIndex device_index = -1); * ~CustomGeneratorImpl() override = default; * ... * }; * * at::Generator MakeGeneratorForPrivateuse1(c10::DeviceIndex id) { * return at::make_generator<CustomGeneratorImpl>(id); * } */ #define REGISTER_GENERATOR_PRIVATEUSE1(GeneratorPrivate) \ static auto temp##GeneratorPrivate = at::_GeneratorRegister(GeneratorPrivate); } // namespace at
1,064
25.625
80
h
null
pytorch-main/aten/src/ATen/core/IListRef.h
#pragma once #include <ATen/core/ivalue_to.h> #include <c10/util/ArrayRef.h> #include <c10/util/Exception.h> #include <functional> #include <initializer_list> #include <iterator> #include <type_traits> /* * [Note: IListRef] * Wrapper around different API containers (e.g. boxed and unboxed). * * What is it? * =========== * It is a tagged union of both boxed and unboxed API containers. * Working implementations: * * - `IListRef<at::Tensor>` * - `IListRef<at::OptionalTensorRef>` * * Note that `IListRef` is a view type. Meaning that it won't own the * tensors it holds. It's intended to be used only as argument parameters. * Specifically, where these 2 worlds overlap. * * What is this for? * ================= * Historically, PyTorch has maintained 2 different APIs: the unboxed * (called from C++ API and Python eager mode) and boxed APIs (called * from the TorchScript JIT, mobile interpreter, and boxed fallbacks). * * Calling unboxed kernels from the boxed "world" and vice-versa may * result in non-negligible overhead. Lists are one of those types: * * - Boxed world: `c10::List` * - Unboxed world: `c10::ArrayRef` * * In this context, `c10::IListRef` solves this problem by wrapping those * 2 container types, so that we don't need to convert from one to * the other. * * (see https://github.com/pytorch/pytorch/issues/66328) * * What does it do? * ================ * This container wraps around the different tagged containers * (currently, only boxed and unboxed), without incurring extra * overhead for converting from one to another. It does so while * exposing the usual container methods, which dispatch to corresponding * implementations. * * While it works with different container types, it introduces * overhead for repeatedly calling member functions (since those will * get dispatched, again). Therefore, you should only use it to iterate * through the list at most once. If you need to do more complex things, * call `materialize()` first. * * Adding support for a new Tag * ============================ * Suppose we want to add a new tag: `Chest`. Here are the steps * we would have to go through: * * 1. Add a line for it in the macro `TORCH_ILISTREF_FORALL_TAGS`. * * #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \ * ... * _(Chest, ##__VA_ARGS__) * * 2. Add type aliases, union members, and constructors. * * template <typename T> * class IListRef { * ... * using chest_type = * typename detail::IListRefTagImpl<IListRefTag::Chest, T>::list_type; * ... * IListRef(...) : tag_(IListRefTag::Chest) { * ... * } * ... * union Payload { * ... * chest_type chest; * ... * }; * ... * }; * * 3. Add a default implementation for it (in 'IListRef_inl.h'). It's * preferable to make the default implementation work for `T = Tensor` * (both `Unboxed` and `Boxed` do it). * * template <typename T, typename ListElemT> * class IListRefTagImplBase<IListRefTag::Chest, T, ListElemT> { * public: * using elem_type = ListElemT; * using list_type = ChestContainer<elem_type>; * * static const list_type& unwrap(const IListRef<T>& ilist) { ... } * * static typename list_type::const_iterator& unwrap( * IListRefIterator<T>& it) { ... } * * static const typename list_type::const_iterator& unwrap( * const IListRefIterator<T>& it) { ... } * * static IListRefConstRef<T> iterator_get( * const typename list_type::const_iterator& it) { ... } * } * * 4. Add a specialization for each of the already supported types. * Finally, for consistency, add them to the tracking list.
* (see [Note: IListRefTagImpl Specializations]) * * template <> * class IListRefTagImpl<IListRefTag::Chest, at::Tensor> * : public IListRefTagImplBase<IListRefTag::Chest, at::Tensor> {}; * * Adding support for a new Type * ============================= * Suppose we want to add support for a new type: `Matrix`. * Here are the steps we would have to go through: * * 1. Add a specialization for each of the existing tags. * For consistency, add them to the tracking list. * (see [Note: IListRefTagImpl Specializations]) * * template <> * class IListRefTagImpl<IListRefTag::Unboxed, Matrix> * : public IListRefTagImplBase<IListRefTag::Unboxed, Matrix> {}; * * template <> * class IListRefTagImpl<IListRefTag::Boxed, Matrix> * : public IListRefTagImplBase<IListRefTag::Boxed, Matrix> {}; * * Common Problems * =============== * 1. One of the `IListRef(Iterator)` methods fails to compile. * * That may be happening because the container type you added * is not compatible with the code written for that method. If * that's true, then you might have to transform that code into * a static method call (see `List::operator[]` method). * * 2. Can't make `IListRefIterator<T>::operator*` return a const-reference. * * First, keep in mind that we assume that boxed containers will * have to deal with `IValue` (e.g. `c10::List`). In this context, * what may be happening is that `IValue` doesn't internally store * your type `T`. Instead, it constructs a new `T` every time * you try to get `T` from it (see `IListRef<at::OptionalTensorRef>`). */ namespace c10 { template <typename T> class IListRef; /* * Applies arbitrary macros to each `IListRefTag`. */ #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \ _(Unboxed, ##__VA_ARGS__) \ _(Boxed, ##__VA_ARGS__) \ _(Materialized, ##__VA_ARGS__) /* * Defines a "switch-case" for `TAG`. Inside, it executes `BODY`, * while bringing to scope: * * - `ImplT`: the implementation class for `TAG` * - `this_`: the result of unwrapping `this` */ #define TORCH_ILISTREF_UNWRAP_CASE(TAG, BODY) \ case c10::IListRefTag::TAG: { \ using ImplT = c10::detail::IListRefTagImpl<IListRefTag::TAG, T>; \ auto& this_ = ImplT::unwrap(*this); \ BODY \ } break; /* * Dispatches the unwrap call, depending on `TAG`, followed by * the execution of `BODY`. It aborts if `TAG` is not an `IListRefTag`. * * This macro is useful because it allows us to handle different * types (that correspond to different tags) to be implemented * only once. We can do it even when the implementations of the * different tags aren't syntactically the same, by dispatching * it to a function (e.g. `ImplT::<dispatch-function>(this_)`). */ #define TORCH_ILISTREF_UNWRAP(TAG, BODY) \ switch (TAG) { \ TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \ break; \ default: \ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); \ } enum class IListRefTag { #define DEFINE_TAG(tag, ...) tag, TORCH_ILISTREF_FORALL_TAGS(DEFINE_TAG) #undef DEFINE_TAG None }; namespace detail { /* * Type alias that specifies whether we return a reference or a copy of `T`. * * What is this for? * ================= * Since values in the boxed world are represented by an `IValue`, we also * depend on whether it can be converted to a const-reference (`Tensor`) or * has to create a new copy of `T` (`OptionalTensorRef`). */ template <typename T> using IListRefConstRef = typename ivalue_to_const_ref_overload_return<T>::type; /* * Interface that implements key functions for each `IListRefTag` type. * * What is this for?
* ================= * Given an `IListRef(Iterator)<T>`, some methods have to be implemented * differently for each `TAG`. Therefore, the methods inside this class * are used as dispatch targets for the different `IListRefTag` values. * * You should create a specialization of this class for each possible * combination of `IListRefTag` type (except `None`) and element types * (e.g. `Tensor`). * * What does it do? * ================ * 1. defines static methods to be used as dispatch targets by both * `IListRef<T>` and `IListRefIterator<T>` (see the implementation of * `IListRefTagImplBase`). * * 2. defines the `elem_type` and `list_type` aliases that will be * used in the definition of `IListRef<T>`. In general, we should do * so by inheriting from `IListRefTagImplBase<TAG, T, ListElemT>`. * * [Note: IListRefTagImpl Specializations] * ====================================== * For `IListRef(Iterator)<at::Tensor>`: * - <IListRefTag::Unboxed, at::Tensor> * - <IListRefTag::Boxed, at::Tensor> * - <IListRefTag::Materialized, at::Tensor> * * For `IListRef(Iterator)<at::OptionalTensorRef>`: * - <IListRefTag::Unboxed, at::OptionalTensorRef> * - <IListRefTag::Boxed, at::OptionalTensorRef> * - <IListRefTag::Materialized, at::OptionalTensorRef> */ template <IListRefTag TAG, typename T> class IListRefTagImpl {}; /* * Base implementation of `IListRefTagImpl<TAG, T>` methods. * * What is this for? * ================= * This should make adding specializations for new types easier. For * example, one should be able to add a new type just by making its * `IListRefTagImpl` specialization inherit from `IListRefTagImplBase`. * * You should create a partial specialization for this class only if * you introduce a new `IListRefTag`. The idea being that there is one * default implementation for each possible value of `IListRefTag`. * * What does it do? * ================ * 1. defines `elem_type` as an alias to `ListElemT`. * * 2. defines `list_type` as an alias to the default container type * that will hold a collection of `elem_type`. The idea being that * all types tagged as `TAG` will have `list_type` as its container, * with different `elem_type`. * * 3. defines the default implementation for each of the methods that * are supposed to be defined on `IListRefTagImpl` specializations. * * 4. inheriting from `IListRefTagImplBase<TAG, T, ListElemT>` also means * that the payload of the type `IListRef<T>` will be of type `list_type` * when it is tagged as `TAG`. */ template <IListRefTag TAG, typename T, typename ListElemT = T> class IListRefTagImplBase {}; /* * Materialized container for `IListRef<T>`. * * What is this for? * ================= * Container that groups `T` references together. This exchanges the * overhead of every method call from `IListRef<T>` for a dynamic allocation. * * You should use this container instead of `IListRef<T>` if: * * - You are going to iterate the list more than once * - You need to repeatedly access arbitrary elements (using `operator[]`) * What does it do? * ================ * Removes the reference (&) from the type, and wraps it into a * `std::reference_wrapper`. If `IListRefConstRef<T>` is not a * reference type, then it's left unchanged.
*/ template <typename T> using _MaterializedIListRefElem = typename std::conditional< std::is_reference<T>::value, typename std::reference_wrapper<typename std::remove_reference<T>::type>, T>::type; template <typename T> using MaterializedIListRefElem = _MaterializedIListRefElem<IListRefConstRef<T>>; template <typename T> using MaterializedIListRef = std::vector<MaterializedIListRefElem<T>>; } // namespace detail /* * Iterator for `IListRef<T>`. * * What is it? * =========== * Currently, a `std::bidirectional_iterator` that wraps the iterator * types defined for each of the `IListRefTag`. * * One should be able to use it as if it were the unwrapped * iterators themselves. * What does it do? * ================ * Similarly to `IListRef<T>`, this is a wrapper class. Specifically, it * wraps each container's `const_iterator` type alias. So, for example, * given that the container for `IListRefTag::Boxed` is `c10::List`, this * iterator will wrap a `c10::List::const_iterator`. * * [Note: MSVC Iterator Debug] * =========================== * MSVC `vector<T>::iterator` implementation (used in the boxed variant) * makes it so this union's destructor, copy-constructor (assignment), and * move-constructor (assignment) are implicitly deleted. * * Therefore, we need to explicitly define them as needed. Below is a list * of places where these are needed and the reason: * * - `Payload` destructor: * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is set to 2. * * - `IListRefIterator` destructor: * same as above. However, we also need to call the variant * destructor explicitly. * * - `IListRefIterator` copy-constructor: * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is different * than 0. */ template <typename T> class IListRefIterator { private: #define DEFINE_FRIEND_CLASS(TAG, ...)
\ friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \ friend class detail::IListRefTagImplBase< \ IListRefTag::TAG, \ T, \ typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>; TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS) #undef DEFINE_FRIEND_CLASS public: // C++17 friendly std::iterator implementation using iterator_category = std::bidirectional_iterator_tag; using value_type = T; using difference_type = std::ptrdiff_t; using pointer = T*; using reference = T&; using unboxed_iterator_type = typename detail:: IListRefTagImpl<IListRefTag::Unboxed, T>::list_type::const_iterator; using boxed_iterator_type = typename detail:: IListRefTagImpl<IListRefTag::Boxed, T>::list_type::const_iterator; using materialized_iterator_type = typename detail::MaterializedIListRef<T>::const_iterator; IListRefIterator() : tag_(IListRefTag::None) {} #if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL != 0 // See [Note: MSVC Iterator Debug] IListRefIterator(const IListRefIterator& iterator) : tag_(iterator.tag_) { switch (tag_) { case IListRefTag::Boxed: payload_.boxed_iterator = iterator.payload_.boxed_iterator; break; case IListRefTag::Unboxed: payload_.unboxed_iterator = iterator.payload_.unboxed_iterator; break; case IListRefTag::Materialized: payload_.materialized_iterator = iterator.payload_.materialized_iterator; break; default: TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); } } #endif #if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL == 2 // See [Note: MSVC Iterator Debug] ~IListRefIterator() noexcept(false) { switch (tag_) { case IListRefTag::Boxed: payload_.boxed_iterator.~boxed_iterator_type(); break; case IListRefTag::Unboxed: payload_.unboxed_iterator.~unboxed_iterator_type(); break; case IListRefTag::Materialized: payload_.materialized_iterator.~materialized_iterator_type(); break; default: TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); } } #endif IListRefIterator(boxed_iterator_type boxed) : tag_(IListRefTag::Boxed) { payload_.boxed_iterator = boxed; } IListRefIterator(unboxed_iterator_type unboxed) : tag_(IListRefTag::Unboxed) { payload_.unboxed_iterator = unboxed; } IListRefIterator(materialized_iterator_type materialized) : tag_(IListRefTag::Materialized) { payload_.materialized_iterator = materialized; } detail::IListRefConstRef<T> operator*() const { TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::iterator_get(this_); }); } IListRefIterator& operator++() { TORCH_ILISTREF_UNWRAP(tag_, { ++this_; }); return *this; } IListRefIterator operator++(int) { auto old = *this; TORCH_ILISTREF_UNWRAP(tag_, { ++this_; }); return old; } IListRefIterator& operator--() { TORCH_ILISTREF_UNWRAP(tag_, { --this_; }); return *this; } IListRefIterator operator--(int) { auto old = *this; TORCH_ILISTREF_UNWRAP(tag_, { --this_; }); return old; } bool operator==(const IListRefIterator& rhs) const { if (tag_ != rhs.tag_) { return false; } TORCH_ILISTREF_UNWRAP(tag_, { auto& rhs_it = ImplT::unwrap(rhs); return this_ == rhs_it; }); } bool operator!=(const IListRefIterator& rhs) const { return !(*this == rhs); } private: union Payload { boxed_iterator_type boxed_iterator; unboxed_iterator_type unboxed_iterator; materialized_iterator_type materialized_iterator; void* _init_ptr; Payload() : _init_ptr(nullptr) {} #if defined(_MSC_VER) // See [Note: MSVC Iterator Debug] ~Payload() {} #endif }; Payload payload_; IListRefTag tag_; }; /* * See [Note: IListRef] */ template <typename T> class IListRef { private: #define DEFINE_FRIEND_CLASS(TAG, ...) 
\ friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \ friend class detail::IListRefTagImplBase< \ IListRefTag::TAG, \ T, \ typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>; TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS) #undef DEFINE_FRIEND_CLASS public: using unboxed_type = typename detail::IListRefTagImpl<IListRefTag::Unboxed, T>::list_type; using boxed_type = typename detail::IListRefTagImpl<IListRefTag::Boxed, T>::list_type; using materialized_type = typename detail::MaterializedIListRef<T>; using iterator = IListRefIterator<T>; using const_iterator = IListRefIterator<T>; using reverse_iterator = std::reverse_iterator<iterator>; using value_type = typename iterator::value_type; IListRef() : tag_(IListRefTag::None) {} IListRef(const boxed_type& boxed) : tag_(IListRefTag::Boxed) { payload_.boxed = &boxed; } IListRef(const unboxed_type& unboxed) : tag_(IListRefTag::Unboxed) { payload_.unboxed = unboxed; } IListRef(const std::initializer_list<T>& list) : tag_(IListRefTag::Unboxed) { payload_.unboxed = at::ArrayRef<T>(list); } template < typename... UnboxedConstructorArgs, typename = std::enable_if_t< std::is_constructible<unboxed_type, UnboxedConstructorArgs...>::value>> IListRef(UnboxedConstructorArgs&&... args) : tag_(IListRefTag::Unboxed) { payload_.unboxed = unboxed_type(std::forward<UnboxedConstructorArgs>(args)...); } IListRef(const materialized_type& materialized) : tag_(IListRefTag::Materialized) { payload_.materialized = &materialized; } size_t size() const { TORCH_ILISTREF_UNWRAP(tag_, { return this_.size(); }); } bool empty() const { return size() == 0; } iterator begin() const { TORCH_ILISTREF_UNWRAP(tag_, { return this_.begin(); }); } iterator end() const { TORCH_ILISTREF_UNWRAP(tag_, { return this_.end(); }); } detail::IListRefConstRef<T> front() const { TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::front(this_); }); } /* * Materializes the `IListRef` into a `std::vector`. * * This should be used when one wishes to either: * * - iterate over the list more than once: each `IListRefIterator` * member function call has to go through a switch, introducing * non-negligible overhead * * - randomly access an arbitrary element using `operator[]`: * same reason as above */ detail::MaterializedIListRef<T> materialize() const { if (isMaterialized()) { return toMaterialized(); } detail::MaterializedIListRef<T> materialized; materialized.reserve(size()); for (const auto& t : *this) { materialized.emplace_back(t); } return materialized; } #define DEFINE_CHECK(TAG, ...) \ bool is##TAG() const { \ return tag_ == IListRefTag::TAG; \ } TORCH_ILISTREF_FORALL_TAGS(DEFINE_CHECK); #undef DEFINE_CHECK bool isNone() const { return tag_ == IListRefTag::None; } #define DEFINE_CASTING(TAG, ...) \ const typename detail::IListRefTagImpl<IListRefTag::TAG, T>::list_type& \ to##TAG() const { \ TORCH_INTERNAL_ASSERT(is##TAG()); \ return detail::IListRefTagImpl<IListRefTag::TAG, T>::unwrap(*this); \ } TORCH_ILISTREF_FORALL_TAGS(DEFINE_CASTING); #undef DEFINE_CASTING private: union Payload { const boxed_type* boxed; unboxed_type unboxed; const materialized_type* materialized; Payload() : boxed(nullptr) {} ~Payload() {} }; Payload payload_; IListRefTag tag_; }; } // namespace c10 #include <ATen/core/IListRef_inl.h>
20,992
32.164297
95
h
null
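A sketch of the intended call pattern: one signature accepts both unboxed (ArrayRef) and boxed (c10::List) callers, and materialize() is used before anything beyond a single pass; the function name is illustrative:

#include <ATen/ATen.h>
#include <ATen/core/IListRef.h>
#include <vector>

int64_t count_defined(at::ITensorListRef tensors) {
  // Per the note above: materialize before repeated/complex access.
  auto materialized = tensors.materialize();
  int64_t n = 0;
  for (const at::Tensor& t : materialized) {
    n += static_cast<int64_t>(t.defined());
  }
  return n;
}

void callers_example() {
  std::vector<at::Tensor> unboxed = {at::ones({1}), at::ones({2})};
  count_defined(at::ArrayRef<at::Tensor>(unboxed)); // unboxed world
  c10::List<at::Tensor> boxed({at::ones({3})});
  count_defined(boxed);                             // boxed world
}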
pytorch-main/aten/src/ATen/core/IListRef_inl.h
#pragma once #include <ATen/core/List.h> #include <ATen/core/Tensor.h> namespace at { class Tensor; class OptionalTensorRef; } namespace c10 { namespace detail { /* * Specializations of `IListRefTagImplBase` that implement the default * implementation for `IListRefTag::Unboxed`. */ template <typename T, typename ListElemT> class IListRefTagImplBase<IListRefTag::Unboxed, T, ListElemT> { public: using elem_type = ListElemT; using list_type = ArrayRef<elem_type>; /* * These `unwrap` static methods unwrap the inner containers out * of `IListRef<T>` (and `IListRefIterator<T>`). They are required when * the macro `TORCH_ILISTREF_UNWRAP` is called. */ static const list_type& unwrap(const IListRef<T>& ilist) { return ilist.payload_.unboxed; } static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) { return it.payload_.unboxed_iterator; } static const typename list_type::const_iterator& unwrap( const IListRefIterator<T>& it) { return it.payload_.unboxed_iterator; } /* * We have these functions (besides the `unwrap`s above) because the * implementations of `IListRef::operator[]` and `IListRefIterator::operator*` * weren't syntactically equal for the existing tags at the time * (`Unboxed` and `Boxed`). */ static IListRefConstRef<T> front(const list_type& lst) { return lst.front(); } static IListRefConstRef<T> iterator_get( const typename list_type::const_iterator& it) { return *it; } }; /* * Specializations of `IListRefTagImplBase` that implement the default * implementation for `IListRefTag::Boxed`. */ template <typename T, typename ListElemT> class IListRefTagImplBase<IListRefTag::Boxed, T, ListElemT> { public: using elem_type = ListElemT; using list_type = List<elem_type>; static const list_type& unwrap(const IListRef<T>& ilist) { return *ilist.payload_.boxed; } static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) { return it.payload_.boxed_iterator; } static const typename list_type::const_iterator& unwrap( const IListRefIterator<T>& it) { return it.payload_.boxed_iterator; } static IListRefConstRef<T> front(const list_type& lst) { return lst[0]; } static IListRefConstRef<T> iterator_get( const typename list_type::const_iterator& it) { return (*it).get().toTensor(); } }; /* * Specializations of `IListRefTagImplBase` that implement the default * implementation for `IListRefTag::Materialized`. */ template <typename T> class IListRefTagImplBase<IListRefTag::Materialized, T, MaterializedIListRefElem<T>> { public: using elem_type = MaterializedIListRefElem<T>; using list_type = MaterializedIListRef<T>; static const list_type& unwrap(const IListRef<T>& ilist) { return *ilist.payload_.materialized; } static typename list_type::const_iterator& unwrap(IListRefIterator<T>& it) { return it.payload_.materialized_iterator; } static const typename list_type::const_iterator& unwrap( const IListRefIterator<T>& it) { return it.payload_.materialized_iterator; } static IListRefConstRef<T> front(const list_type& lst) { return lst[0]; } static IListRefConstRef<T> iterator_get( const typename list_type::const_iterator& it) { return *it; } }; /* * [Note: ITensorListRef] * Specializations necessary for `IListRef<at::Tensor>` type. * * Since the default implementations are usually done with supporting * `Tensor` in mind, we only have to inherit from the base implementations.
*/ template <> class IListRefTagImpl<IListRefTag::Unboxed, at::Tensor> : public IListRefTagImplBase<IListRefTag::Unboxed, at::Tensor> {}; template <> class IListRefTagImpl<IListRefTag::Boxed, at::Tensor> : public IListRefTagImplBase<IListRefTag::Boxed, at::Tensor> {}; template <> class IListRefTagImpl<IListRefTag::Materialized, at::Tensor> : public IListRefTagImplBase< IListRefTag::Materialized, at::Tensor, MaterializedIListRefElem<at::Tensor>> {}; /* * [Note: IOptTensorListRef] * Specializations necessary for `IListRef<at::OptionalTensorRef>` type. * * We can't get an `at::OptionalTensorRef` directly from an instance of * `List<optional<Tensor>>` (the type that corresponds to the boxed world). * * So, the default implementation won't help us. Thus, we have to implement * this method ourselves. */ template <> class IListRefTagImpl<IListRefTag::Unboxed, at::OptionalTensorRef> : public IListRefTagImplBase<IListRefTag::Unboxed, at::OptionalTensorRef> {}; template <> class IListRefTagImpl<IListRefTag::Boxed, at::OptionalTensorRef> : public IListRefTagImplBase<IListRefTag::Boxed, at::OptionalTensorRef, optional<at::Tensor>> { public: /* * Given an instance of the types corresponding to the `Boxed` tag, we override * the default implementation, so that we can return a `at::OptionalTensorRef`. */ static IListRefConstRef<at::OptionalTensorRef> iterator_get( const typename list_type::const_iterator& it) { const auto& ivalue = (*it).get(); if (!ivalue.isNone()) { const auto& tensor = ivalue.toTensor(); return (tensor.defined()) ? tensor : at::OptionalTensorRef{}; } return {}; } }; template <> class IListRefTagImpl<IListRefTag::Materialized, at::OptionalTensorRef> : public IListRefTagImplBase< IListRefTag::Materialized, at::OptionalTensorRef, MaterializedIListRefElem<at::OptionalTensorRef>> {}; } // namespace detail } // namespace c10 namespace at { // [Note: ITensorListRef] using ITensorListRef = c10::IListRef<at::Tensor>; using ITensorListRefIterator = c10::IListRefIterator<at::Tensor>; using MaterializedITensorListRef = c10::detail::MaterializedIListRef<at::Tensor>; // [Note: IOptTensorListRef] using IOptTensorListRef = c10::IListRef<at::OptionalTensorRef>; using IOptTensorListRefIterator = c10::IListRefIterator<at::OptionalTensorRef>; using MaterializedIOptTensorListRef = c10::detail::MaterializedIListRef<at::OptionalTensorRef>; } // namespace at
6,127
29.336634
99
h
null
pytorch-main/aten/src/ATen/core/LegacyTypeDispatch.h
#pragma once // The legacy mechanism for dispatching operators in ATen is a Type // object, which is essentially a giant virtual dispatch table // for every operation we support dynamically dispatching over. // // This has been deprecated in favor of ATenDispatch, and in the future, // c10 dispatcher. // TODO: Clean up what remains here #include <c10/core/impl/LocalDispatchKeySet.h> namespace at { // A RAII, thread local (!) guard that will disable dispatch to variable // handler. // // NOTE [ Treating Variables as non-Variables in type dispatch ] // // What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes // dispatches on ATen functions to go to the non-variable implementation, // bypassing autograd handling (and also profiling and tracing). // // To understand why this guard exists, it's helpful to understand the history // behind how Variable was implemented. Previously, Variables were implemented // as a wrapper on Tensors; so the act of processing a Variable involved // unwrapping the underlying Tensor, and then calling the underlying base // operation on /that/ tensor. // // However, after the Variable/Tensor merge, there is no concept of unwrapping // a tensor anymore. If you just call the operation on the same variable // again inside your VariableType handler, you'll dispatch back to // VariableType, which is not what we want. // // The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which // when enabled will cause `legacyTensorType()` and `getType()` to always return // non-Variable type, even if the tensor being called on is a variable. /* Note [AutoDispatchBelowAutograd] * AutoDispatchBelowAutograd is **INTERNAL ONLY**; it should only be used * for kernel implementations and customized C++ kernels. * If you are looking for a guard to run workload in inference mode, please use * c10::InferenceMode RAII, which is the user facing API. * In the past AutoDispatchBelowAutograd (or its old version AutoNonVariableTypeMode) * was used in user code for inference-only workloads; this was at risk of * silently producing wrong results in some edge cases. For example: * ``` * torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true); * torch::Tensor out = s * s; * { * at::AutoDispatchBelowAutograd guard; * s.add_(1); // Skips version bump on `s`. * } * // WRONG GRADIENT! s.grad() is now computed using the `s` value after the * // inplace update. * out.backward(torch::ones_like(out)); * ``` * Users should use `c10::InferenceMode` here so that it'll properly throw an * error saying "one of the variables needed for gradient computation has been modified." */ struct TORCH_API AutoDispatchBelowAutograd { AutoDispatchBelowAutograd() : autograd_guard_(c10::autograd_dispatch_keyset) { } // disable all autograd dispatch keys c10::impl::ExcludeDispatchKeyGuard autograd_guard_; }; // TODO: AutoNonVariableTypeMode should be removed in release 1.10. struct TORCH_API AutoNonVariableTypeMode { AutoNonVariableTypeMode(bool enabled = true) : autograd_guard_(c10::autograd_dispatch_keyset) { TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. " "For kernel implementations please use AutoDispatchBelowADInplaceOrView instead. " "If you are looking for a user facing API to enable running your inference-only " "workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code " "is at risk of producing silently wrong results in some edge cases. 
" "See Note [AutoDispatchBelowAutograd] for more details."); TORCH_INTERNAL_ASSERT(enabled); } // disable all autograd dispatch keys c10::impl::ExcludeDispatchKeyGuard autograd_guard_; }; struct TORCH_API AutoDispatchSkipFunctionalize { AutoDispatchSkipFunctionalize() : dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) { } c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_; }; /* Note [AutoDispatchBelowADInplaceOrView] * AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode * before we split inplace & view ops out of VariableType kernel. * Note this guard is used in VariableType kernels for functional ops * as well as ADInplaceOrView kernels for inplace/view ops to enforce the * Invariant: * Once you are in VariableType/ADInplaceOrView kernel for an op, * you never go back to a kernel on same dispatch key until * you finish the current op. */ struct TORCH_API AutoDispatchBelowADInplaceOrView { AutoDispatchBelowADInplaceOrView() : dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) { } // disable Autograd & ADInplaceOrView dispatch keys c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_; }; } // namespace at
4,857
42.375
103
h
null
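A sketch contrasting the two guards per the notes above: c10::InferenceMode for user-side inference work, AutoDispatchBelowAutograd only inside kernel implementations; the tensor math is illustrative:

#include <ATen/ATen.h>
#include <c10/core/InferenceMode.h>

void guard_example(const at::Tensor& x) {
  {
    c10::InferenceMode inference; // user-facing RAII guard
    auto y = x * x;               // no autograd graph is recorded
  }
  {
    // Kernel-internal only: dispatch below Autograd (this also skips
    // profiling and tracing) when redispatching from a kernel body.
    at::AutoDispatchBelowAutograd guard;
    auto z = x + 1;
  }
}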
pytorch-main/aten/src/ATen/core/MT19937RNGEngine.h
#pragma once #include <c10/util/irange.h> // define constants like M_PI and C keywords for MSVC #ifdef _MSC_VER #ifndef _USE_MATH_DEFINES #define _USE_MATH_DEFINES #endif #include <math.h> #endif #include <array> #include <cmath> #include <cstdint> namespace at { constexpr int MERSENNE_STATE_N = 624; constexpr int MERSENNE_STATE_M = 397; constexpr uint32_t MATRIX_A = 0x9908b0df; constexpr uint32_t UMASK = 0x80000000; constexpr uint32_t LMASK = 0x7fffffff; /** * Note [Mt19937 Engine implementation] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Originally implemented in: * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/MTARCOK/mt19937ar-cok.c * and modified with C++ constructs. Moreover the state array of the engine * has been modified to hold 32 bit uints instead of 64 bits. * * Note that we reimplemented mt19937 instead of using std::mt19937 because, * at::mt19937 turns out to be faster in the pytorch codebase. PyTorch builds with -O2 * by default and following are the benchmark numbers (benchmark code can be found at * https://github.com/syed-ahmed/benchmark-rngs): * * with -O2 * Time to get 100000000 philox randoms with at::uniform_real_distribution = 0.462759s * Time to get 100000000 at::mt19937 randoms with at::uniform_real_distribution = 0.39628s * Time to get 100000000 std::mt19937 randoms with std::uniform_real_distribution = 0.352087s * Time to get 100000000 std::mt19937 randoms with at::uniform_real_distribution = 0.419454s * * std::mt19937 is faster when used in conjunction with std::uniform_real_distribution, * however we can't use std::uniform_real_distribution because of this bug: * http://open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#2524. Plus, even if we used * std::uniform_real_distribution and filtered out the 1's, it is a different algorithm * than what's in pytorch currently and that messes up the tests in tests_distributions.py. * The other option, using std::mt19937 with at::uniform_real_distribution is a tad bit slower * than at::mt19937 with at::uniform_real_distribution and hence, we went with the latter. * * Copyright notice: * A C-program for MT19937, with initialization improved 2002/2/10. * Coded by Takuji Nishimura and Makoto Matsumoto. * This is a faster version by taking Shawn Cokus's optimization, * Matthe Bellew's simplification, Isaku Wada's real version. * * Before using, initialize the state by using init_genrand(seed) * or init_by_array(init_key, key_length). * * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. The names of its contributors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * Any feedback is very welcome. * http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html * email: m-mat @ math.sci.hiroshima-u.ac.jp (remove space) */ /** * mt19937_data_pod is used to get POD data in and out * of mt19937_engine. Used in torch.get_rng_state and * torch.set_rng_state functions. */ struct mt19937_data_pod { uint64_t seed_; int left_; bool seeded_; uint32_t next_; std::array<uint32_t, MERSENNE_STATE_N> state_; }; class mt19937_engine { public: inline explicit mt19937_engine(uint64_t seed = 5489) { init_with_uint32(seed); } inline mt19937_data_pod data() const { return data_; } inline void set_data(const mt19937_data_pod& data) { data_ = data; } inline uint64_t seed() const { return data_.seed_; } inline bool is_valid() { if ((data_.seeded_ == true) && (data_.left_ > 0 && data_.left_ <= MERSENNE_STATE_N) && (data_.next_ <= MERSENNE_STATE_N)) { return true; } return false; } inline uint32_t operator()() { uint32_t y; if (--(data_.left_) == 0) { next_state(); } y = *(data_.state_.data() + data_.next_++); y ^= (y >> 11); y ^= (y << 7) & 0x9d2c5680; y ^= (y << 15) & 0xefc60000; y ^= (y >> 18); return y; } private: mt19937_data_pod data_; inline void init_with_uint32(uint64_t seed) { data_.seed_ = seed; data_.seeded_ = true; data_.state_[0] = seed & 0xffffffff; for (const auto j : c10::irange(1, MERSENNE_STATE_N)) { data_.state_[j] = (1812433253 * (data_.state_[j-1] ^ (data_.state_[j-1] >> 30)) + j); } data_.left_ = 1; data_.next_ = 0; } inline uint32_t mix_bits(uint32_t u, uint32_t v) { return (u & UMASK) | (v & LMASK); } inline uint32_t twist(uint32_t u, uint32_t v) { return (mix_bits(u,v) >> 1) ^ (v & 1 ? MATRIX_A : 0); } inline void next_state() { uint32_t* p = data_.state_.data(); data_.left_ = MERSENNE_STATE_N; data_.next_ = 0; for(int j = MERSENNE_STATE_N - MERSENNE_STATE_M + 1; --j; p++) { *p = p[MERSENNE_STATE_M] ^ twist(p[0], p[1]); } for(int j = MERSENNE_STATE_M; --j; p++) { *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], p[1]); } *p = p[MERSENNE_STATE_M - MERSENNE_STATE_N] ^ twist(p[0], data_.state_[0]); } }; typedef mt19937_engine mt19937; } // namespace at
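// Editor's note: a small, hypothetical usage sketch (not from the file above)
// showing seeding, drawing values, and the POD state round-trip that backs
// torch.get_rng_state / torch.set_rng_state.
#include <ATen/core/MT19937RNGEngine.h>
#include <cstdint>
#include <iostream>

int main() {
  at::mt19937 gen(42);                         // seed the engine
  uint32_t first = gen();                      // draw a 32-bit value
  at::mt19937_data_pod snapshot = gen.data();  // serialize engine state
  at::mt19937 restored;                        // default seed is 5489
  restored.set_data(snapshot);                 // restore; is_valid() should hold
  // `restored` now continues the sequence exactly where `gen` left off.
  std::cout << first << " " << (gen() == restored()) << "\n";
  return 0;
}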
6,458
31.954082
94
h
null
pytorch-main/aten/src/ATen/core/NamedTensor.h
#pragma once #include <ATen/core/Dimname.h> #include <c10/core/TensorImpl.h> #include <c10/util/C++17.h> namespace at { class TensorBase; // XXX: This file exists because TensorImpl is in c10, but Dimname is in ATen. // Due to the c10/ATen library split, TensorImpl cannot depend on Dimname, // so we have a couple of workarounds. // // In the long term, we'll move Dimname to c10 and everything in this file // can be refactored out. The main blocker for that is that "c10::Symbol" // actually exists outside of c10 and needs to be moved in. // TensorImpl has a unique_ptr<NamedTensorMetaInterface> field. // XXX: Ideally we would just put optional<vector<Dimname>> into TensorImpl. // // This class has an important invariant: there must be at least ONE // non-wildcard struct TORCH_API NamedTensorMeta final : public c10::NamedTensorMetaInterface { // This enum is to remind people that the invariant on constructors is that // the list of dimnames must have at least one non-wildcard enum HAS_NON_WILDCARD { HasNonWildcard }; explicit NamedTensorMeta(HAS_NON_WILDCARD, DimnameList names) : names_(names.vec()) { check_invariants(); } explicit NamedTensorMeta(HAS_NON_WILDCARD, std::vector<Dimname>&& names) : names_(std::move(names)) { check_invariants(); } std::unique_ptr<c10::NamedTensorMetaInterface> clone() const override { return std::make_unique<NamedTensorMeta>(HasNonWildcard, names_); } DimnameList names() const { return names_; } // Used for an assertion in TensorImpl.h int64_t slow_dim() const override { return names_.size(); } void check_invariants() const { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( std::any_of(names_.begin(), names_.end(), [](const Dimname& n) { return !n.isWildcard(); })); } void set_names(HAS_NON_WILDCARD, DimnameList new_names) { TORCH_INTERNAL_ASSERT(new_names.size() == names_.size()); std::copy(new_names.begin(), new_names.end(), names_.begin()); check_invariants(); } void set_names(HAS_NON_WILDCARD, std::vector<Dimname>&& new_names) { TORCH_INTERNAL_ASSERT(new_names.size() == names_.size()); names_ = std::move(new_names); check_invariants(); } // INVARIANT: at least one Dimname is non-WILDCARD std::vector<Dimname> names_; }; // When NamesMode is disabled, then all operations ignore tensors' names fields. // Concretely speaking, all tensors are treated as having nullopt names. struct TORCH_API NamesMode { static bool is_enabled(); static void set_enabled(bool enabled); }; // A RAII, thread local (!) guard that enables or disables names upon // construction, and sets it back to the original value upon destruction. struct TORCH_API NoNamesGuard { NoNamesGuard() : prev_mode(NamesMode::is_enabled()), initialized(true) { NamesMode::set_enabled(false); } ~NoNamesGuard() { if (initialized) { reset(); } } void reset() { TORCH_INTERNAL_ASSERT(initialized); NamesMode::set_enabled(prev_mode); } private: bool prev_mode; bool initialized; }; void check_names_valid_for(const TensorBase& tensor, DimnameList names); void check_names_valid_for(size_t tensor_dim, DimnameList names); // Sets the names of `tensor` to be `names`. TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, c10::optional<DimnameList> names); TORCH_API const TensorBase& internal_set_names_inplace(const TensorBase& tensor, std::vector<Dimname>&& names, bool validate_names); constexpr size_t kMaxNamedTensorDim = 64; DimnameList default_names(size_t len); namespace impl { // Some helper functions on TensorImpl. Useful for working with names in TH. 
// XXX: Ideally these would exist as methods on TensorImpl TORCH_API void internal_set_names_inplace(TensorImpl* impl, c10::optional<DimnameList> names, bool validate_names); TORCH_API void internal_set_names_inplace(TensorImpl* impl, std::vector<Dimname>&& names, bool validate_names); void check_names_valid_for(TensorImpl* impl, DimnameList names); // Returns true if the tensor's names exist and are not all 'None'. // Returns false if the tensor's names don't exist (were not allocated), // or if all names are 'None'. // We treat not-allocated-names the same as allocated names that are all 'None'. TORCH_API bool has_names(const TensorImpl* impl); // Returns the names of the tensor's dimensions. // Unnamed tensors are treated as having 'None' in all dimensions; this method // would return a DimnameList of all 'None's for an unnamed tensor. TORCH_API DimnameList get_names(const TensorImpl* impl); // This is more of an implementation detail; one should use impl::get_names / // Tensor::names() whenever possible because it provides a cleaner API. // Returns the names of the tensor if they have been allocated; returns nullopt // instead if they haven't been. The names of a tensor are not allocated if a // tensor is constructed with names=None. TORCH_API c10::optional<DimnameList> get_opt_names(const TensorImpl* impl); } // namespace impl } // namespace at
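// Editor's note: a hypothetical sketch (not from the header above) of the
// NoNamesGuard RAII pattern: name propagation is disabled thread-locally and
// restored automatically on scope exit.
#include <ATen/ATen.h>

at::Tensor sum_ignoring_names(const at::Tensor& t) {
  at::NoNamesGuard guard;  // all ops below treat tensors as unnamed
  return t.sum();
}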
5,050
34.822695
132
h
null
pytorch-main/aten/src/ATen/core/PythonOpRegistrationTrampoline.h
#pragma once #include <ATen/core/dispatch/Dispatcher.h> // TODO: this can probably live in c10 namespace at { namespace impl { class TORCH_API PythonOpRegistrationTrampoline final { static std::atomic<c10::impl::PyInterpreter*> interpreter_; public: // Returns true if you successfully registered yourself (that means // you are in the hot seat for doing the operator registrations!) static bool registerInterpreter(c10::impl::PyInterpreter*); }; } // namespace impl } // namespace at
501
22.904762
70
h
null
pytorch-main/aten/src/ATen/core/QuantizerBase.h
#pragma once #include <c10/core/ScalarType.h> #include <c10/core/QScheme.h> #include <c10/util/intrusive_ptr.h> namespace at { class Tensor; struct QTensorImpl; struct Quantizer; using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&; using QuantizerPtr = c10::intrusive_ptr<Quantizer>; /** * Quantizer is the class for storing all the information * that's necessary to perform quantize and dequantize * operation. * * We might have different types of quantization schemes and this is * the base class for all quantizers. * * QTensorImpl will hold a pointer to Quantizer so that we can support * different quantization schemes on Tensor. * * For example, the most common quantization scheme, Affine Quantization, * requires scale and zero_point as parameters, we'll store scale and zero_point * inside the instance and we can use it to quantize a float Tensor or * dequantize a quantized Tensor. * * When you add new types of leaf Quantizer class, please also * make sure to add a corresponding QScheme enum since * they should have one to one mapping. * * Note about intrusive_ptr: * Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensor can * share the same Quantizer. Quantizer should be immutable. */ struct TORCH_API Quantizer : public c10::intrusive_ptr_target { const ScalarType scalar_type_; explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {} ~Quantizer() override; // Copied from torch/csrc/jit/ir/scope.h QuantizerPtr intrusive_from_this() { c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer // from a raw `this` pointer // so we need to bump the refcount // to account for this ownership return c10::intrusive_ptr<Quantizer>::reclaim(this); } /** * Each concrete Quantizer type should have a unique QScheme type. */ virtual QScheme qscheme() const = 0; ScalarType scalar_type() const { return scalar_type_; } /** * quantize a float Tensor into a quantized Tensor. */ virtual Tensor quantize(const Tensor& t) = 0; /** * dequantize a quantized Tensor into a float Tensor. */ virtual Tensor dequantize(const Tensor& t) = 0; /** * dequantize a quantized Tensor into a float Tensor, out= variant */ virtual Tensor& dequantize_out(Tensor& out, const Tensor& t) = 0; /** * Compare against `other` for equality. */ virtual bool equalTo(QuantizerPtr other) const = 0; }; } // namespace at
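// Editor's note: a hypothetical leaf-quantizer skeleton (illustrative only,
// not a real PyTorch quantizer) showing which virtuals a concrete subclass
// must provide; the pass-through behavior is a placeholder, and the pairing
// with kPerTensorAffine is chosen arbitrarily for the sketch.
#include <ATen/core/QuantizerBase.h>
#include <ATen/ATen.h>

struct PassThroughQuantizer : public at::Quantizer {
  explicit PassThroughQuantizer(at::ScalarType t) : at::Quantizer(t) {}
  c10::QScheme qscheme() const override { return c10::kPerTensorAffine; }
  at::Tensor quantize(const at::Tensor& t) override { return t; }
  at::Tensor dequantize(const at::Tensor& t) override { return t; }
  at::Tensor& dequantize_out(at::Tensor& out, const at::Tensor& t) override {
    out.copy_(t);
    return out;
  }
  bool equalTo(at::QuantizerPtr other) const override {
    return other && other->qscheme() == qscheme() &&
        other->scalar_type() == scalar_type();
  }
};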
2,606
30.035714
80
h
null
pytorch-main/aten/src/ATen/core/Tensor.h
#pragma once #include <ATen/core/TensorBody.h> #include <c10/util/Exception.h> namespace at { class TORCH_API OptionalTensorRef { public: OptionalTensorRef() = default; ~OptionalTensorRef() { ref_.unsafeReleaseTensorImpl(); } OptionalTensorRef(const TensorBase& src) : ref_(Tensor::unsafe_borrow_t{}, src) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(src.defined()); } OptionalTensorRef(const OptionalTensorRef& rhs) : ref_(Tensor::unsafe_borrow_t{}, rhs.ref_) {} OptionalTensorRef& operator=(OptionalTensorRef rhs) { std::swap(ref_, rhs.ref_); return *this; } bool has_value() const { return ref_.defined(); } const Tensor& getTensorRef() const & { return ref_; } const Tensor& operator*() const & { return ref_; } const Tensor* operator->() const & { return &ref_; } operator bool() const { return ref_.defined(); } private: Tensor ref_; }; template <typename T> auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_void_t<T> { // Return the grad argument in case of a hook with void return type to have an // std::function with Tensor return type static_assert(std::is_same<decltype(hook(Tensor())), void>::value, "Expected hook to return void"); return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) { OptionalTensorRef grad(grad_base); fn(*grad); return Tensor(); }); } template <typename T> auto Tensor::register_hook(T&& hook) const -> Tensor::hook_return_var_t<T> { return _register_hook([fn=std::forward<T>(hook)](const TensorBase& grad_base) { OptionalTensorRef grad(grad_base); Tensor ret = fn(*grad); return TensorBase(std::move(ret)); }); } } // namespace at
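// Editor's note: a hypothetical sketch (not from the header above) of the
// void-returning register_hook overload; it assumes `t` participates in
// autograd (requires_grad), otherwise register_hook throws at runtime.
#include <ATen/ATen.h>

void attach_grad_logger(const at::Tensor& t) {
  t.register_hook([](const at::Tensor& grad) {
    // void return: the wrapper above hands autograd an undefined Tensor,
    // leaving the gradient unchanged.
    (void)grad;  // inspect or log the gradient here
  });
}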
1,756
22.426667
81
h
null
pytorch-main/aten/src/ATen/core/TensorAccessor.h
#pragma once #include <c10/macros/Macros.h> #include <c10/util/ArrayRef.h> #include <c10/util/Deprecated.h> #include <c10/util/Exception.h> #include <c10/util/irange.h> #include <stdint.h> #include <cstddef> namespace at { // The PtrTraits argument to the TensorAccessor/GenericPackedTensorAccessor // is used to enable the __restrict__ keyword/modifier for the data // passed to cuda. template <typename T> struct DefaultPtrTraits { typedef T* PtrType; }; #if defined(__CUDACC__) || defined(__HIPCC__) template <typename T> struct RestrictPtrTraits { typedef T* __restrict__ PtrType; }; #endif // TensorAccessorBase and TensorAccessor are used for both CPU and CUDA tensors. // For CUDA tensors it is used in device code (only). This means that we restrict ourselves // to functions and types available there (e.g. IntArrayRef isn't). // The PtrTraits argument is only relevant to cuda to support `__restrict__` pointers. template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> class TensorAccessorBase { public: typedef typename PtrTraits<T>::PtrType PtrType; C10_HOST_DEVICE TensorAccessorBase( PtrType data_, const index_t* sizes_, const index_t* strides_) : data_(data_), sizes_(sizes_), strides_(strides_) {} C10_HOST IntArrayRef sizes() const { return IntArrayRef(sizes_,N); } C10_HOST IntArrayRef strides() const { return IntArrayRef(strides_,N); } C10_HOST_DEVICE index_t stride(index_t i) const { return strides_[i]; } C10_HOST_DEVICE index_t size(index_t i) const { return sizes_[i]; } C10_HOST_DEVICE PtrType data() { return data_; } C10_HOST_DEVICE const PtrType data() const { return data_; } protected: PtrType data_; const index_t* sizes_; const index_t* strides_; }; // The `TensorAccessor` is typically instantiated for CPU `Tensor`s using // `Tensor.accessor<T, N>()`. // For CUDA `Tensor`s, `GenericPackedTensorAccessor` is used on the host and only // indexing on the device uses `TensorAccessor`s. 
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> class TensorAccessor : public TensorAccessorBase<T,N,PtrTraits,index_t> { public: typedef typename PtrTraits<T>::PtrType PtrType; C10_HOST_DEVICE TensorAccessor( PtrType data_, const index_t* sizes_, const index_t* strides_) : TensorAccessorBase<T, N, PtrTraits, index_t>(data_,sizes_,strides_) {} C10_HOST_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) { return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1); } C10_HOST_DEVICE const TensorAccessor<T, N-1, PtrTraits, index_t> operator[](index_t i) const { return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i,this->sizes_+1,this->strides_+1); } }; template<typename T, template <typename U> class PtrTraits, typename index_t> class TensorAccessor<T,1,PtrTraits,index_t> : public TensorAccessorBase<T,1,PtrTraits,index_t> { public: typedef typename PtrTraits<T>::PtrType PtrType; C10_HOST_DEVICE TensorAccessor( PtrType data_, const index_t* sizes_, const index_t* strides_) : TensorAccessorBase<T, 1, PtrTraits, index_t>(data_,sizes_,strides_) {} C10_HOST_DEVICE T & operator[](index_t i) { // NOLINTNEXTLINE(clang-analyzer-core.NullDereference) return this->data_[this->strides_[0]*i]; } C10_HOST_DEVICE const T & operator[](index_t i) const { return this->data_[this->strides_[0]*i]; } }; // GenericPackedTensorAccessorBase and GenericPackedTensorAccessor are used for CUDA `Tensor`s on the host // and as arguments passed by value to CUDA kernels. // In contrast to `TensorAccessor`s, they copy the strides and sizes on instantiation (on the host) // in order to transfer them to the device when calling kernels. // On the device, indexing of multidimensional tensors yields `TensorAccessor`s. // Use RestrictPtrTraits as PtrTraits if you want the tensor's data pointer to be marked as __restrict__. // Instantiation from data, sizes, strides is only needed on the host, and std::copy isn't available // on the device, so those functions are host only.
template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> class GenericPackedTensorAccessorBase { public: typedef typename PtrTraits<T>::PtrType PtrType; C10_HOST GenericPackedTensorAccessorBase( PtrType data_, const index_t* sizes_, const index_t* strides_) : data_(data_) { std::copy(sizes_, sizes_ + N, std::begin(this->sizes_)); std::copy(strides_, strides_ + N, std::begin(this->strides_)); } // if index_t is not int64_t, we want to have an int64_t constructor template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type> C10_HOST GenericPackedTensorAccessorBase( PtrType data_, const source_index_t* sizes_, const source_index_t* strides_) : data_(data_) { for (const auto i : c10::irange(N)) { this->sizes_[i] = sizes_[i]; this->strides_[i] = strides_[i]; } } C10_HOST_DEVICE index_t stride(index_t i) const { return strides_[i]; } C10_HOST_DEVICE index_t size(index_t i) const { return sizes_[i]; } C10_HOST_DEVICE PtrType data() { return data_; } C10_HOST_DEVICE const PtrType data() const { return data_; } protected: PtrType data_; index_t sizes_[N]; index_t strides_[N]; C10_HOST void bounds_check_(index_t i) const { TORCH_CHECK_INDEX( 0 <= i && i < index_t{N}, "Index ", i, " is not within bounds of a tensor of dimension ", N); } }; template<typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> class GenericPackedTensorAccessor : public GenericPackedTensorAccessorBase<T,N,PtrTraits,index_t> { public: typedef typename PtrTraits<T>::PtrType PtrType; C10_HOST GenericPackedTensorAccessor( PtrType data_, const index_t* sizes_, const index_t* strides_) : GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {} // if index_t is not int64_t, we want to have an int64_t constructor template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type> C10_HOST GenericPackedTensorAccessor( PtrType data_, const source_index_t* sizes_, const source_index_t* strides_) : GenericPackedTensorAccessorBase<T, N, PtrTraits, index_t>(data_, sizes_, strides_) {} C10_DEVICE TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) { index_t* new_sizes = this->sizes_ + 1; index_t* new_strides = this->strides_ + 1; return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides); } C10_DEVICE const TensorAccessor<T, N - 1, PtrTraits, index_t> operator[](index_t i) const { const index_t* new_sizes = this->sizes_ + 1; const index_t* new_strides = this->strides_ + 1; return TensorAccessor<T,N-1,PtrTraits,index_t>(this->data_ + this->strides_[0]*i, new_sizes, new_strides); } /// Returns a PackedTensorAccessor of the same dimension after transposing the /// two dimensions given. Does not actually move elements; transposition is /// made by permuting the size/stride arrays. If the dimensions are not valid, /// asserts. 
C10_HOST GenericPackedTensorAccessor<T, N, PtrTraits, index_t> transpose( index_t dim1, index_t dim2) const { this->bounds_check_(dim1); this->bounds_check_(dim2); GenericPackedTensorAccessor<T, N, PtrTraits, index_t> result( this->data_, this->sizes_, this->strides_); std::swap(result.strides_[dim1], result.strides_[dim2]); std::swap(result.sizes_[dim1], result.sizes_[dim2]); return result; } }; template<typename T, template <typename U> class PtrTraits, typename index_t> class GenericPackedTensorAccessor<T,1,PtrTraits,index_t> : public GenericPackedTensorAccessorBase<T,1,PtrTraits,index_t> { public: typedef typename PtrTraits<T>::PtrType PtrType; C10_HOST GenericPackedTensorAccessor( PtrType data_, const index_t* sizes_, const index_t* strides_) : GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {} // if index_t is not int64_t, we want to have an int64_t constructor template <typename source_index_t, class = typename std::enable_if<std::is_same<source_index_t, int64_t>::value>::type> C10_HOST GenericPackedTensorAccessor( PtrType data_, const source_index_t* sizes_, const source_index_t* strides_) : GenericPackedTensorAccessorBase<T, 1, PtrTraits, index_t>(data_, sizes_, strides_) {} C10_DEVICE T & operator[](index_t i) { return this->data_[this->strides_[0] * i]; } C10_DEVICE const T& operator[](index_t i) const { return this->data_[this->strides_[0]*i]; } // Same as in the general N-dimensional case, but note that in the // 1-dimensional case the returned PackedTensorAccessor will always be an // identical copy of the original C10_HOST GenericPackedTensorAccessor<T, 1, PtrTraits, index_t> transpose( index_t dim1, index_t dim2) const { this->bounds_check_(dim1); this->bounds_check_(dim2); return GenericPackedTensorAccessor<T, 1, PtrTraits, index_t>( this->data_, this->sizes_, this->strides_); } }; // Can't put this directly into the macro function args because of commas #define AT_X GenericPackedTensorAccessor<T, N, PtrTraits, index_t> // Old name for `GenericPackedTensorAccessor` template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> C10_DEFINE_DEPRECATED_USING(PackedTensorAccessor, AT_X) #undef AT_X template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits> using PackedTensorAccessor32 = GenericPackedTensorAccessor<T, N, PtrTraits, int32_t>; template <typename T, size_t N, template <typename U> class PtrTraits = DefaultPtrTraits> using PackedTensorAccessor64 = GenericPackedTensorAccessor<T, N, PtrTraits, int64_t>; } // namespace at
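// Editor's note: a hypothetical CPU-side sketch (not from the header above)
// of Tensor::accessor<T, N>(), which instantiates the TensorAccessor defined
// here for cheap strided indexing without per-element dispatch.
#include <ATen/ATen.h>
#include <algorithm>

float trace_via_accessor(const at::Tensor& m) {
  TORCH_CHECK(m.dim() == 2 && m.scalar_type() == at::kFloat,
              "need a 2-D float tensor");
  auto acc = m.accessor<float, 2>();  // TensorAccessor<float, 2>
  float sum = 0.f;
  const int64_t n = std::min(acc.size(0), acc.size(1));
  for (int64_t i = 0; i < n; ++i) {
    sum += acc[i][i];  // operator[] peels one dimension per call
  }
  return sum;
}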
10,373
37
122
h
null
pytorch-main/aten/src/ATen/core/TransformationHelper.h
#include <c10/macros/Macros.h> #include <c10/util/Half.h> #include <c10/util/BFloat16.h> #include <c10/util/MathConstants.h> #include <ATen/NumericUtils.h> #include <limits> #include <cstdint> #include <cassert> namespace at { // DistAccumType is used as the accumulate type for distributions. // Note: Ideally we'd be using ATen/AccumulateType.h but it looks // like there is some inconsistency in how accumulate types // are mapped currently, e.g. for the cpu side, float is mapped // to double. template <typename T> struct DistAccumType { }; #if defined(__CUDACC__) || defined(__HIPCC__) template <> struct DistAccumType<half> { using type = float; }; #endif template <> struct DistAccumType<BFloat16> { using type = float; }; template <> struct DistAccumType<Half> { using type = float; }; template <> struct DistAccumType<float> { using type = float; }; template <> struct DistAccumType<double> { using type = double; }; template <typename T> using dist_acctype = typename DistAccumType<T>::type; namespace transformation { /** * A transformation function for `torch.Tensor.random_()`, when both `from` and `to` are specified. * `range` is `to - from` * `base` is `from` */ template <typename T, typename V> C10_HOST_DEVICE inline T uniform_int_from_to(V val, uint64_t range, int64_t base) { return static_cast<T>(static_cast<int64_t>((val % range) + base)); } /** * A transformation function for `torch.Tensor.random_()`, when `from=min_value(int64_t)` and to=None */ template <typename T, typename V> C10_HOST_DEVICE inline T uniform_int_full_range(V val) { return static_cast<T>(static_cast<int64_t>(val)); } /** * A transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`. * In order to prevent compiler warnings reported in GitHub issue 46391, T can't be float or double * in this overloaded version */ template <typename T, typename V> C10_HOST_DEVICE inline typename std::enable_if<!(std::is_floating_point<T>::value), T>::type uniform_int(V val) { if constexpr (std::is_same_v<T, bool>) { return static_cast<bool>(val & 1); } else if constexpr (std::is_same_v<T, int64_t>) { return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1)); } else if constexpr (std::is_same_v<T, at::Half> || std::is_same<T, at::BFloat16>::value) { return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1)); } else if constexpr (std::is_integral_v<T>) { return static_cast<T>(val % (static_cast<uint64_t>(std::numeric_limits<T>::max()) + 1)); } else { assert(false); return 0; } } /** * An overloaded transformation function for `torch.Tensor.random_()`, when used without specifying `from` and `to`, * added to fix compiler warnings reported in GitHub issue 46391. T is either float or double in this version.
*/ template<typename T, typename V> C10_HOST_DEVICE inline typename std::enable_if<std::is_floating_point<T>::value, T>::type uniform_int(V val) { return static_cast<T>(val % static_cast<uint64_t>((1ULL << std::numeric_limits<T>::digits) + 1)); } template <typename T, typename V> C10_HOST_DEVICE inline dist_acctype<T> uniform_real(V val, T from, T to) { constexpr auto MASK = static_cast<V>((static_cast<uint64_t>(1) << std::numeric_limits<T>::digits) - 1); constexpr auto DIVISOR = static_cast<dist_acctype<T>>(1) / (static_cast<uint64_t>(1) << std::numeric_limits<T>::digits); dist_acctype<T> x = (val & MASK) * DIVISOR; return (x * (to - from) + from); } /** * Transforms normally distributed `val` with mean 0.0 and standard deviation 1.0 to * normally distributed with `mean` and standard deviation `std`. */ template <typename T> C10_HOST_DEVICE inline T normal(T val, T mean, T std) { return val * std + mean; } /** * Transforms uniformly distributed `val` between 0.0 and 1.0 to * Cauchy distribution with location parameter `median` and scale parameter `sigma`. */ template <typename T> C10_HOST_DEVICE inline T cauchy(T val, T median, T sigma) { // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function // __tanf overflows and returns `inf/-inf` when (val > 1 - eps) or (val < 0 + eps), // thus we clip those values. constexpr T eps = std::numeric_limits<T>::epsilon(); constexpr T one_minus_eps = 1 - eps; constexpr T zero_plus_eps = 0 + eps; val = (val > one_minus_eps ? one_minus_eps : val); val = (val < zero_plus_eps ? zero_plus_eps : val); return median + sigma * at::tan(c10::pi<T> * (val - static_cast<T>(0.5))); } template <> C10_HOST_DEVICE inline double cauchy(double val, double median, double sigma) { // https://en.wikipedia.org/wiki/Cauchy_distribution#Cumulative_distribution_function return median + sigma * at::tan(c10::pi<double> * (val - static_cast<double>(0.5))); } /** * Transforms uniformly distributed `val` between 0.0 and 1.0 to * exponentially distributed with `lambda` parameter of the distribution. */ template <typename T> C10_HOST_DEVICE inline T exponential(T val, T lambda) { // https://en.wikipedia.org/wiki/Exponential_distribution#Generating_exponential_variates // Different implementations for CUDA and CPU to preserve original logic // TODO: must be investigated and unified!!! // https://github.com/pytorch/pytorch/issues/38662 #if defined(__CUDACC__) || defined(__HIPCC__) // BEFORE TOUCHING THIS CODE READ: https://github.com/pytorch/pytorch/issues/16706 // curand_uniform has (0,1] bounds. log(1) is 0 and exponential excludes 0. // we need log to be not 0, and not underflow when converted to half // fast __logf approximation can underflow, so set log to -epsilon/2 for 1 or close to 1 args auto log = val >= static_cast<T>(1.) - std::numeric_limits<T>::epsilon() / 2 ? -std::numeric_limits<T>::epsilon() / 2 : at::log(val); return static_cast<T>(-1.0) / lambda * log; #else return static_cast<T>(-1.0) / lambda * at::log1p(-val); #endif } /** * Transforms uniformly distributed `val` between 0.0 and 1.0 to * geometrically distributed with success probability `p`. */ template <typename T> C10_HOST_DEVICE inline T geometric(T val, T p) { // https://en.wikipedia.org/wiki/Geometric_distribution#Related_distributions return static_cast<T>(::ceil(at::log(val) / at::log1p(-p))); } /** * Transforms normally distributed `val` to log-normally distributed. 
*/ template <typename T> C10_HOST_DEVICE inline T log_normal(T val) { // https://en.wikipedia.org/wiki/Log-normal_distribution#Mode,_median,_quantiles return at::exp(val); } /** * Transforms uniformly distributed `val` between 0.0 and 1.0 to * bernoulli distributed with success probability `p`. */ template <typename T> C10_HOST_DEVICE inline T bernoulli(T val, T p) { return val < p; } }} // namespace at::transformation
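// Editor's note: a hypothetical sketch (not from the header above) chaining a
// raw at::mt19937 draw through the transformations defined here on the CPU
// path; the seed and lambda are arbitrary.
#include <ATen/core/MT19937RNGEngine.h>
#include <ATen/core/TransformationHelper.h>
#include <iostream>

int main() {
  at::mt19937 gen(7);
  // Raw 32-bit draw -> uniform float in [0, 1) via the MASK/DIVISOR scheme.
  float u = at::transformation::uniform_real<float>(gen(), 0.0f, 1.0f);
  // Uniform -> exponential variate with rate lambda = 2 (host branch uses log1p).
  float e = at::transformation::exponential<float>(u, 2.0f);
  std::cout << u << " " << e << "\n";
  return 0;
}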
6,855
38.402299
122
h
null
pytorch-main/aten/src/ATen/core/UnsafeFromTH.h
#pragma once #include <ATen/core/Tensor.h> namespace at { inline Tensor unsafeTensorFromTH(void * th_pointer, bool retain) { auto tensor_impl = c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(static_cast<TensorImpl*>(th_pointer)); if (retain && tensor_impl.get() != UndefinedTensorImpl::singleton()) { c10::raw::intrusive_ptr::incref(tensor_impl.get()); } return Tensor(std::move(tensor_impl)); } inline Storage unsafeStorageFromTH(void * th_pointer, bool retain) { if (retain && th_pointer) { c10::raw::intrusive_ptr::incref(static_cast<StorageImpl*>(th_pointer)); } return Storage(c10::intrusive_ptr<StorageImpl>::reclaim(static_cast<StorageImpl*>(th_pointer))); } }
708
31.227273
120
h
null
pytorch-main/aten/src/ATen/core/Variadic.h
#pragma once #include <cstdint> #include <tuple> #include <type_traits> #include <utility> #include <c10/util/ArrayRef.h> #include <ATen/core/List.h> namespace at { // This class allows you to write variadic functions which // call a (possibly overloaded) function on each argument, // in order. This is most commonly used in autogenerated code, // where it is convenient to have a function that can uniformly // take arguments of different types. If your arguments // are homogeneous, consider using a std::initializer_list instead. // // For examples of this in use, see torch/csrc/utils/variadic.h template <typename F> struct IterArgs { template <typename... Args> inline F& apply() { return self(); } // NB: Use perfect forwarding here, otherwise we'll make value // copies of all arguments! template <typename T, typename... Args> inline F& apply(T&& arg, Args&&... args) { self()(std::forward<T>(arg)); if (self().short_circuit()) { return self(); } else { return apply(std::forward<Args>(args)...); } } // Here are some handy overloads which provide sensible // defaults for container-like structures that one might // be interested in recursing into. You can enable them // by adding: // // using IterArgs<YourStructName>::operator() // // to your struct. These are not enabled by default because // you may be able to process these structures more efficiently // than handling them one-by-one. template <typename T> void operator()(c10::IListRef<T> args) { for (const auto& arg : args) { self()(arg); if (self().short_circuit()) return; } } template <typename T> void operator()(at::ArrayRef<T> args) { for (const auto& arg : args) { self()(arg); if (self().short_circuit()) return; } } template <typename T> void operator()(const torch::List<T>& args) { for (const auto& arg : args) { self()(arg); if (self().short_circuit()) return; } } // NB: we need to specify std::vector manually as C++ won't // do an implicit conversion to make a template deduction go through. template <typename T> void operator()(const std::vector<T>& args) { self()(at::ArrayRef<T>{args}); } constexpr bool short_circuit() const { return false; } private: inline F& self() { return *static_cast<F*>(this); } }; } // namespace at
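// Editor's note: a hypothetical visitor (not from the header above), in the
// style of torch/csrc/utils/variadic.h, counting Tensor arguments with IterArgs.
#include <ATen/core/Variadic.h>
#include <ATen/ATen.h>
#include <utility>

struct CountTensors : at::IterArgs<CountTensors> {
  size_t count = 0;
  void operator()(const at::Tensor&) { ++count; }
  template <typename T>
  void operator()(const T&) {}  // ignore non-Tensor arguments
};

template <typename... Args>
size_t count_tensors(Args&&... args) {
  // apply() visits each argument in order and returns the visitor by reference,
  // so e.g. count_tensors(t1, 42, t2) == 2.
  return CountTensors().apply(std::forward<Args>(args)...).count;
}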
2,439
24.416667
71
h
null
pytorch-main/aten/src/ATen/core/Vitals.h
#pragma once #include <cstring> #include <map> #include <memory> #include <ostream> #include <sstream> #include <unordered_map> #include <c10/core/impl/LocalDispatchKeySet.h> namespace at { namespace vitals { TORCH_API bool torchVitalEnabled(); struct TORCH_API TorchVitalAttr { // always initialized to empty std::string value = ""; template <typename T> TorchVitalAttr& operator<<(const T& t) { if (torchVitalEnabled()) { std::stringstream ss; ss << t; value += ss.str(); } return *this; } template <typename T> void write(const T& t, bool force) { if (force || torchVitalEnabled()) { std::stringstream ss; ss << t; value = ss.str(); } } }; struct TORCH_API TorchVital { std::string name; std::unordered_map<std::string, TorchVitalAttr> attrs; explicit TorchVital(std::string n) : name(std::move(n)) {} TorchVital() = delete; TorchVitalAttr& create(const std::string& attr); TorchVitalAttr& create(const std::string& attr, bool force); friend std::ostream& operator<<(std::ostream& os, const TorchVital& dt); ~TorchVital(); }; std::ostream& operator<<(std::ostream& os, TorchVital const& tv); // A way to access vitals by string names instead of by global reference. // This enables access to vitals from the PythonAPI. class TORCH_API APIVitals { public: bool vitals_enabled; // Set any vital sign that was added to the map. bool setVital( const std::string& vital_name, const std::string& attr_name, const std::string& value, bool force = false); std::string readVitals(); APIVitals(); // Ensure this stays a singleton APIVitals(APIVitals const& other) = delete; APIVitals(APIVitals&& other) = delete; APIVitals& operator=(const APIVitals&) = delete; APIVitals& operator=(APIVitals&&) = delete; private: std::unordered_map<std::string, TorchVital> name_map_; }; extern TORCH_API APIVitals VitalsAPI; } // namespace vitals } // namespace at #define TORCH_VITAL_DECLARE(name) \ TORCH_API at::vitals::TorchVital TorchVital_##name; #define TORCH_VITAL_DEFINE(name) \ TORCH_API at::vitals::TorchVital TorchVital_##name(#name); #define TORCH_VITAL_BASE(name) TorchVital_##name #define TORCH_VITAL(name, attr) TORCH_VITAL_BASE(name).create(#attr)
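// Editor's note: a hypothetical sketch (not from the header above) of the
// macro workflow; "Dataloader" and "epoch" are illustrative names.
// TORCH_VITAL_DEFINE creates the global TorchVital_Dataloader object, and
// TORCH_VITAL writes an attribute only when vitals are enabled.
#include <ATen/core/Vitals.h>

TORCH_VITAL_DEFINE(Dataloader)

void record_epoch(int epoch) {
  // Appends via TorchVitalAttr::operator<<, gated on torchVitalEnabled().
  TORCH_VITAL(Dataloader, epoch) << epoch;
}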
2,305
23.273684
74
h
null
pytorch-main/aten/src/ATen/core/alias_info.h
#pragma once #include <unordered_set> #include <vector> #include <ATen/core/symbol.h> #include <c10/util/Exception.h> #include <c10/util/hash.h> namespace c10 { /** * class AliasInfo * * Data structure to hold aliasing information for an `Argument`. They can be * nested to represent aliasing information on contained types. * * There is a `beforeSet` which describes the aliasing information before the * operator executes, and an `afterSet` that describes aliasing info * after execution. */ class AliasInfo { public: // Symbol for the set that can alias anything static Symbol wildcardSet() { static const Symbol wc = Symbol::fromQualString("alias::*"); return wc; } void setIsWrite(bool isWrite) { isWrite_ = isWrite; } bool isWrite() const { return isWrite_; } void addBeforeSet(Symbol aliasSet) { beforeSets_.insert(aliasSet); } void addAfterSet(Symbol aliasSet) { afterSets_.insert(aliasSet); } const std::unordered_set<Symbol>& beforeSets() const { return beforeSets_; } const std::unordered_set<Symbol>& afterSets() const { return afterSets_; } Symbol beforeSet() const { AT_ASSERT(beforeSets_.size() == 1); return *beforeSets_.begin(); } bool isWildcardBefore() const { return beforeSets_.count(wildcardSet()) != 0; } bool isWildcardAfter() const { return afterSets_.count(wildcardSet()) != 0; } // the alias info for the contained types of the type // e.g. if this is an annotation on List[T], `sets` refers to // the alias sets that the list may be in // while containedTypes()[0] refers to the sets that members of the list // may be in void addContainedType(AliasInfo aliasInfo) { containedTypes_.push_back(std::move(aliasInfo)); } const std::vector<AliasInfo>& containedTypes() const { return containedTypes_; } private: std::unordered_set<Symbol> beforeSets_; std::unordered_set<Symbol> afterSets_; std::vector<AliasInfo> containedTypes_; bool isWrite_ = false; }; inline bool operator==(const AliasInfo& lhs, const AliasInfo& rhs) { return lhs.isWrite() == rhs.isWrite() && lhs.beforeSets() == rhs.beforeSets() && lhs.afterSets() == rhs.afterSets() && lhs.containedTypes() == rhs.containedTypes(); } // this does match the way things are represented in the schema inline std::ostream& operator<<(std::ostream& out, const AliasInfo& aliasInfo) { out << "("; bool first = true; for (const auto& set : aliasInfo.beforeSets()) { if (first) { first = false; } else { out << "|"; } out << set.toUnqualString(); } if (aliasInfo.isWrite()) { out << "!"; } if (aliasInfo.beforeSets() != aliasInfo.afterSets()) { out << " -> "; first = true; for (const auto& set : aliasInfo.afterSets()) { if (first) { first = false; } else { out << "|"; } out << set.toUnqualString(); } } out << ")"; return out; } } // namespace c10 namespace std { template <> struct hash<c10::AliasInfo> { size_t operator()(const c10::AliasInfo& aliasInfo) const { auto hash = std::hash<bool>()(aliasInfo.isWrite()); // NOTE: for unordered_set hashes, we couldn't use hash_combine // because hash_combine is order dependent. Instead, we choose to // use XOR as the combining function as XOR is commutative. 
size_t before_set_hash_seed = 0; for (auto &e: aliasInfo.beforeSets()) { auto symbol_hash = std::hash<c10::Symbol>()(e); before_set_hash_seed = before_set_hash_seed ^ symbol_hash; } size_t after_set_hash_seed = 0; for (auto &e: aliasInfo.afterSets()) { auto symbol_hash = std::hash<c10::Symbol>()(e); after_set_hash_seed = after_set_hash_seed ^ symbol_hash; } hash = c10::hash_combine(hash, before_set_hash_seed); hash = c10::hash_combine(hash, after_set_hash_seed); for (auto &e: aliasInfo.containedTypes()) { auto contained_type_hash = std::hash<c10::AliasInfo>()(e); hash = c10::hash_combine(hash, contained_type_hash); } return hash; } }; }
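// Editor's note: a hypothetical sketch (not from the header above) building
// the alias annotation for an out-style argument such as `Tensor(a!)`: a
// member of alias set `a` before and after the op, and written to by the op.
#include <ATen/core/alias_info.h>

c10::AliasInfo make_write_annotation() {
  c10::AliasInfo info;
  info.setIsWrite(true);  // the `!` in the schema annotation
  auto set_a = c10::Symbol::fromQualString("alias::a");
  info.addBeforeSet(set_a);
  info.addAfterSet(set_a);
  return info;
}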
4,160
26.375
80
h
null
pytorch-main/aten/src/ATen/core/blob.h
#pragma once #include <cstddef> #include <sstream> #include <type_traits> #include <typeinfo> #include <vector> #include <c10/util/intrusive_ptr.h> #include <c10/util/typeid.h> #include <c10/macros/Macros.h> namespace caffe2 { class Tensor; /** * @brief Blob is a general container that hosts a typed pointer. * * A Blob hosts a pointer as well as its type, and takes charge of deleting it * properly when the blob is deallocated or re-allocated with a new type. A blob * could contain anything, although the most common case is to contain a Tensor. */ class TORCH_API Blob final : public c10::intrusive_ptr_target { public: /** * Initializes an empty Blob. */ Blob() noexcept : meta_(), pointer_(nullptr), has_ownership_(false) {} ~Blob() override { Reset(); } Blob(Blob&& other) noexcept : Blob() { swap(other); } Blob& operator=(Blob&& other) noexcept { Blob(std::move(other)).swap(*this); return *this; } /** * Checks if the content stored in the blob is of type T. */ template <class T> bool IsType() const noexcept { return meta_.Match<T>(); } /** * Returns the meta info of the blob. */ const TypeMeta meta() const noexcept { return meta_; } /** * Returns a printable typename of the blob. */ c10::string_view TypeName() const noexcept { return meta_.name(); } /** * @brief Gets the const reference of the stored object. The code checks if * the stored object is of the desired type. */ // TODO(jerryzh): add a Get(c10::DeviceType) function? template <class T> const T& Get() const { TORCH_INTERNAL_ASSERT( IsType<T>(), "wrong type for the Blob instance. Blob contains ", meta_.name(), " while caller expects ", TypeMeta::TypeName<T>()); // TODO: after we add Get<Tensor>(c10::DeviceType) // and changed all the callsites, we can add // a static assert here to enforce T != Tensor // NOLINTNEXTLINE(clang-analyzer-core.uninitialized.UndefReturn) return *static_cast<const T*>(pointer_); } const void* GetRaw() const noexcept { return pointer_; } void* GetRaw() noexcept { return pointer_; } /** * @brief Gets a mutable pointer to the stored object. * * If the current object is not of the right type, a new object is created * and the old object is freed. Note that type T should have a default * constructor. Otherwise, create the object yourself first, and use * Reset(). */ template <class T> T* GetMutable() { static_assert( std::is_default_constructible<T>::value, "GetMutable can't be called with non-default-constructible types. " "Try using specialized methods"); if (IsType<T>()) { return static_cast<T*>(pointer_); } else { // TODO Re-enable logging // VLOG(1) << "Create new mutable object " << TypeMeta::TypeName<T>(); return Reset<T>(new T()); } } template <class T> T* GetMutableOrNull() { if (IsType<T>()) { return static_cast<T*>(pointer_); } else { return nullptr; } } /** * Sets the underlying object to the allocated one. The Blob then takes over * the ownership of the passed in pointer. If there is already an object in * the Blob, the old object is freed. * * This is used when the underlying class T does not have a default ctor, or * complex initializations needs to be done outside the blob. */ template <class T> T* Reset(T* allocated) { free_(); meta_ = TypeMeta::Make<T>(); pointer_ = static_cast<void*>(allocated); has_ownership_ = true; return allocated; } /** * Sets the underlying object to the allocated one, but does not take over * the ownership of the passed in pointer. If there is already an object in * the Blob, the old object is freed. 
* * Unlike Reset, this does not take over the ownership of the pointer and the * caller is responsible for making sure that the lifetime of the allocated * blob outlasts the lifetime of any access to this blob, until another Reset * call is made or the blob is destructed. */ template <class T> typename std::remove_const<T>::type* ShareExternal( typename std::remove_const<T>::type* allocated) { return static_cast<T*>(ShareExternal( static_cast<void*>(allocated), TypeMeta::Make<typename std::remove_const<T>::type>())); } void* ShareExternal(void* allocated, const TypeMeta meta) { free_(); meta_ = meta; pointer_ = allocated; has_ownership_ = false; return allocated; } /** * Resets the Blob to an empty one. */ void Reset() { free_(); pointer_ = nullptr; meta_ = TypeMeta(); has_ownership_ = false; } /** * @brief Swaps the underlying storage of two blobs. */ void swap(Blob& rhs) { using std::swap; swap(meta_, rhs.meta_); swap(pointer_, rhs.pointer_); swap(has_ownership_, rhs.has_ownership_); } private: void free_() { if (has_ownership_ && pointer_ != nullptr) { (*meta_.deleteFn())(pointer_); } } TypeMeta meta_; void* pointer_; bool has_ownership_; C10_DISABLE_COPY_AND_ASSIGN(Blob); }; inline void swap(Blob& lhs, Blob& rhs) { lhs.swap(rhs); } inline std::ostream& operator<<(std::ostream& out, const Blob& v) { return out << "Blob[" << v.TypeName() << "]"; } } // namespace caffe2
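// Editor's note: a hypothetical sketch (not from the header above) of the
// ownership round-trip: GetMutable default-constructs and owns the object,
// Get type-checks access, and Reset frees it.
#include <ATen/core/blob.h>
#include <iostream>
#include <string>

int main() {
  caffe2::Blob blob;
  auto* s = blob.GetMutable<std::string>();  // creates and owns a std::string
  *s = "hello";
  std::cout << blob.Get<std::string>() << " is_string="
            << blob.IsType<std::string>() << "\n";
  blob.Reset();  // frees the string; the blob is empty again
  return 0;
}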
5,441
24.914286
80
h
null
pytorch-main/aten/src/ATen/core/builtin_function.h
#pragma once #include <ATen/core/function.h> #include <ATen/core/ivalue.h> #include <c10/util/Exception.h> #include <c10/util/intrusive_ptr.h> #include <functional> #include <utility> namespace torch { namespace jit { struct BuiltinOpFunction : public Function { BuiltinOpFunction( c10::QualifiedName qualname, c10::FunctionSchema schema, std::function<void(Stack&)> callable, std::string doc_string = "") : name_(std::move(qualname)), callable_(std::move(callable)), schema_(std::move(schema)), doc_string_(std::move(doc_string)) { TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1); } c10::string_view doc_string() const override { return doc_string_; } void run(Stack& stack) override { callable_(stack); } c10::intrusive_ptr<c10::ivalue::Future> runAsync( Stack& stack, TaskLauncher /* not used */) override { run(stack); auto res = c10::make_intrusive<c10::ivalue::Future>(stack.front().type()); res->markCompleted(std::move(stack.front())); return res; } const c10::QualifiedName& qualname() const override { return name_; } // if this isn't yet defined, run its method_creator function void ensure_defined() override { // nop } const c10::FunctionSchema& getSchema() const override { return schema_; } size_t num_inputs() const override { return schema_.arguments().size(); } Function& setSchema(c10::FunctionSchema schema) override { schema_ = std::move(schema); return *this; } bool call(Stack& stack, c10::optional<size_t>, c10::function_ref<void(const Code&)>) override { run(stack); return false; } bool call(Stack& stack, c10::function_ref<void(const mobile::Code&)>) override { run(stack); return false; } ~BuiltinOpFunction() override = default; private: c10::QualifiedName name_; std::function<void(Stack&)> callable_; c10::FunctionSchema schema_; std::string doc_string_; }; } // namespace jit } // namespace torch
2,044
21.977528
97
h
null
pytorch-main/aten/src/ATen/core/custom_class.h
#pragma once #include <typeindex> #include <memory> #include <c10/macros/Export.h> #include <c10/macros/Macros.h> #include <c10/util/Exception.h> namespace c10 { struct ClassType; using ClassTypePtr = std::shared_ptr<ClassType>; TORCH_API c10::ClassTypePtr getCustomClassTypeImpl(const std::type_index &tindex); template <typename T> const c10::ClassTypePtr& getCustomClassType() { // Classes are never unregistered from getCustomClassTypeMap and the // hash lookup can be a hot path, so just cache. // For the same reason, it's fine if this ends up getting duplicated across // DSO boundaries for whatever reason. static c10::ClassTypePtr cache = getCustomClassTypeImpl( std::type_index(typeid(T))); return cache; } }
744
24.689655
82
h
null
pytorch-main/aten/src/ATen/core/dynamic_type.h
#pragma once #include <memory> #include <type_traits> #include <ATen/core/jit_type_base.h> #include <c10/util/Optional.h> namespace c10 { using DynamicTypeBits = std::uint32_t; #define DYNAMIC_TYPE_BIT(x) (1u << x) constexpr DynamicTypeBits kDynamicCovariantTypeBit = DYNAMIC_TYPE_BIT(31); constexpr DynamicTypeBits kDynamicAnyTypeBit = DYNAMIC_TYPE_BIT(30); constexpr DynamicTypeBits kDynamicNoneTypeBit = DYNAMIC_TYPE_BIT(1); constexpr DynamicTypeBits kDynamicIntTypeBit = DYNAMIC_TYPE_BIT(3); constexpr DynamicTypeBits kDynamicFloatTypeBit = DYNAMIC_TYPE_BIT(4); constexpr DynamicTypeBits kDynamicComplexTypeBit = DYNAMIC_TYPE_BIT(5); constexpr DynamicTypeBits kDynamicListTypeBit = DYNAMIC_TYPE_BIT(7); constexpr DynamicTypeBits kDynamicTupleTypeBit = DYNAMIC_TYPE_BIT(8); constexpr DynamicTypeBits kDynamicClassTypeBit = DYNAMIC_TYPE_BIT(10); #define FORALL_DYNAMIC_TYPES(_) \ _(Tensor, DYNAMIC_TYPE_BIT(0), 1) \ _(None, kDynamicNoneTypeBit, 1) \ _(Bool, DYNAMIC_TYPE_BIT(2), 1) \ _(Int, kDynamicIntTypeBit, 1) \ _(Float, kDynamicFloatTypeBit, 1) \ _(Complex, kDynamicComplexTypeBit, 1) \ _(Number, \ (kDynamicIntTypeBit | kDynamicFloatTypeBit | kDynamicComplexTypeBit), \ 1) \ _(String, DYNAMIC_TYPE_BIT(6), 1) \ _(List, kDynamicListTypeBit, 0) \ _(Tuple, (kDynamicTupleTypeBit | kDynamicCovariantTypeBit), 0) \ _(Dict, DYNAMIC_TYPE_BIT(9), 0) \ _(Class, kDynamicClassTypeBit, 0) \ _(Optional, \ (DYNAMIC_TYPE_BIT(11) | kDynamicNoneTypeBit | kDynamicCovariantTypeBit), \ 0) \ _(AnyList, (kDynamicListTypeBit | kDynamicAnyTypeBit), 1) \ _(AnyTuple, \ (kDynamicTupleTypeBit | kDynamicCovariantTypeBit | kDynamicAnyTypeBit), \ 1) \ _(DeviceObj, DYNAMIC_TYPE_BIT(12), 1) \ _(StreamObj, DYNAMIC_TYPE_BIT(13), 1) \ _(Capsule, DYNAMIC_TYPE_BIT(14), 1) \ _(Generator, DYNAMIC_TYPE_BIT(15), 1) \ _(Storage, DYNAMIC_TYPE_BIT(16), 1) \ _(Var, DYNAMIC_TYPE_BIT(17), 0) \ _(AnyClass, (kDynamicClassTypeBit | kDynamicAnyTypeBit), 1) \ _(QScheme, DYNAMIC_TYPE_BIT(18), 1) \ _(Quantizer, DYNAMIC_TYPE_BIT(19), 1) \ _(AnyEnum, DYNAMIC_TYPE_BIT(20), 1) \ _(RRef, DYNAMIC_TYPE_BIT(21), 0) \ _(Future, DYNAMIC_TYPE_BIT(22), 0) \ _(Await, DYNAMIC_TYPE_BIT(23), 0) \ _(Any, 0xffffffff, 1) #define FORALL_DYNAMIC_TYPES_FAKE(_) \ _(ScalarType, kDynamicIntTypeBit, 1) \ _(Layout, kDynamicIntTypeBit, 1) \ _(SymInt, kDynamicIntTypeBit, 1) \ _(MemoryFormat, kDynamicIntTypeBit, 1) #define FORWARD_DECL_TYPE(NAME, _, __) struct NAME ## Type; FORALL_DYNAMIC_TYPES(FORWARD_DECL_TYPE) FORALL_DYNAMIC_TYPES_FAKE(FORWARD_DECL_TYPE) #undef FORWARD_DECL_TYPE class DynamicType; using DynamicTypePtr = std::shared_ptr<DynamicType>; /** * DynamicType is designed as a low dependency type system for TorchScript. The * existing JIT types are used for both compilation and runtime, which makes * sense for server contexts because we often compile and run the model in * the same process, however this doesn't hold for mobile devices where we * always compiles a model ahead of time, therefore there will be dependencies * which are not needed, but built with mobile runtime causing binary size * bloat, by design. Every basic type like Int, Bool or String will bring their * vtable, typeinfo, constructor, destructor and even more data from their * specializations for STL types to the binary causing a long tail bloat. * * The core problem is about the complexity to implement and maintain a single * type system for both analysis and execution purposes. 
Although they should * have exactly the same semantics, in practice implementing a unified * abstraction adds conceptual and representational overhead for both sides of * the world. * * To address the issues, DynamicType implements a minimal subset of JIT types * and uses a generic algorithm to test all subtyping relations. To achieve * this, we assign each dynamic type a single integer tag to represent its * semantics. More specifically, a dynamic type is defined as a set of "control * bits" and "data bits", where control bits describe the special behavior when * testing a type and data bits map to the identity of each nominal type. We use * bit operations to perform all the tests. * * For example, a "covariant bit" is a control bit used to describe whether a * type is covariant; right now the most used one is the tuple type, and in * addition to the control bit, tuple type's data bit is the 8th bit from the * LSB. Control bits start from MSB and data bits start from LSB. * * If two types are equal, then they are subtypes of each other; also, if the * bits from one type tag are a subset of the other tag, it automatically * becomes a subtype of the other. This simplifies the subtyping logic a lot, * and over the long term it is possible to adopt this scheme on the server side * as well. Special cases can be added but they generally should not take too * much code size. * * DynamicType may or may not inherit from c10::Type because it's not the core * requirement of DynamicType to interface with existing JIT types, but we might * want to inherit from c10::Type to reduce the migration cost. */ class DynamicType : public SharedType { using ClassTypePtr = std::shared_ptr<const c10::ClassType>; /** * An implementation detail to support NamedTuple. */ struct LabeledDynamicType { c10::optional<std::string> label; DynamicTypePtr ty; explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {} bool equals(const LabeledDynamicType& other) const; bool isSubtypeOf(const LabeledDynamicType& other) const; }; public: // TODO Change Ptr to DynamicTypePtr when all migrations are done. using Ptr = TypePtr; using ElementType = DynamicType; ~DynamicType() override; struct Arguments { Arguments() = default; Arguments(c10::ArrayRef<TypePtr>); Arguments(const std::vector<c10::string_view>&, c10::ArrayRef<TypePtr>); std::vector<LabeledDynamicType> elems; }; enum class Tag : DynamicTypeBits { #define DYNAMIC_TYPE_ITEM(NAME, VAL, _) NAME = VAL, FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_ITEM) FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_ITEM) #undef DYNAMIC_TYPE_ITEM }; bool equals(const Type& rhs) const override; bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; std::string str() const override; static const TypeKind Kind = TypeKind::DynamicType; static TORCH_API DynamicTypePtr create(Type& ty); explicit DynamicType(Tag, Arguments); explicit DynamicType(Tag, c10::string_view, Arguments); TypePtr containedType(size_t) const override; size_t containedTypeSize() const override; Tag tag() const { return tag_; } const c10::optional<std::string>& name() const { return name_; } const Arguments& arguments() const { return arguments_; } TORCH_API TypeKind dynamicKind() const; // Should be used only on the server side to restore static type information. 
#ifndef C10_MOBILE TORCH_API #endif TypePtr fallback() const; private: bool symmetric() const override { return false; } friend struct Type; static std::shared_ptr<const DynamicType> create(const Type& ty); DynamicType(const Type& other); bool equals(const DynamicType& other) const; template <typename F> bool compareArguments(const DynamicType& other, F&& f) const { if (arguments_.elems.size() != other.arguments_.elems.size()) { return false; } for (size_t i = 0; i < arguments_.elems.size(); i++) { if (!f(arguments_.elems[i], other.arguments_.elems[i])) { return false; } } return true; } Tag tag_; c10::optional<std::string> name_; union { Arguments arguments_; ClassTypePtr class_; }; }; template <typename T> struct DynamicTypeTrait { C10_NOINLINE static auto tagValue() { TORCH_CHECK(false); return DynamicType::Tag::Any; } }; namespace detail { C10_NOINLINE DynamicTypePtr makeBaseType(DynamicType::Tag tag); } #define DYNAMIC_TYPE_TAG_VALUE(NAME, _, IS_BASE_TYPE) \ template <> \ struct TORCH_API DynamicTypeTrait<NAME##Type> { \ C10_ERASE static auto tagValue() { \ return DynamicType::Tag::NAME; \ } \ static constexpr bool isBaseType = IS_BASE_TYPE; \ template <typename T = const DynamicTypePtr&> \ static std::enable_if_t<isBaseType, T> getBaseType() { \ static auto type = detail::makeBaseType(tagValue()); \ return type; \ } \ }; // namespace c10 FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_TAG_VALUE) FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_TAG_VALUE) #undef DYNAMIC_TYPE_TAG_VALUE } // namespace c10
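// Editor's note: a small illustration (not from the header above) of the
// bit-subset subtyping rule described in the note, using only constants
// defined in this file: Int's data bit is a subset of Number's
// (Int | Float | Complex), so Int <: Number falls out of a mask test.
#include <ATen/core/dynamic_type.h>

static_assert(
    (c10::kDynamicIntTypeBit &
     (c10::kDynamicIntTypeBit | c10::kDynamicFloatTypeBit |
      c10::kDynamicComplexTypeBit)) == c10::kDynamicIntTypeBit,
    "Int's tag bits are a subset of Number's, i.e. Int <: Number");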
10,388
42.468619
80
h
null
pytorch-main/aten/src/ATen/core/enum_type.h
#pragma once #include <ATen/core/ivalue.h> #include <utility> namespace c10 { struct EnumType; using EnumTypePtr = std::shared_ptr<EnumType>; using EnumNameValue = std::pair<std::string, IValue>; struct TORCH_API EnumType : public NamedType { friend struct Type; static const TypeKind Kind = TypeKind::EnumType; static EnumTypePtr create( const c10::QualifiedName& qualified_class_name, TypePtr value, std::vector<EnumNameValue> enum_names_values, std::weak_ptr<::torch::jit::CompilationUnit> cu) { switch (value->kind()) { case TypeKind::IntType: case TypeKind::FloatType: case TypeKind::StringType: return EnumTypePtr(new EnumType( qualified_class_name, std::move(value), std::move(enum_names_values), std::move(cu))); default: AT_ERROR( "Cannot create Enum with value type '", value->str(), "', only int, float and string are supported"); } } std::string str() const override { return "Enum<" + annotation_str() + ">"; } std::string repr_str() const override { return str(); } const TypePtr& getValueType() const { return value_type_; } bool equals(const Type& rhs) const override { if (auto* enum_rhs = rhs.castRaw<EnumType>()) { return name().value() == enum_rhs->name().value() && *getValueType() == *(enum_rhs->getValueType()) && this->compilation_unit() == enum_rhs->compilation_unit(); } return false; } bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; std::shared_ptr<const ::torch::jit::CompilationUnit> compilation_unit() const { auto cu = cu_.lock(); return cu; } const QualifiedName qualifiedClassName() const { return name().value(); } at::ArrayRef<TypePtr> containedTypes() const override { return value_type_; } const at::ArrayRef<EnumNameValue> enumNamesValues() const { return enum_names_values_; } private: EnumType( c10::QualifiedName qualified_class_name, TypePtr value_type, std::vector<EnumNameValue> enum_names_values, std::weak_ptr<torch::jit::CompilationUnit> cu) : NamedType(TypeKind::EnumType, std::move(qualified_class_name)), value_type_(std::move(value_type)), enum_names_values_(std::move(enum_names_values)), cu_(std::move(cu)) {} std::string annotation_str_impl( TypePrinter printer = nullptr) const override { (void)printer; // Suppress unused variable warning const auto& n = name().value(); return n.qualifiedName(); } TypePtr value_type_; std::vector<EnumNameValue> enum_names_values_; std::weak_ptr<::torch::jit::CompilationUnit> cu_; }; } // namespace c10
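// Editor's note: a hypothetical sketch (not from the header above) of
// EnumType::create for an int-valued enum; "__torch__.Color" and the values
// are illustrative, and the empty weak_ptr stands in for a CompilationUnit
// purely to satisfy the signature.
#include <ATen/core/enum_type.h>
#include <ATen/core/jit_type.h>
#include <vector>

c10::EnumTypePtr make_color_enum() {
  std::vector<c10::EnumNameValue> values = {
      {"RED", c10::IValue(0)}, {"GREEN", c10::IValue(1)}};
  return c10::EnumType::create(
      c10::QualifiedName("__torch__.Color"),
      c10::IntType::get(),  // value type: one of int, float, string
      std::move(values),
      std::weak_ptr<torch::jit::CompilationUnit>());
}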
2,801
26.203883
77
h
null
pytorch-main/aten/src/ATen/core/function.h
#pragma once #include <ATen/core/function_schema.h> #include <ATen/core/ivalue.h> #include <ATen/core/qualified_name.h> #include <c10/util/Exception.h> #include <c10/util/FunctionRef.h> namespace c10 { struct FunctionSchema; }; namespace at { TORCH_API void launch(std::function<void()> func); } namespace torch { namespace jit { struct Graph; struct Code; namespace mobile { struct Code; } using Stack = std::vector<at::IValue>; using Kwargs = std::unordered_map<std::string, at::IValue>; struct RecursiveMethodCallError : public std::exception {}; using TaskLauncher = std::function<void(std::function<void()>)>; TORCH_API void preoptimizeGraph(std::shared_ptr<Graph>& graph, bool disable_autocast=false); // A Function is a pure Graph with no implicit `self` object bound. // It contains schema information and the executor that manages the // execution of the function. Method is a wrapper around an // underlying Function that also provides a `self` object. struct TORCH_API Function { virtual c10::string_view doc_string() const { static constexpr c10::string_view no_doc_string = ""; return no_doc_string; } virtual bool isGraphFunction() const { return false; } virtual void run(Stack& stack) = 0; virtual c10::intrusive_ptr<c10::ivalue::Future> runAsync( Stack& /*stack*/, TaskLauncher taskLauncher = at::launch) { (void)taskLauncher; // Suppress unused variable warning TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false); return {}; } at::IValue operator()( Stack stack, const Kwargs& kwargs = Kwargs()) { getSchema().checkAndNormalizeInputs(stack, kwargs); run(stack); return stack.front(); } virtual const c10::QualifiedName& qualname() const = 0; const std::string& name() const { return qualname().name(); } // if this isn't yet defined, run its method_creator function virtual void ensure_defined() = 0; virtual const c10::FunctionSchema& getSchema() const = 0; virtual size_t num_inputs() const = 0; virtual Function& setSchema(c10::FunctionSchema schema) = 0; // call() defines how different interpreter implementations interact with // Function objects. Basically interpreters need to provide a callback to // communicate to Functions what to do if provided a Code object. // Alternatively we could design the signature to return an optional Code // object, but that would require special handling of the null case in the // interpreter, and the fallback behavior is not well defined by the // interpreter but rather by the Functions themselves, so a callback approach // is more reasonable than returning values. // If the callback completes successfully, call() returns true; otherwise // call() returns false. // Overload for server interpreter, a bailout size is needed for graph executor. virtual bool call(Stack&, c10::optional<size_t>, c10::function_ref<void(const Code&)>) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false); return false; } // Overload for mobile interpreter. virtual bool call(Stack&, c10::function_ref<void(const mobile::Code&)>) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false); return false; } virtual ~Function() = default; }; } // namespace jit } // namespace torch
3,232
28.935185
92
h
null
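A short sketch of driving a Function through its generic call entry point. `fn` is assumed to come from elsewhere (e.g. a compiled module); operator() checks and normalizes the inputs against the schema, runs the function, and leaves the result at the front of the stack.

#include <ATen/core/function.h>

at::IValue call_unary(torch::jit::Function& fn, at::IValue arg) {
  torch::jit::Stack stack;
  stack.push_back(std::move(arg));
  // Checks/normalizes inputs against fn's schema, then runs it.
  return fn(std::move(stack));
}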
pytorch-main/aten/src/ATen/core/function_schema.h
#pragma once #include <c10/util/StringUtil.h> #include <c10/util/string_view.h> #include <c10/util/irange.h> #include <ATen/core/jit_type.h> #include <ATen/core/symbol.h> #include <ATen/core/ivalue.h> #include <ATen/core/alias_info.h> #include <ATen/core/operator_name.h> #include <ATen/core/dispatch/OperatorOptions.h> #include <unordered_map> namespace c10 { // schema as used in the compiler for resolving function calls and reporting // errors. These objects should be constructed from C10 schema once those // are available. struct Argument; struct FunctionSchema; using AliasTypeSet = std::vector<TypePtr>; bool operator==(const Argument& lhs, const Argument& rhs); struct Argument { Argument( std::string name = "", TypePtr type = nullptr, c10::optional<int32_t> N = c10::nullopt, c10::optional<IValue> default_value = c10::nullopt, bool kwarg_only = false, c10::optional<AliasInfo> alias_info = c10::nullopt) : Argument(name, type, type, N, default_value, kwarg_only, alias_info) {} Argument( std::string name, TypePtr fake_type, TypePtr real_type, c10::optional<int32_t> N = c10::nullopt, c10::optional<IValue> default_value = c10::nullopt, bool kwarg_only = false, c10::optional<AliasInfo> alias_info = c10::nullopt) : name_(std::move(name)), type_(fake_type ? std::move(fake_type) : TensorType::get()), real_type_(real_type ? std::move(real_type) : type_), N_(std::move(N)), default_value_(std::move(default_value)), alias_info_(alias_info ? std::make_unique<AliasInfo>(std::move(*alias_info)) : nullptr), kwarg_only_(kwarg_only) { // this is an softly-enforced invariant for out arguments. bool is_alias = alias_info_ != nullptr && alias_info_->isWrite(); is_out_ = kwarg_only_ && is_alias; } Argument(Argument&& rhs) noexcept = default; Argument(const Argument& rhs) : name_(rhs.name_), type_(rhs.type_), real_type_(rhs.real_type_), N_(rhs.N_), default_value_(rhs.default_value_), alias_info_(rhs.alias_info_ ? std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr), kwarg_only_(rhs.kwarg_only_), is_out_(rhs.is_out_) {} Argument& operator=(Argument&& rhs) = default; Argument& operator=(const Argument& rhs) { if (this != &rhs) { name_ = rhs.name_; type_ = rhs.type_; real_type_ = rhs.real_type_; N_ = rhs.N_; default_value_ = rhs.default_value_; alias_info_ = rhs.alias_info_ ? 
std::make_unique<AliasInfo>(*rhs.alias_info_) : nullptr; kwarg_only_ = rhs.kwarg_only_; is_out_ = rhs.is_out_; } return *this; } const std::string& name() const { return name_; } const TypePtr& type() const { return type_; } // if type() is non-null, this is guaranteed to be non-null (if no real // type was provided, this takes on type()'s value) const TypePtr& real_type() const { return real_type_; } c10::optional<int32_t> N() const { return N_; } const c10::optional<IValue>& default_value() const { return default_value_; } bool kwarg_only() const { return kwarg_only_; } bool is_out() const { return is_out_; } C10_NODISCARD const AliasInfo* alias_info() const { return alias_info_.get(); } bool is_inferred_type() const { bool is_inferred_type = false; TORCH_INTERNAL_ASSERT(type_); if (auto pt = type_->cast<TensorType>()) { if (pt->isInferredType()) { is_inferred_type = true; } } return is_inferred_type; } std::string formatTypeMismatchMsg(const std::string& actual_type) const { std::string inferred_type_hint; if (is_inferred_type()) { inferred_type_hint = c10::str( "Inferred '", name(), "' to be of type 'Tensor' ", "because it was not annotated with an explicit type.\n"); } return c10::str( "Expected a value of type '", type()->repr_str(), "' for argument '", name(), "' but instead found type '", actual_type, "'.\n", inferred_type_hint); } Argument cloneWithType(TypePtr new_type) const { return Argument( name_, std::move(new_type), N_, default_value_, kwarg_only_, alias_info_ ? c10::optional<AliasInfo>(*alias_info_) : c10::nullopt); } // this function checks whether this Argument is backward compatible with // the old one. we consider the following cases are backward compatible: // 1) two arguments are equal // 2) this arg's type should be subtype of old // 3) this arg must provide the same default value if old arg has one, bool isBackwardCompatibleWith( const Argument& old, std::ostream* why_not=nullptr) const; // this function checks whether this Argument is forward compatible with // the old one. we consider the following cases are forward compatible: // 1) two arguments are equal // 2) this arg's type should be subtype of old // 3) this arg must provide the same default value if old arg has one, bool isForwardCompatibleWith( const Argument& old, std::ostream* why_not = nullptr) const; private: std::string name_; TypePtr type_; TypePtr real_type_; // this is ScalarType, not int, e.g. // for list types, an optional statically known length for the list // e.g. for int[3]: type = ListType::ofInts(), N = 3 // If present, this will allow scalars to be broadcast to this length to // become a list. c10::optional<int32_t> N_; c10::optional<IValue> default_value_; // AliasInfo is huge, so let's only allocate memory for it if // necessary (which it isn't during schema parsing on startup, to // give a pertinent example). std::unique_ptr<AliasInfo> alias_info_; // is this only specifiable as a keyword argument? 
bool kwarg_only_; // marks if the argument is out variant of the schema bool is_out_; }; inline bool operator==(const Argument& lhs, const Argument& rhs) { return lhs.name() == rhs.name() && *lhs.type() == *rhs.type() && lhs.N() == rhs.N() && lhs.default_value() == rhs.default_value() && lhs.kwarg_only() == rhs.kwarg_only() && (lhs.alias_info() == rhs.alias_info() || (lhs.alias_info() != nullptr && rhs.alias_info() != nullptr && *lhs.alias_info() == *rhs.alias_info())); } inline bool operator!=(const Argument& lhs, const Argument& rhs) { return !(lhs == rhs); } enum struct TORCH_API SchemaArgType { input, output }; /** * struct SchemaArgument * * Structure used to represent arguments or returns for a schema. */ struct TORCH_API SchemaArgument { SchemaArgType type; size_t index; SchemaArgument(SchemaArgType tpe, size_t idx) : type(tpe), index(idx) {} bool operator==(const SchemaArgument& rhs) const { return type == rhs.type && index == rhs.index; } }; bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs); struct TORCH_API FunctionSchema { FunctionSchema( std::string name, std::string overload_name, std::vector<Argument> arguments, std::vector<Argument> returns, bool is_vararg = false, bool is_varret = false) : name_({std::move(name), std::move(overload_name)}), arguments_(std::move(arguments)), returns_(std::move(returns)), is_vararg_(is_vararg), is_varret_(is_varret) { checkSchema(); } FunctionSchema( Symbol name, std::string overload_name, std::vector<Argument> arguments, std::vector<Argument> returns, bool is_vararg = false, bool is_varret = false) : FunctionSchema( name.toQualString(), std::move(overload_name), std::move(arguments), std::move(returns), is_vararg, is_varret) { checkSchema(); } // Checks whether this schema is backward compatible with the old one. // The following conditions must be true: // [Function structure] The new schema's name, overload-name, varargs, and // return arity are the same. // [Output Narrowing] The new schema's output type must be the same class // or inherit from the old schema's output type. // [Argument count] The new schema must have at least as many arguments as // the old schema (considering the list of positional and kwargs). // [Arg Compatibility] Every argument in the old schema has a corresponding // argument in the new schema that: // * is at the same position. // * has the same name. // * is either positional, or kwarg and the old argument was kwarg. // * has the same type, or the old argument's type inherits from the // new argument's type. // [Default Values] Every new argument must have a default value. // E.g. // OK f_new(a, b, c=1) => f_old(a, b) // NOK f_new(a, c=1, *, b) => f_old(a, *, b) // OK f_new(a, b, *, c) => f_old(a, *, b, c) // NOK f_new(a, *, b, c) -> f_old(a, b, *, c) // NOK f_new(a, *, c, b) => f_old(a, *, b, c) // OK f_new(a, *, b, c, d=1) => f_old(a, *, b, c) bool isBackwardCompatibleWith( const FunctionSchema& old, std::ostream* why_not = nullptr) const; // Checks whether this schema is forward compatible with the old one. // The following conditions must be true: // [Function structure] The new schema's name, overload-name, varargs, and // return arity are the same. // [Output Narrowing] The new schema's output type must be the same class // or inherit from the old schema's output type. // [Arg Compatibility] Every argument in the old schema has a corresponding // argument in the new schema that: // * is at the same position. // * has the same name. 
// * is either positional, or kwarg and the old argument was kwarg. // * has the same type, or the old argument's type inherits from the // new argument's type. // [Default Values] Every new argument must have a default value. // Each default value type should NOT be a container type. // [Positioning] All defaults arguments MUST go after either old // default arguments or the end of positional arguments // and right BEFORE all out arguments bool isForwardCompatibleWith( const FunctionSchema& old, std::ostringstream& why_not) const; private: OperatorName name_; std::vector<Argument> arguments_; std::vector<Argument> returns_; // if true then this schema takes an arbitrary number of additional arguments // after the argument specified in arguments // currently this is used primarily to represent 'primitive' operators whose // arguments are not checked by schema bool is_vararg_; bool is_varret_; // if no alias information is directly specified, what kind of "default" // alias information should we infer? // NB: due to alias analysis kind merging, this may be nullopt. Eventually // this should always be set no matter what c10::optional<AliasAnalysisKind> alias_kind_; template <typename T> void checkArg(const IValue& value, const Argument& argument, optional<size_t> pos) const; void checkSchema() const { bool seen_default_arg = false; for (const auto& arg : arguments()) { if (arg.default_value()) { seen_default_arg = true; } else { // we have historically serialized broadcasting lists wo/default values, // so to not break BC allow lists here if (arg.type()->kind() == ListType::Kind) { continue; } TORCH_INTERNAL_ASSERT( !seen_default_arg || arg.kwarg_only(), "Non-default positional argument follows default argument. Parameter ", arg.name(), " in ", *this); } } } public: void dump() const; const OperatorName& operator_name() const { return name_; } const std::string& name() const { return name_.name; } const std::string& overload_name() const { return name_.overload_name; } const std::vector<Argument>& arguments() const { return arguments_; } const std::vector<Argument>& returns() const { return returns_; } bool is_vararg() const { return is_vararg_; } bool is_varret() const { return is_varret_; } bool is_aliasing(const c10::SchemaArgument &argument) const { TORCH_INTERNAL_ASSERT( argument.index < getCorrectList(argument.type).size(), "Invalid index for schema."); const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info(); return aliasInfo; } bool is_mutable() const { return std::any_of( arguments_.cbegin(), arguments_.cend(), [](const Argument& arg) { const AliasInfo* aliasInfo = arg.alias_info(); return aliasInfo && aliasInfo->isWrite(); }); } bool is_mutable(const c10::SchemaArgument &argument) const { TORCH_INTERNAL_ASSERT( argument.index < getCorrectList(argument.type).size(), "Invalid index for schema."); const AliasInfo* aliasInfo = getCorrectList(argument.type)[argument.index].alias_info(); return aliasInfo && aliasInfo->isWrite(); } bool is_mutable(c10::string_view name) const { c10::optional<int> index = argumentIndexWithName(name); TORCH_INTERNAL_ASSERT( index != c10::nullopt, "Schema has no argument named ", name); return is_mutable({c10::SchemaArgType::input, static_cast<size_t>(*index)}); } // Returns whether lhs and rhs may alias directly. // This does not account for cases where lhs or rhs are a container that // may contain elements that alias the other argument. // FunctionSchema::may_contain_alias will include that functionality. 
bool may_alias(const SchemaArgument& lhs, const SchemaArgument& rhs) const; // Returns whether lhs and rhs may alias directly or whether lhs/rhs are a container // that may contain elements that alias the other argument. // bidirectional = false only returns whether lhs may contain an alias of rhs // while bidirectional = true returns both directions. bool may_contain_alias(const SchemaArgument& lhs, const SchemaArgument& rhs, bool bidirectional = true) const; // Returns whether the two AliasTypeSets contain any similarities // ie: whether the two type sets can alias. bool canAliasTypeSetsAlias(const c10::optional<AliasTypeSet> &lhs, const c10::optional<AliasTypeSet> &rhs) const; // Recursively Finds all contained types within the AliasTypeSet. c10::optional<AliasTypeSet> getAliasTypeSetContainedTypes(const c10::optional<AliasTypeSet> &aliasTypeSet) const; // Similar to mapTypeToAliasTypeSet defined in alias_analysis.cpp. // Used to map types to a type such that all types that can alias will be mapped to the same type. // For example, calling this method on 'Optional[List[int]]' is the same as calling this method // on 'List[int]'. c10::optional<AliasTypeSet> mapTypeToAliasTypeSet(const TypePtr& type) const; // Returns either arguments() or returns() depending on the SchemaArgType // output => returns(), input => arguments() const std::vector<Argument>& getCorrectList(SchemaArgType type) const; c10::optional<int> argumentIndexWithName(c10::string_view name) const { for (const auto i : c10::irange(arguments().size())) { if(name == arguments()[i].name()) return i; } return c10::nullopt; } FunctionSchema cloneWithName(std::string name, std::string overload_name) const { return FunctionSchema( std::move(name), std::move(overload_name), arguments(), returns(), is_vararg(), is_varret() ); } FunctionSchema cloneWithArguments(std::vector<Argument> new_arguments) const { return FunctionSchema( name(), overload_name(), std::move(new_arguments), returns(), is_vararg(), is_varret()); } FunctionSchema cloneWithReturns(std::vector<Argument> new_returns) const { return FunctionSchema( name(), overload_name(), arguments(), std::move(new_returns), is_vararg(), is_varret()); } std::string formatTypeMismatchMsg( const Argument& expected, const std::string& actual_type, c10::optional<size_t> position = c10::nullopt, c10::optional<std::string> value = c10::nullopt) const; FunctionSchema cloneWithRemappedTypes( const std::function<TypePtr(TypePtr)> type_map) const; FunctionSchema cloneWithRealTypes(bool with_symint=true) const; // Check that inputs have the correct types and appends any missing default // values. 
template <typename T = c10::PlatformType> void checkAndNormalizeInputs( std::vector<IValue>& inputs, const std::unordered_map<std::string, IValue>& kwargs = std::unordered_map<std::string, IValue>{}) const; std::string findErrorInKwargs(const std::vector<std::string>& kwargs) const; bool hasAnyAliasInfo() const { for (const auto& arg : arguments_) { if (arg.alias_info() != nullptr) { return true; } } for (const auto& ret : returns_) { if (ret.alias_info() != nullptr) { return true; } } return false; } // TODO remove the mutation here bool isDefaultAliasAnalysisKind() const { return !alias_kind_; } AliasAnalysisKind aliasAnalysis() const { return alias_kind_.value_or(AliasAnalysisKind::CONSERVATIVE); } void setAliasAnalysis(AliasAnalysisKind v) { alias_kind_ = v; } c10::optional<c10::string_view> getNamespace() const { return name_.getNamespace(); } // Returns true if we successfully set the namespace (as there // was none set, and false otherwise) bool setNamespaceIfNotSet(const char* ns) { return name_.setNamespaceIfNotSet(ns); } // can a function with this schema be substituted for a function of rhs's // schema and have the program typecheck? // as_method - if true, treat this schema as a method and ignore // the first argument, which will be the object in both cases bool isSubtypeOf(const FunctionSchema& rhs, bool as_method, std::ostream* why_not=nullptr) const; }; inline bool operator==(const FunctionSchema& lhs, const FunctionSchema& rhs) { return lhs.name() == rhs.name() && lhs.overload_name() == rhs.overload_name() && lhs.arguments() == rhs.arguments() && lhs.returns() == rhs.returns() && lhs.is_vararg() == rhs.is_vararg() && lhs.is_varret() == rhs.is_varret(); } inline bool operator!=(const FunctionSchema& lhs, const FunctionSchema& rhs) { return !(lhs == rhs); } // print out Argument, which is compatible with FunctionSchema parser // full format: Type(alias)? name=default_value inline std::ostream& operator<<(std::ostream& out, const Argument& arg) { // for adjusting the ? position. // in schema, we have Tensor?(a!) input, and t(a!)?. // however, t?(a!) doesn't work with schema parser. // so we always use Type(alias)? format // real_type versus fake_type: in order to be compatible with FunctionSchema // parser, printing an argument with either MemoryFormat or Layout type should // give us the original schema string, hence printing out real_type. auto type = arg.real_type(); bool is_opt = type->kind() == OptionalType::Kind; auto unopt_type = is_opt ? type->castRaw<OptionalType>()->getElementType() : type; if (unopt_type->kind() == ListType::Kind) { // sized lists get size N from arg, not type auto list = unopt_type->cast<c10::ListType>(); out << list->getElementType()->str(); if (arg.alias_info() && !arg.alias_info()->containedTypes().empty()){ out << arg.alias_info()->containedTypes()[0]; } std::string N = ""; if (arg.N()) { N = std::to_string(*arg.N()); } out << "[" << N << "]"; } else { out << unopt_type->str(); } // print alias info if it has beforeSets. 
if (arg.alias_info() && !arg.alias_info()->beforeSets().empty()) { out << *arg.alias_info(); } if (is_opt) { out << "?"; } if (!arg.name().empty()) { out << " " << arg.name(); } if (arg.default_value()) { out << "="; if ((type->kind() == c10::TypeKind::StringType || unopt_type->kind() == c10::TypeKind::StringType) && arg.default_value().value().isString()) { printQuotedString(out, arg.default_value().value().toStringRef()); } else if (type->kind() == TypeKind::ListType && type->castRaw<ListType>()->getElementType()->kind() == c10::TypeKind::IntType) { // We want to faithfully replicate JIT schema. // in native_functions.yaml defaults for int arrays with a single value always look like // int[2] stride=1 // instead of // int[2] stride=[1, 1] auto default_val = arg.default_value().value().toIntList(); if (default_val.size() > 1) { auto all_defaults_the_same = true; for (const auto i : c10::irange(1, default_val.size())) { if (default_val[0] != default_val[i]) all_defaults_the_same = false; } if (all_defaults_the_same) { out << default_val[0]; } else { out << arg.default_value().value(); } } else { out << arg.default_value().value(); } } else { out << arg.default_value().value(); } } return out; } inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema); inline std::string toString(const FunctionSchema& schema) { std::ostringstream str; str << schema; return str.str(); } } // namespace c10 namespace std { template<> struct hash<c10::SchemaArgument> { size_t operator()(const c10::SchemaArgument& arg) const { return c10::hash_combine(std::hash<size_t>()(arg.index), std::hash<size_t>()(static_cast<std::size_t>(arg.type))); } }; template<> struct hash<c10::Argument> { size_t operator()(const c10::Argument& arg) const { auto hash = std::hash<std::string>{}(arg.name()); auto type_hash = std::hash<c10::TypePtr>{}(arg.type()); auto kwarg_only_hash = std::hash<bool>{}(arg.kwarg_only()); hash = c10::hash_combine(hash, type_hash); hash = c10::hash_combine(hash, kwarg_only_hash); // hashing optional fields if they exist if (arg.default_value()) { auto default_value_hash = c10::hash<c10::IValue>{}(arg.default_value().value()); hash = c10::hash_combine(hash, default_value_hash); } if (arg.N()) { auto N_hash = std::hash<int64_t>{}(*arg.N()); hash = c10::hash_combine(hash, N_hash); } if (arg.alias_info()) { auto alias_info_hash = std::hash<c10::AliasInfo>{}(*arg.alias_info()); hash = c10::hash_combine(hash, alias_info_hash); } return hash; } }; template<> struct hash<c10::FunctionSchema> { size_t operator()(const c10::FunctionSchema& schema) const { auto hash = std::hash<c10::OperatorName>{}(schema.operator_name()); auto args_hash = c10::hash<std::vector<c10::Argument>>{}(schema.arguments()); auto returns_hash = c10::hash<std::vector<c10::Argument>>{}(schema.returns()); auto is_vararg_hash = std::hash<bool>{}(schema.is_vararg()); auto is_varret_hash = std::hash<bool>{}(schema.is_varret()); hash = c10::hash_combine(hash, args_hash); hash = c10::hash_combine(hash, returns_hash); hash = c10::hash_combine(hash, is_vararg_hash); hash = c10::hash_combine(hash, is_varret_hash); return hash; } }; } // namespace std #include <ATen/core/function_schema_inl.h> // IWYU pragma: keep
23,899
33.788937
133
h
null
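A sketch of assembling a FunctionSchema by hand from the pieces defined above; the operator name "myops::add_one" is invented for illustration. checkSchema() runs in the constructor and enforces that no non-default positional argument follows a defaulted one.

#include <ATen/core/function_schema.h>

c10::FunctionSchema make_example_schema() {
  std::vector<c10::Argument> args;
  args.emplace_back("self", c10::TensorType::get());
  // Defaulted int argument: N and kwarg_only are left at their defaults.
  args.emplace_back(
      "amount", c10::IntType::get(), c10::nullopt, c10::IValue(int64_t(1)));
  std::vector<c10::Argument> rets;
  rets.emplace_back("", c10::TensorType::get());
  // Prints as "myops::add_one(Tensor self, int amount=1) -> Tensor".
  return c10::FunctionSchema(
      "myops::add_one", /*overload_name=*/"",
      std::move(args), std::move(rets));
}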
pytorch-main/aten/src/ATen/core/function_schema_inl.h
#pragma once #include <iostream> // note: windows build doesn't find symbols in operator files unless // this is a header file namespace c10 { inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema) { // eventually this should look almost identical to python arg parser, but // it is simpler for now to work directly on this schema out << schema.name(); if (!schema.overload_name().empty()) { out << "." << schema.overload_name(); } out << "("; bool seen_kwarg_only = false; for (const auto i : c10::irange(schema.arguments().size())) { if (i > 0) out << ", "; if (schema.arguments()[i].kwarg_only() && !seen_kwarg_only) { out << "*, "; seen_kwarg_only = true; } out << schema.arguments()[i]; } if(schema.is_vararg()) { if(!schema.arguments().empty()) out << ", "; out << "..."; } out << ") -> "; const auto& returns = schema.returns(); /* * We should skip parenthesis if we return a single item and it's not varret, * or we return nothing but varret. * * Need special handling for schema * aten::items.str(Dict(str, t) self) -> (str,t)[] * Even though this schema returns a single item, we need add parenthesis. * The is necessary so the printed schema can be parsed by the C++ SchemaParser * Without the extra parenthesis, the parser sees the first parenthesis in '(str,t)' and mistakenly * treat the return type as a tuple. An alternative is to enhance the Lexer * to lookahead multiple tokens to accurately decide if the return type is * a tuple. */ bool need_paren = !( (returns.size() == 1 && !schema.is_varret()) || (returns.empty() && schema.is_varret())); if (returns.size() == 1 && !schema.is_varret()) { std::stringstream return_ss; return_ss << returns.at(0); auto return_str = return_ss.str(); // enclosing the single return item with parenthesis if the return type // starts with a left parenthesis. // // There are 2 cases // 1. something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'. // without the extra parenthesis, the c++ schem parser can not parse it. // 2. something like '-> ((str, str))'. Need extra parenthesis so the return // type is a single tuple rather than two strings. // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about // this. test_serialize_and_deserialize (https://github.com/pytorch/pytorch/blob/master/test/test_function_schema.py#L15) // also covers this case. 
if (!return_str.empty() && return_str.front() == '(') { need_paren = true; } } if (need_paren) { out << "("; } for (const auto i : c10::irange(returns.size())) { if (i > 0) { out << ", "; } out << returns.at(i); } if (schema.is_varret()) { if (!returns.empty()) { out << ", "; } out << "..."; } if (need_paren) { out << ")"; } return out; } inline size_t findFirstOutArg(const std::vector<Argument>& args) { // find the start of out args in the schema for (const auto out_start_idx : c10::irange(args.size())) { if (args.at(out_start_idx).is_out()) { return out_start_idx; } } return args.size(); } inline bool Argument::isBackwardCompatibleWith( const Argument& old, std::ostream* why_not) const { const Argument* lhs = this; const Argument* rhs = &old; if (!(lhs->name() == rhs->name() && lhs->N() == rhs->N() && (lhs->alias_info() == rhs->alias_info() || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr && *lhs->alias_info() == *rhs->alias_info())))) { return false; } if (lhs->kwarg_only() && !rhs->kwarg_only()) { return false; } if (!rhs->type()->isSubtypeOfExt(*lhs->type(), why_not)) { return false; } if (rhs->default_value().has_value() && lhs->default_value() != rhs->default_value()) { return false; } return true; } inline bool Argument::isForwardCompatibleWith( const Argument& old, std::ostream* why_not) const { const Argument* lhs = this; const Argument* rhs = &old; if (!(lhs->name() == rhs->name() && lhs->N() == rhs->N() && (lhs->alias_info() == rhs->alias_info() || (lhs->alias_info() != nullptr && rhs->alias_info() != nullptr && *lhs->alias_info() == *rhs->alias_info())))) { return false; } if (lhs->kwarg_only() && !rhs->kwarg_only()) { return false; } if (!lhs->type()->isSubtypeOfExt(rhs->type(), why_not)) { return false; } if (rhs->default_value().has_value() && lhs->default_value() != rhs->default_value()) { return false; } if (lhs->default_value().has_value() && !rhs->default_value().has_value()) { return false; } return true; } inline std::string FunctionSchema::formatTypeMismatchMsg( const Argument& expected, const std::string& actual_type, c10::optional<size_t> position, c10::optional<std::string> value) const { std::string position_str; if (position) { position_str = c10::str("Position: ", *position, "\n"); } std::string value_str; if (value) { value_str = c10::str("Value: ", *value, "\n"); } return c10::str( name(), "() ", expected.formatTypeMismatchMsg(actual_type), position_str, value_str, "Declaration: ", *this); } inline bool FunctionSchema::isBackwardCompatibleWith( const FunctionSchema& old, std::ostream* why_not) const { if (!(name() == old.name() && overload_name() == old.overload_name() // we are conservative on is_vararg and is_varret, // since they are only used by internal operators && is_vararg() == old.is_vararg() && is_varret() == old.is_varret() && returns().size() == old.returns().size() && arguments().size() >= old.arguments().size())) { return false; } for (const auto i : c10::irange(returns().size())) { // Backwards compatibility requires covariance on argument types // (i.e. more generic), and contravariance on return types (i.e. // more specific). 
if (!old.returns().at(i).isBackwardCompatibleWith( returns().at(i), why_not)) { return false; } } // we want to test both out and default args separately size_t old_out_start_idx = findFirstOutArg(old.arguments()); size_t new_out_start_idx = findFirstOutArg(arguments()); // make sure among the default args, they are backward compatible for (const auto i : c10::irange(old_out_start_idx)) { if (!arguments().at(i).isBackwardCompatibleWith( old.arguments().at(i), why_not)) { return false; } } // Validate that all new arguments provided has a default value for (const auto i : c10::irange(old_out_start_idx, new_out_start_idx)) { if (!arguments().at(i).default_value()) { if (why_not) { *why_not << "Function schema not backward compatible since the new argument '" << arguments().at(i).name() << "' of type " << arguments().at(i).type()->str() << " did not provide a default value."; } return false; } } // now compare the out args for (const auto i : c10::irange(old_out_start_idx, old.arguments().size())) { if (!arguments() .at(i - old_out_start_idx + new_out_start_idx) .isBackwardCompatibleWith(old.arguments().at(i), why_not)) { return false; } } return true; } inline bool FunctionSchema::isForwardCompatibleWith( const FunctionSchema& old, std::ostringstream& why_not) const { if (!(name() == old.name() && overload_name() == old.overload_name() // we are conservative on is_vararg and is_varret, // since they are only used by internal operators && is_vararg() == old.is_vararg() && is_varret() == old.is_varret() && returns().size() == old.returns().size())) { return false; } // we want to test both out and default args separately size_t old_out_start_idx = findFirstOutArg(old.arguments()); size_t new_out_start_idx = findFirstOutArg(arguments()); if (old.arguments().size() - old_out_start_idx != arguments().size() - new_out_start_idx) { if (why_not) { why_not << "Function schema should have the " << "same number of out arguments"; } return false; } // make sure among the default args, they are forward compatible for (size_t i = 0; i < std::min(old_out_start_idx, new_out_start_idx); i++) { if (!arguments().at(i).isForwardCompatibleWith(old.arguments().at(i))) { if (why_not) { why_not << "'" << arguments().at(i).name() << "'" << " is not forward compatible with the older version of the schema"; } return false; } } // Validate that all new arguments provided has a default value for (size_t i = old_out_start_idx; i < new_out_start_idx; ++i) { if (!arguments().at(i).default_value()) { if (why_not) { why_not << "Function schema is not forward compatible since the new argument '" << arguments().at(i).name() << "' of type " << arguments().at(i).type()->str() << " did not provide a default value."; } return false; } auto default_val = arguments().at(i).default_value().value(); if (default_val.isList() || default_val.isGenericDict()) { if (why_not) { why_not << "Function schema is not forward compatible since the new argument '" << arguments().at(i).name() << "' of type " << arguments().at(i).type()->str() << " has a container type " << "as its default value."; } return false; } } // now compare the out args for (size_t i = old_out_start_idx; i < old.arguments().size(); i++) { if (!arguments() .at(i - old_out_start_idx + new_out_start_idx) .isForwardCompatibleWith(old.arguments().at(i))) { if (why_not) { why_not << "Out argument '" << "'" << arguments().at(i).name() << " is not FC with the older version of the schema"; } return false; } } return true; } template<typename T> inline void FunctionSchema::checkArg( 
const IValue& value, const Argument& argument, optional<size_t> pos) const { if (value.isTensor() && argument.type() == TensorType::get()) { // Fast-path for the common case return; } if (!value.type<T>()->isSubtypeOf(*argument.type())) { TORCH_CHECK( false, formatTypeMismatchMsg( argument, value.type<T>()->repr_str(), pos)); } } inline std::string FunctionSchema::findErrorInKwargs(const std::vector<std::string>& kwargs) const { // First check if any of the kwargs are unknown, i.e. don't match the name of // any argument in the schema. for (const auto& kwarg : kwargs) { if (!std::count_if( arguments().begin(), arguments().end(), [&kwarg](const Argument& argument) { return argument.name() == kwarg; })) { return c10::str( "Unknown keyword argument '", kwarg, "' for operator '", name(), "'. Schema: ", *this); } } // If there are unconsumed kwargs but none of them were unknown, the first // positional argument present in the kwargs is duplicated. for (const auto& argument : arguments()) { if (std::find(kwargs.begin(), kwargs.end(), argument.name()) != kwargs.end()) { AT_ASSERT(!argument.default_value()); return c10::str( "Argument '", argument.name(), "' specified both as positional and ", "keyword argument. Schema: ", *this); } } return ""; } template <typename T> inline void FunctionSchema::checkAndNormalizeInputs( std::vector<IValue>& inputs, const std::unordered_map<std::string, IValue>& kwargs) const { // Do we have more inputs than the schema accepts? TORCH_CHECK( inputs.size() <= arguments().size(), "Expected at most ", arguments().size(), " argument(s) for operator '", name(), "', but received ", inputs.size(), " argument(s). Declaration: ", *this); size_t consumed_kwargs = 0; for (const auto pos : c10::irange(arguments().size())) { const auto& argument = arguments()[pos]; if (pos < inputs.size()) { checkArg<T>(inputs[pos], argument, pos); continue; } auto it = kwargs.find(argument.name()); if (it != kwargs.end()) { checkArg<T>(it->second, argument, nullopt); inputs.push_back(it->second); consumed_kwargs++; continue; } if (argument.default_value()) { inputs.push_back(*argument.default_value()); continue; } AT_ERROR( name(), "() is missing value for argument '", argument.name(), "'. Declaration: ", *this); } if (consumed_kwargs != kwargs.size()) { std::vector<std::string> names; names.reserve(kwargs.size()); for(const auto& k : kwargs) { names.emplace_back(k.first); } throw std::runtime_error(findErrorInKwargs(names)); } } inline FunctionSchema FunctionSchema::cloneWithRemappedTypes( const std::function<TypePtr(TypePtr)> type_map) const { auto update_args = [&](const std::vector<Argument>& args) { std::vector<Argument> new_args; new_args.reserve(args.size()); for(const Argument& arg : args) { new_args.emplace_back(arg.cloneWithType(type_map(arg.type()))); } return new_args; }; return FunctionSchema( name(), overload_name(), update_args(arguments()), update_args(returns()), is_vararg(), is_varret()); } // covariant subtyping of list of Arguments inline bool isSubtypeOfList( ArrayRef<Argument> child, ArrayRef<Argument> parent, std::ostream* why_not) { if (child.size() != parent.size()) { return false; } for (const auto i : c10::irange(child.size())) { const Argument& c = child[i]; const Argument& p = parent[i]; if (c.name() != p.name()) { return false; } if (!c.type()->isSubtypeOfExt(*p.type(), why_not)) { return false; } } return true; } inline bool FunctionSchema::isSubtypeOf( const FunctionSchema& rhs, bool as_method, std::ostream* why_not) const { size_t start = as_method ? 
1 : 0; // functions are contravariant in arguments but covariant in returns return isSubtypeOfList( ArrayRef<Argument>(rhs.arguments()).slice(start), ArrayRef<Argument>(arguments()).slice(start), why_not) && isSubtypeOfList(returns(), rhs.returns(), why_not); } } // namespace c10
14,970
29.995859
125
h
null
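A sketch of what the inline pieces above provide at the call site: toString() goes through the operator<< defined here, and checkAndNormalizeInputs() appends defaulted arguments in place. The schema is assumed to come from elsewhere, e.g. the make_example_schema() sketch after the previous record.

#include <ATen/core/function_schema.h>

void normalize_sketch(const c10::FunctionSchema& schema, const at::Tensor& t) {
  std::vector<c10::IValue> inputs{t};
  // For "myops::add_one(Tensor self, int amount=1) -> Tensor" this appends
  // the default amount=1, growing inputs from 1 to 2 elements.
  schema.checkAndNormalizeInputs(inputs);
  std::string printed = c10::toString(schema); // round-trips via operator<<
  (void)printed;
}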
pytorch-main/aten/src/ATen/core/functional.h
#pragma once

#include <vector>
#include <c10/util/ArrayRef.h>

namespace c10 {

// The passed in function must take T by value (T), or by
// const reference (const T&); taking T by non-const reference
// will result in an error like:
//
//    error: no type named 'type' in 'class std::result_of<foobar::__lambda(T)>'
//
// No explicit template parameters are required.

// Overload for explicit function and ArrayRef
template<class F, class T>
inline auto fmap(const T& inputs, const F& fn) -> std::vector<decltype(fn(*inputs.begin()))> {
  std::vector<decltype(fn(*inputs.begin()))> r;
  r.reserve(inputs.size());
  for(const auto & input : inputs)
    r.push_back(fn(input));
  return r;
}

// C++ forbids taking an address of a constructor, so here's a workaround...
// Overload for constructor (R) application
template<typename R, typename T>
inline std::vector<R> fmap(const T& inputs) {
  std::vector<R> r;
  r.reserve(inputs.size());
  for(auto & input : inputs)
    r.push_back(R(input));
  return r;
}

template<typename F, typename T>
inline std::vector<T> filter(at::ArrayRef<T> inputs, const F& fn) {
  std::vector<T> r;
  r.reserve(inputs.size());
  for(auto & input : inputs) {
    if (fn(input)) {
      r.push_back(input);
    }
  }
  return r;
}

template<typename F, typename T>
inline std::vector<T> filter(const std::vector<T>& inputs, const F& fn) {
  return filter<F, T>(static_cast<at::ArrayRef<T>>(inputs), fn);
}

} // namespace c10
1,460
25.563636
94
h
null
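A small sketch of fmap and filter in action; the result element type of fmap is deduced from the lambda's return type, per the comment above.

#include <ATen/core/functional.h>

void fmap_filter_sketch() {
  std::vector<int> xs = {1, 2, 3, 4};
  // std::vector<int> {2, 4, 6, 8}: the lambda takes int by value.
  auto doubled = c10::fmap(xs, [](int x) { return x * 2; });
  // std::vector<int> {2, 4}: keeps elements for which the predicate holds.
  auto evens = c10::filter(xs, [](int x) { return x % 2 == 0; });
  (void)doubled;
  (void)evens;
}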
pytorch-main/aten/src/ATen/core/interned_strings.h
#pragma once #include <vector> #include <cstdint> #include <string> #include <unordered_map> #include <algorithm> #include <c10/macros/Macros.h> #include <ATen/core/aten_interned_strings.h> #include <ATen/core/symbol.h> namespace c10 { #define FORALL_NS_SYMBOLS(_) \ _(namespaces, prim) \ _(namespaces, prims) \ _(namespaces, nvprims) \ _(namespaces, aten) \ _(namespaces, cuda) \ _(namespaces, onnx) \ _(namespaces, attr) \ _(namespaces, scope) \ _(namespaces, user) \ _(namespaces, _caffe2) \ _(namespaces, dimname) \ _(namespaces, namespaces) \ _(prim, Assign) \ _(prim, BroadcastingChunk) \ _(prim, BroadcastSizes) \ _(prim, ReductionSizes) \ _(prim, Constant) \ _(prim, ChunkSizes) \ _(prim, ConstantMKLDNNTensor) \ _(prim, BroadcastMKLDNNTensors) \ _(prim, MKLDNNGroup) \ _(prim, MKLDNNHardSwish) \ _(prim, MKLDNNHardSigmoid) \ _(prim, MKLDNNHardTanh) \ _(prim, MKLDNNClamp) \ _(prim, StaticRuntimeCopyOuts) \ _(prim, Drop) \ _(prim, Eval) \ _(prim, Expand) /* onnx */ \ _(prim, FusionGroup) \ _(prim, CudaFusionGroup) \ _(prim, CudaFusionGuard) \ _(prim, oneDNNFusionGroup) \ _(prim, oneDNNFusionGuard) \ _(prim, FunctionalGraph) \ _(prim, add_optional) \ _(prim, view_copy) \ _(prim, permute_copy) \ _(prim, reshape_copy) \ _(prim, squeeze_copy) \ _(prim, t_copy) \ _(prim, transpose_copy) \ _(prim, unsqueeze_copy) \ _(prim, flatten_copy) \ _(prim, expand_copy) \ _(prim, expand_as_copy) \ _(prim, DifferentiableGraph) \ _(prim, TensorExprGroup) \ _(prim, TensorExprDynamicGroup) \ _(prim, StaticSubgraph) \ _(prim, If) \ _(prim, Jump) /* debug */ \ _(prim, JumpNZ) /* debug */ \ _(prim, JumpZ) /* debug */ \ _(prim, Load) \ _(prim, Loop) \ _(prim, Param) \ _(prim, PackPadded) /* onnx */ \ _(prim, PadPacked) /* onnx */ \ _(prim, Placeholder) /* debug */ \ _(prim, Print) \ _(prim, EmptyListLiteral) \ _(prim, LegacyTypedConstructor) \ _(prim, PythonOp) \ _(prim, IgnoredPythonOp) \ _(prim, Reverse) \ _(prim, Return) \ _(prim, ReturnStmt) \ _(prim, BreakStmt) \ _(prim, ContinueStmt) \ _(prim, ComprehensionScope) \ _(prim, Store) \ _(prim, AutogradZero) \ _(prim, AutogradAnyNonZero) \ _(prim, AutogradAllNonZero) \ _(prim, AutogradAllZero) \ _(prim, Starred) \ _(prim, TupleConstruct) \ _(prim, TupleUnpack) \ _(prim, TupleIndex) \ _(prim, TupleSlice) \ _(prim, ListConstruct) \ _(prim, ListUnpack) \ _(prim, DictConstruct) \ _(prim, ModuleContainerIndex) \ _(prim, EnumName) \ _(prim, EnumValue) \ _(prim, StringIndex) \ _(prim, NumToTensor) \ _(prim, Uninitialized) \ _(prim, VarConcat) \ _(prim, VarStack) \ _(prim, With) \ _(prim, Enter) \ _(prim, Exit) \ _(prim, IfThenElse) \ _(aten, Bool) \ _(aten, Int) \ _(aten, FloatImplicit) \ _(aten, ComplexImplicit) \ _(aten, IntImplicit) \ _(aten, ScalarImplicit) \ _(aten, Float) \ _(aten, Complex) \ _(aten, str) \ _(aten, Delete) \ _(prim, device) \ _(prim, dtype) \ _(prim, layout) \ _(prim, id) \ _(prim, requires_grad) \ _(prim, MakeTestTensor) /* test */ \ _(prim, AutogradAdd) \ _(prim, GradOf) \ _(aten, grad) \ _(aten, backward) \ _(prim, Guard) \ _(prim, BailOut) \ _(prim, TypeCheck) \ _(prim, RequiresGradCheck) \ _(prim, FallbackGraph) \ _(prim, FusedConcat) \ _(prim, ConstantChunk) \ _(prim, MMTreeReduce) \ _(prim, MMBatchSide) \ _(prim, list) \ _(prim, dict) \ _(prim, min) \ _(prim, max) \ _(prim, abs) \ _(aten, divmod) \ _(prim, zip) \ _(prim, enumerate) \ _(prim, range) \ _(prim, rangelist) \ _(prim, isinstance) \ _(prim, tolist) \ _(prim, unchecked_cast) \ _(aten, _grad_sum_to_size) \ _(aten, _size_if_not_equal) \ _(aten, _ncf_unsqueeze) \ _(aten, warn) \ _(aten, 
sorted) \ _(aten, floordiv) \ _(aten, __range_length) \ _(aten, __derive_index) \ _(aten, __round_to_zero_floordiv) \ _(aten, is_scripting) \ _(aten, _unwrap_optional) \ _(prim, fork) \ _(prim, awaitable) \ _(prim, forkClosure) \ _(prim, awaitableClosure) \ _(prim, awaitable_nowait) \ _(prim, awaitable_wait) \ _(prim, RaiseException) \ _(prim, Closure) \ _(prim, CreateObject) \ _(prim, SetAttr) \ _(prim, GetAttr) \ _(prim, HasAttr) \ _(prim, profile) \ _(prim, profile_ivalue) \ _(prim, AddStatValue) \ _(prim, TimePoint) \ _(prim, CallFunction) \ _(prim, CallMethod) \ _(prim, LoopContinuation) \ _(prim, annotate) \ _(prim, TracedModuleForward) \ _(prim, TracedFork) \ _(prim, TracedAttr) \ _(prim, rpc_async) \ _(prim, rpc_sync) \ _(prim, rpc_remote) \ _(prim, is_cuda) \ _(aten, append) \ _(aten, as_tensor) \ _(aten, adaptive_avg_pool2d_backward) \ _(aten, dim) \ _(aten, format) \ _(aten, percentFormat) \ _(aten, __not__) \ _(aten, __is__) \ _(aten, __isnot__) \ _(aten, _ger) \ _(aten, __getitem__) \ _(aten, _set_item) \ _(aten, manual_seed) \ _(aten, device) \ _(aten, hash) \ _(aten, len) \ _(aten, list) \ _(aten, dict) \ _(aten, wait) \ _(aten, save) \ _(aten, keys) \ _(aten, ord) \ _(aten, chr) \ _(aten, hex) \ _(aten, oct) \ _(aten, clear) \ _(aten, setdefault) \ _(aten, bin) \ _(aten, pop) \ _(aten, insert) \ _(aten, tensor) \ _(prim, unchecked_unwrap_optional) \ _(aten, __contains__) \ _(prim, BailoutTemplate) \ _(prim, grad) \ _(cuda, _set_device) \ _(cuda, set_stream) \ _(cuda, _current_device) \ _(cuda, synchronize) \ _(aten, has_torch_function) \ _(aten, is_autocast_enabled) \ _(aten, is_autocast_cpu_enabled) \ _(aten, is_autocast_xla_enabled) \ FORALL_ATEN_BASE_SYMBOLS(_) \ _(onnx, Add) \ _(onnx, Concat) \ _(onnx, Constant) \ _(onnx, ConstantFill) \ _(onnx, Div) \ _(onnx, GRU) \ _(onnx, Gather) \ _(onnx, Gemm) \ _(onnx, LSTM) \ _(onnx, MatMul) \ _(onnx, Min) \ _(onnx, Max) \ _(onnx, Mul) \ _(onnx, Pow) \ _(onnx, RNN) \ _(onnx, Shape) \ _(onnx, Size) \ _(onnx, Slice) \ _(onnx, Softmax) \ _(onnx, Squeeze) \ _(onnx, Sub) \ _(onnx, Transpose) \ _(onnx, Unsqueeze) \ _(onnx, Loop) \ _(onnx, If) \ _(onnx, Reshape) \ _(onnx, Expand) \ _(onnx, Equal) \ _(onnx, Greater) \ _(onnx, GreaterOrEqual) \ _(onnx, Less) \ _(onnx, LessOrEqual) \ _(onnx, Not) \ _(aten, ATen) \ _(onnx, Split) \ _(onnx, ConstantOfShape) \ _(onnx, Cast) \ _(onnx, Mod) \ _(onnx, Sqrt) \ _(onnx, SplitToSequence) \ _(onnx, SequenceAt) \ _(onnx, SequenceConstruct) \ _(onnx, SequenceEmpty) \ _(onnx, SequenceInsert) \ _(onnx, SequenceErase) \ _(onnx, ConcatFromSequence) \ _(onnx, Identity) \ _(onnx, SoftmaxCrossEntropyLoss) \ _(onnx, NegativeLogLikelihoodLoss) \ _(onnx, LogSoftmax) \ _(onnx, ReduceL1) \ _(onnx, ReduceL2) \ _(onnx, Conv) \ _(onnx, BatchNormalization) \ _(onnx, ReduceMean) \ _(onnx, ReduceProd) \ _(onnx, Relu) \ _(onnx, Neg) \ _(onnx, NonZero) \ _(onnx, Range) \ _(onnx, Tile) \ _(onnx, Where) \ _(onnx, Optional) \ _(onnx, OptionalGetElement) \ _(onnx, OptionalHasElement) \ FORALL_ATTR_BASE_SYMBOLS(_) \ _(attr, Subgraph) \ _(attr, ReverseSubgraph) \ _(attr, f_real_outputs) \ _(attr, df_input_vjps) \ _(attr, df_input_captured_inputs) \ _(attr, df_input_captured_outputs) \ _(attr, df_output_vjps) \ _(attr, axes) \ _(attr, symbolic_shape_inputs) \ _(attr, allow_stack_outputs) \ _(attr, striding_inputs_desc) \ _(attr, striding_outputs_desc) \ _(attr, broadcast) \ _(attr, direction) \ _(attr, ends) \ _(attr, inplace) \ _(attr, input_as_shape) \ _(attr, is_zero) \ _(attr, num_none) \ _(attr, num_present) \ _(attr, perm) 
\ _(attr, starts) \ _(attr, profiled_type) \ _(attr, transA) \ _(attr, transB) \ _(attr, name) \ _(attr, module) \ _(attr, beg) \ _(attr, idx) \ _(attr, split) \ _(attr, slot) \ _(attr, kinds) \ _(attr, types) \ _(attr, scope) \ _(attr, keepdims) \ _(attr, cache_id) \ _(attr, new_axis) \ _(attr, warn_id) \ _(attr, output_layouts) \ _(attr, allowzero) \ _(attr, seen_none) \ _(attr, overload_name) enum class _keys : unique_t { #define DEFINE_KEY(ns, s) ns##_##s, FORALL_NS_SYMBOLS(DEFINE_KEY) #undef DEFINE_KEY num_symbols }; #define DEFINE_SYMBOL(ns, s) \ namespace ns { constexpr Symbol s(static_cast<unique_t>(_keys::ns##_##s)); } FORALL_NS_SYMBOLS(DEFINE_SYMBOL) #undef DEFINE_SYMBOL } // namespace c10
13,391
36.407821
78
h
null
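A sketch of why the symbols are materialized as constexpr values: node kinds can be compared (or switched on) without touching the string table at runtime.

#include <ATen/core/interned_strings.h>

bool is_constant_or_print(c10::Symbol kind) {
  // prim::Constant and prim::Print are constexpr Symbols generated by
  // FORALL_NS_SYMBOLS above, so this compiles to integer comparisons.
  return kind == c10::prim::Constant || kind == c10::prim::Print;
}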
pytorch-main/aten/src/ATen/core/interned_strings_class.h
#include <cstdint>
#include <cstring>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>
#include <ATen/core/symbol.h>
#include <c10/util/Exception.h>

namespace c10 {

struct TORCH_API InternedStrings {
  InternedStrings();
  Symbol symbol(const std::string& s);
  std::pair<const char*, const char*> string(Symbol sym);
  Symbol ns(Symbol sym);

 private:
  // prereq - holding mutex_
  Symbol _symbol(const std::string& s);
  std::pair<const char*, const char*> customString(Symbol sym);
  std::unordered_map<std::string, Symbol> string_to_sym_;

  struct SymbolInfo {
    Symbol ns;
    std::string qual_name;
    std::string unqual_name;
  };
  std::vector<SymbolInfo> sym_to_info_;

  std::mutex mutex_;
};

} // namespace c10
760
20.742857
63
h
null
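InternedStrings is the process-wide table that backs Symbol; user code reaches it through Symbol::fromQualString rather than constructing it directly. A sketch of the interning guarantee:

#include <ATen/core/symbol.h>

void interning_sketch() {
  // Both lookups hit the same InternedStrings entry, so the Symbols
  // compare equal by value.
  c10::Symbol a = c10::Symbol::fromQualString("user::my_op");
  c10::Symbol b = c10::Symbol::fromQualString("user::my_op");
  bool same = (a == b); // true
  (void)same;
}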
pytorch-main/aten/src/ATen/core/operator_name.h
#pragma once

#include <c10/macros/Macros.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <c10/util/string_view.h>
#include <cstring> // strlen, used by setNamespaceIfNotSet
#include <string>
#include <utility>
#include <ostream>

namespace c10 {

// TODO: consider storing namespace separately too
struct OperatorName final {
  std::string name;
  std::string overload_name;
  OperatorName(std::string name, std::string overload_name)
      : name(std::move(name)), overload_name(std::move(overload_name)) {}

  // TODO: These two functions below are slow! Fix internal data structures so
  // I don't have to manually reconstruct the namespaces!

  // Return the namespace of this OperatorName, if it exists. The
  // returned string_view is only live as long as the OperatorName
  // exists and name is not mutated
  c10::optional<c10::string_view> getNamespace() const {
    auto pos = name.find("::");
    if (pos == std::string::npos) {
      return c10::nullopt;
    } else {
      return c10::make_optional(c10::string_view(name.data(), pos));
    }
  }

  // Returns true if we successfully set the namespace
  bool setNamespaceIfNotSet(const char* ns) {
    if (!getNamespace().has_value()) {
      const auto ns_len = strlen(ns);
      const auto old_name_size = name.size();
      name.resize(ns_len + 2 + old_name_size);
      // Shift current value of name to the end of the new space.
      name.replace(name.size() - old_name_size, old_name_size, name, 0, old_name_size);
      name.replace(0, ns_len, ns, ns_len);
      name[ns_len] = ':';
      name[ns_len + 1] = ':';
      return true;
    } else {
      return false;
    }
  }
};

// Non-owning view of an OperatorName. Unlike OperatorName, most of
// its functions are constexpr, so it can be used for compile time
// computations
struct OperatorNameView final {
  c10::string_view name;
  c10::string_view overload_name;
  constexpr OperatorNameView(c10::string_view name, c10::string_view overload_name)
      : name(name), overload_name(overload_name) {}
  // Parses strings like "foo.overload" and also "foo"
  constexpr static OperatorNameView parse(c10::string_view full_name) {
    auto i = full_name.find('.');
    if (i == c10::string_view::npos) {
      return OperatorNameView(full_name, c10::string_view());
    } else {
      return OperatorNameView(full_name.substr(0, i), full_name.substr(i + 1));
    }
  }
};

inline bool operator==(const OperatorName& lhs, const OperatorName& rhs) {
  return lhs.name == rhs.name && lhs.overload_name == rhs.overload_name;
}

inline bool operator!=(const OperatorName& lhs, const OperatorName& rhs) {
  return !operator==(lhs, rhs);
}

TORCH_API std::string toString(const OperatorName& opName);
TORCH_API std::ostream& operator<<(std::ostream&, const OperatorName&);

} // namespace c10

namespace std {
template <>
struct hash<::c10::OperatorName> {
  size_t operator()(const ::c10::OperatorName& x) const {
    return std::hash<std::string>()(x.name) ^ (~ std::hash<std::string>()(x.overload_name));
  }
};
}
3,018
31.462366
94
h
null
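A sketch of the namespace helpers on OperatorName; "aten" and "prim" stand in for whatever namespace a registration path would supply.

#include <ATen/core/operator_name.h>

void operator_name_sketch() {
  c10::OperatorName op("add", "Tensor");        // no namespace yet
  bool set = op.setNamespaceIfNotSet("aten");   // true; name is now "aten::add"
  auto ns = op.getNamespace();                  // contains "aten"
  bool again = op.setNamespaceIfNotSet("prim"); // false; already namespaced
  (void)set; (void)ns; (void)again;
}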
pytorch-main/aten/src/ATen/core/qualified_name.h
#pragma once

#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/StringUtil.h>
#include <c10/util/irange.h>
#include <string>

namespace c10 {

// Represents a name of the form "foo.bar.baz"
struct QualifiedName {
  QualifiedName() = default;

  // `name` can be a dotted string, like "foo.bar.baz", or just a bare name.
  /* implicit */ QualifiedName(const std::string& name) {
    TORCH_CHECK(!name.empty());
    // split the string into its atoms.
    size_t startSearchFrom = 0;
    size_t pos = name.find(delimiter_, startSearchFrom);

    while (pos != std::string::npos) {
      auto atom = name.substr(startSearchFrom, pos - startSearchFrom);
      TORCH_INTERNAL_ASSERT(
          !atom.empty(), "Invalid name for qualified name: '", name, "'");
      atoms_.push_back(std::move(atom));
      startSearchFrom = pos + 1;
      pos = name.find(delimiter_, startSearchFrom);
    }

    auto finalAtom = name.substr(startSearchFrom);
    TORCH_INTERNAL_ASSERT(
        !finalAtom.empty(), "Invalid name for qualified name: '", name, "'");
    atoms_.emplace_back(std::move(finalAtom));

    cacheAccessors();
  }

  explicit QualifiedName(std::vector<std::string> atoms)
      : atoms_(std::move(atoms)) {
    for (const auto& atom : atoms_) {
      TORCH_CHECK(!atom.empty(), "Atom cannot be empty");
      TORCH_CHECK(
          atom.find(delimiter_) == std::string::npos,
          "Delimiter not allowed in atom");
    }
    cacheAccessors();
  }

  // Unnecessary copy. Ideally we'd use something like std::string_view.
  /* implicit */ QualifiedName(const char* name)
      : QualifiedName(std::string(name)) {}

  // `name` must be a bare name (no dots!)
  explicit QualifiedName(const QualifiedName& prefix, std::string name) {
    TORCH_INTERNAL_ASSERT(!name.empty());
    TORCH_INTERNAL_ASSERT(name.find(delimiter_) == std::string::npos);
    atoms_.insert(atoms_.begin(), prefix.atoms_.begin(), prefix.atoms_.end());
    atoms_.push_back(std::move(name));

    cacheAccessors();
  }

  // Is `this` a prefix of `other`?
  // For example, "foo.bar" is a prefix of "foo.bar.baz"
  bool isPrefixOf(const QualifiedName& other) const {
    const auto& thisAtoms = atoms_;
    const auto& otherAtoms = other.atoms_;

    if (thisAtoms.size() > otherAtoms.size()) {
      // Can't be a prefix if it's bigger
      return false;
    }
    for (const auto i : c10::irange(thisAtoms.size())) {
      if (thisAtoms[i] != otherAtoms[i]) {
        return false;
      }
    }
    return true;
  }

  // The fully qualified name, like "foo.bar.baz"
  const std::string& qualifiedName() const {
    return qualifiedName_;
  }

  // The leading qualifier, like "foo.bar"
  const std::string& prefix() const {
    return prefix_;
  }

  // The base name, like "baz"
  const std::string& name() const {
    return name_;
  }

  const std::vector<std::string>& atoms() const {
    return atoms_;
  }

  bool operator==(const QualifiedName& other) const {
    return this->qualifiedName_ == other.qualifiedName_;
  }

  bool operator!=(const QualifiedName& other) const {
    return !(*this == other);
  }

 private:
  static constexpr char delimiter_ = '.';

  // Helper for cacheAccessors() below.
  template<typename T>
  std::string join(char delimiter, const T& v) {
    std::string out;
    size_t reserve = 0;
    for (const auto& e : v) {
      reserve += e.size() + 1;
    }
    out.reserve(reserve);
    for (const auto i : c10::irange(v.size())) {
      if (i != 0) {
        out.push_back(delimiter);
      }
      out.append(v[i]);
    }
    return out;
  }

  void cacheAccessors() {
    qualifiedName_ = join(delimiter_, atoms_);
    if (atoms_.size() > 1) {
      ArrayRef<std::string> view(atoms_);
      const auto prefixView = view.slice(0, view.size() - 1);
      prefix_ = join(delimiter_, prefixView);
    }

    if (!atoms_.empty()) {
      name_ = atoms_.back();
    }
  }

  // The actual list of names, like "{foo, bar, baz}"
  std::vector<std::string> atoms_;

  /*
   * Cached accessors, derived from `atoms_`.
   */
  std::string qualifiedName_;
  std::string prefix_;
  std::string name_;
};
} // namespace c10

namespace std {
template <>
struct hash<c10::QualifiedName> {
  size_t operator()(const c10::QualifiedName& n) const noexcept {
    return std::hash<std::string>()(n.qualifiedName());
  }
};
} // namespace std
4,373
26
85
h
null
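A sketch of splitting and extending dotted names with QualifiedName:

#include <ATen/core/qualified_name.h>

void qualified_name_sketch() {
  c10::QualifiedName qn("foo.bar.baz");
  // prefix() == "foo.bar", name() == "baz", atoms() == {"foo", "bar", "baz"}
  c10::QualifiedName child(qn, "quux");  // "foo.bar.baz.quux"
  bool is_prefix = qn.isPrefixOf(child); // true
  (void)is_prefix;
}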
pytorch-main/aten/src/ATen/core/rref_interface.h
#pragma once

#include <c10/util/intrusive_ptr.h>
#include <ATen/core/type_ptr.h>

namespace c10 {

struct Type;
using worker_id_t = int16_t;

// This abstract class contains only user-facing APIs, and will be shared
// between jit and distributed to implement TorchScript support.
class C10_EXPORT RRefInterface : public c10::intrusive_ptr_target {
 public:
  RRefInterface() = default;
  // RRef is made NOT copyable NOT movable to prevent messing up reference
  // counting.
  RRefInterface(const RRefInterface& other) = delete;
  RRefInterface(RRefInterface&& other) = delete;
  RRefInterface& operator=(RRefInterface&& other) = delete;

  ~RRefInterface() override = default;

  // returns the worker id of the owner
  virtual worker_id_t owner() const = 0;

  // returns the worker name of the owner
  virtual std::string ownerName() const = 0;

  // Returns true if this is the ``OwnerRRef``
  virtual bool isOwner() const = 0;

  // Returns true if this is an ``OwnerRRef`` or if this ``UserRRef`` has been
  // confirmed by its owner.
  virtual bool confirmedByOwner() const = 0;

  virtual const TypePtr type() const = 0;
};

} // namespace c10
1,138
26.780488
78
h
null
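RRefInterface is purely abstract; concrete owner/user RRefs live in the distributed package. A sketch of a consumer that only needs the interface:

#include <ATen/core/rref_interface.h>

bool needs_owner_confirmation(const c10::intrusive_ptr<c10::RRefInterface>& rref) {
  // An OwnerRRef reports isOwner() == true; a UserRRef is only safe to use
  // once confirmedByOwner() is true as well.
  return !rref->isOwner() && !rref->confirmedByOwner();
}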
pytorch-main/aten/src/ATen/core/stack.h
#pragma once #include <type_traits> #include <ATen/core/ivalue.h> #include <c10/util/Deprecated.h> #include <c10/util/irange.h> // TODO move this to c10 namespace namespace torch { namespace jit { using c10::IValue; using Stack = std::vector<IValue>; class Operation { template <typename F, typename Arg> using accepts = std::is_constructible<std::function<void(Arg)>, F&&>; public: template <typename F, std::enable_if_t<accepts<F, Stack*>::value, int> = 0> C10_DEPRECATED_MESSAGE("Please use void(Stack&) to register operator instead.") Operation(F&& raw): op_([raw = std::forward<F>(raw)](Stack& stack) { raw(&stack); }) {} template <typename F, std::enable_if_t<accepts<F, Stack&>::value && !std::is_same<std::decay_t<F>, Operation>::value, int> = 0> Operation(F&& op): op_(std::forward<F>(op)) {} Operation(std::nullptr_t) noexcept {} explicit operator bool() const noexcept { return op_ ? true : false; } void operator()(Stack& stack) { op_(stack); } template <typename T> T* target() noexcept { return op_.target<T>(); } private: std::function<void(Stack&)> op_; }; // An operation with N inputs and M outputs pops the last N inputs off // the stack and pushes its M inputs onto the stack // before: <other stack items> I0, I1, ... IN <- stack.back() // after: <other stack items> O0, O1, ... OM // operations are defined this way so that ownership of inputs can be // transferred to the operation and it can incrementally drop ownership of // tensors when they become unneeded. For large operations, like 'run an entire // subgraph', this functionality is very important for minimizing gpu memory // usage return value is the relative 'offset' to jump to for the next // operation: // pc += 1 + offset // so a return value of 0 goes to the next instruction // treat the last N elements of the stack as a list, looking up // element i static inline IValue& peek(Stack& stack, size_t i, size_t N) { return *(stack.end() - N + i); } static inline IValue& peek(Stack* stack, size_t i, size_t N) { return peek(*stack, i, N); } static inline const IValue& peek(const Stack& stack, size_t i, size_t N) { return *(stack.end() - N + i); } static inline const IValue& peek(const Stack* stack, size_t i, size_t N) { return peek(*stack, i, N); } // treat the last N elements of the stack as a list, looking up the // slice starting at index i and having length len static inline at::ArrayRef<IValue> peekSlice( const Stack& stack, size_t i, size_t len, size_t N) { return at::ArrayRef<IValue>(stack).slice(stack.size() - N + i, len); } static inline at::ArrayRef<IValue> last(const Stack& stack, size_t N) { return peekSlice(stack, 0, N, N); } static inline at::ArrayRef<IValue> last(const Stack* stack, size_t N) { return last(*stack, N); } static inline void drop(Stack& stack, size_t n) { stack.erase(stack.end() - n, stack.end()); } static inline void drop(Stack* stack, size_t n) { drop(*stack, n); } static inline IValue pop(Stack& stack) { auto r = std::move(stack.back()); stack.pop_back(); return r; } static inline IValue pop(Stack* stack) { return pop(*stack); } static inline std::vector<IValue> pop(Stack& stack, size_t n) { std::vector<IValue> result; result.reserve(n); for (const auto i : c10::irange(n)) { result.push_back(std::move(peek(stack, i, n))); } drop(stack, n); return result; } // variadic pop: // int64_t a; at::Tensor b; // pop(stack, a, b); // equivalent to: // b = pop(stack).toTensor(); // a = pop(stack).toInt(); template <typename... Types> static inline void pop(Stack& stack, Types&... 
args) { size_t i = 0; constexpr size_t N = sizeof...(args); (void)std::initializer_list<int>{ (args = std::move(peek(stack, i++, N)).template to<Types>(), 0)...}; drop(stack, N); } template <typename... Types> static inline void pop(Stack* stack, Types&... args) { pop(*stack, args...); } template <typename Type> static inline void push_one(Stack& stack, Type&& arg) { stack.emplace_back(std::forward<Type>(arg)); } static inline void push_one(Stack& stack, c10::TensorOptions options) { stack.emplace_back(c10::typeMetaToScalarType(options.dtype())); stack.emplace_back(options.layout()); stack.emplace_back(options.device()); stack.emplace_back(options.pinned_memory()); } template <typename... Types> static inline void push(Stack& stack, Types&&... args) { (void)std::initializer_list<int>{(push_one(stack, std::forward<Types>(args)), 0)...}; } template <typename... Types> static inline void push(Stack* stack, Types&&... args) { return push(*stack, std::forward<Types>(args)...); } template <class T> static inline void push_list_elements(Stack& stack, const c10::List<T>& elements) { for (T elem : elements) { stack.push_back(std::move(elem)); } } // The packer here is carefully written not to make any unnecessary // copies. // pack takes the return values of aten functions pushes them onto the stack template <typename T> inline void pack(Stack& stack, T&& v) { stack.emplace_back(std::forward<T>(v)); } template <typename T> inline void pack(Stack* stack, T&& v) { pack(*stack, std::forward<T>(v)); } template <std::size_t remaining, typename... Args> struct TuplePacker { // NB: *Not* a universal reference. static void execute(Stack& stack, std::tuple<Args...>&& t) { // NB: The move here does not "destroy" the entire tuple, that is // not what std::move does; only the particular tuple index // processed here gets stolen. pack(stack, std::get<sizeof...(Args) - remaining>(std::move(t))); TuplePacker<remaining - 1, Args...>::execute(stack, std::move(t)); } }; template <typename... Args> struct TuplePacker<0, Args...> { static void execute(Stack& /*stack*/, std::tuple<Args...>&& /*t*/){}; }; template <typename... Args> inline void pack(Stack& stack, std::tuple<Args...>&& t) { TuplePacker<sizeof...(Args), Args...>::execute(stack, std::move(t)); } } // namespace jit } // namespace torch
6,076
29.233831
87
h
null
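A minimal usage sketch for the stack helpers above (illustrative, not part of the file): it assumes a build where the internal ATen headers are on the include path.

#include <ATen/core/stack.h>

#include <cassert>
#include <cstdint>

void stack_demo() {
  torch::jit::Stack stack;
  // push() converts each argument to an IValue and appends it.
  torch::jit::push(stack, static_cast<int64_t>(3), 4.0);
  // Variadic pop() fills the arguments in declaration order from the last N
  // stack slots (so `d` receives the most recently pushed value), then drops
  // those slots.
  int64_t i = 0;
  double d = 0.0;
  torch::jit::pop(stack, i, d);
  assert(i == 3 && d == 4.0);
  assert(stack.empty());
}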
pytorch-main/aten/src/ATen/core/symbol.h
#pragma once #include <c10/macros/Export.h> #include <cstdint> #include <functional> // For std::hash #include <string> namespace c10 { // 'prim' symbols are synthetic operators that occur only in the IR // and don't have corresponding implementations in ATen. // 'onnx' symbols correspond to ONNX operators. Their semantics // are defined in https://github.com/onnx/onnx/blob/master/docs/Operators.md // The particular version we are targeting is specified by '_onnx_opset_version' // in torch.onnx.symbolic_helper // // In general, most ONNX operators won't get an entry here, because they // are handled from the Python end. However, you may occasionally need // to intern an ONNX symbol here so that you can conveniently write an // optimization on ONNX operations. // 'attr' symbols are attribute keys. They are shared between both ONNX and ATen // operators (you disambiguate their meaning by looking at the operator itself). // In general, you only need to define attribute keys that are used by // onnx or prim; ATen attributes are automatically generated in FORALL_ATTR_BASE_SYMBOLS. // Note [Symbol allocation] // ~~~~~~~~~~~~~~~~~~~~~~~~ // // 1. Symbol namespace is split up into namespaces. // // 2. The intended access pattern for built-in symbols is onnx::MatMul // in the c10 namespace (this is a Symbol). // // Built-in constant definition strategy: // - Enum is the most convenient way to generate a contiguous sequence // of numbers for an identifier. // - However, an enum gives you a fresh type. We want onnx::MatMul to // be type Symbol, not some random enum type! // - Therefore, after using enums to generate the sequence of integers, // we then declare constexpr Symbols to get everything the actual Symbol // type we want. Symbols must be constexpr to be valid to be "case"ed on. using unique_t = uint32_t; const std::string& domain_prefix(); // A Symbol is like an interned string, but with a little extra // structure; it is namespaced via SymbolNamespace and the resulting // intern pointers support efficient namespace testing. struct TORCH_API Symbol { explicit constexpr Symbol() : value(0) {}; explicit constexpr Symbol(unique_t uniq) : value(uniq) {} // Get a Symbol for a qualified string like "attr::bar" static Symbol fromQualString(const std::string & s); // Get a Symbol from a domain and an unqualified string like "org.pytorch.attr" and "bar" static Symbol fromDomainAndUnqualString(const std::string & d, const std::string & s); // Constructors for our various namespaced strings. This will construct // the appropriate namespaced string, e.g., "attr::foo" for the // argument "foo", and then attempt to intern it. DO NOT USE THIS // with a string literal; attr::foo should be available in that case // (and if it's not, you should add it to the built-ins list above.) 
static Symbol attr(const std::string & s); static Symbol aten(const std::string & s); static Symbol cuda(const std::string & s); static Symbol onnx(const std::string & s); static Symbol prim(const std::string & s); static Symbol user(const std::string & s); static Symbol caffe2(const std::string & s); static Symbol dimname(const std::string & s); // TODO: eliminate me static Symbol scope(const std::string & s); bool is_attr() const; bool is_aten() const; bool is_cuda() const; bool is_prim() const; bool is_prims() const; bool is_nvprims() const; bool is_onnx() const; bool is_user() const; bool is_caffe2() const; bool is_dimname() const; // So we can switch on this constexpr operator unique_t() const { return value; } Symbol ns() const; // Give a string corresponding to the unqualified version of this name, e.g., // "mm". Use this in a context where the intended namespace of the string is // obvious; this is a *lossy* conversion. const char * toUnqualString() const; // Give a string corresponding to the qualified version of this name, // e.g., "aten::mm". This string format is made available to Python bindings // (so we know how to parse it.) const char * toQualString() const; // This describes a symbol in a case where humans read it. At the moment it's // the same as toQualString. This has to be a const char* returned because // a lot of printf style macros use it. const char * toDisplayString() const; // Give a string corresponding to the domain name for the symbol, // e.g., "org.pytorch.aten". std::string domainString() const; private: explicit Symbol(Symbol ns, const std::string & s); unique_t value; }; static inline bool operator==(Symbol lhs, Symbol rhs) { return static_cast<unique_t>(lhs) == static_cast<unique_t>(rhs); } inline Symbol Symbol::attr(const std::string & s) { return Symbol::fromQualString("attr::" + s); } inline Symbol Symbol::aten(const std::string & s) { return Symbol::fromQualString("aten::" + s); } inline Symbol Symbol::cuda(const std::string & s) { return Symbol::fromQualString("cuda::" + s); } inline Symbol Symbol::onnx(const std::string & s) { return Symbol::fromQualString("onnx::" + s); } inline Symbol Symbol::prim(const std::string & s) { return Symbol::fromQualString("prim::" + s); } inline Symbol Symbol::scope(const std::string & s) { return Symbol::fromQualString("scope::" + s); } inline Symbol Symbol::user(const std::string & s) { return Symbol::fromQualString("user::" + s); } inline Symbol Symbol::caffe2(const std::string & s) { return Symbol::fromQualString("_caffe2::" + s); } inline Symbol Symbol::dimname(const std::string & s) { return Symbol::fromQualString("dimname::" + s); } } // namespace c10 // make symbol behave like an integer in hash tables namespace std { template <> struct hash<c10::Symbol> { size_t operator()(c10::Symbol s) const { return std::hash<uint32_t>()(static_cast<uint32_t>(s)); } }; }
5,874
38.695946
104
h
null
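A brief sketch of how the interned Symbol above is typically used; "aten::mm" is just a familiar built-in chosen for illustration.

#include <ATen/core/symbol.h>

#include <cassert>
#include <cstring>

void symbol_demo() {
  // Interning the same qualified string twice yields the same 32-bit value,
  // so Symbols compare and hash as cheaply as integers.
  c10::Symbol a = c10::Symbol::fromQualString("aten::mm");
  c10::Symbol b = c10::Symbol::aten("mm");
  assert(a == b);
  assert(a.is_aten());
  assert(std::strcmp(a.toUnqualString(), "mm") == 0);
  assert(std::strcmp(a.toQualString(), "aten::mm") == 0);
}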
pytorch-main/aten/src/ATen/core/type_factory.h
#pragma once #include <type_traits> #include <unordered_map> #include <ATen/core/dynamic_type.h> #include <ATen/core/jit_type_base.h> #include <c10/macros/Macros.h> namespace c10 { template <typename T> struct TORCH_API TypeFactoryBase {}; template <> struct TORCH_API TypeFactoryBase<c10::DynamicType> { template <typename T, typename... Args> static c10::DynamicTypePtr create(TypePtr ty, Args&&... args) { return std::make_shared<c10::DynamicType>( c10::DynamicTypeTrait<T>::tagValue(), c10::DynamicType::Arguments(c10::ArrayRef<c10::TypePtr>( {std::move(ty), std::forward<Args>(args)...}))); } template <typename T> static c10::DynamicTypePtr create(std::vector<c10::TypePtr> types) { return std::make_shared<c10::DynamicType>( c10::DynamicTypeTrait<T>::tagValue(), c10::DynamicType::Arguments(types)); } static c10::DynamicTypePtr createNamedTuple( const std::string& name, const std::vector<c10::string_view>& fields, const std::vector<c10::TypePtr>& types) { return std::make_shared<c10::DynamicType>( c10::DynamicType::Tag::Tuple, name, c10::DynamicType::Arguments(fields, types)); } template <typename T> C10_ERASE static c10::DynamicTypePtr createNamed(const std::string& name) { return std::make_shared<c10::DynamicType>( c10::DynamicTypeTrait<T>::tagValue(), name, c10::DynamicType::Arguments{}); } template <typename T> C10_ERASE static c10::DynamicTypePtr get() { return DynamicTypeTrait<T>::getBaseType(); } static const std::unordered_map<std::string, c10::TypePtr>& basePythonTypes(); }; using DynamicTypeFactory = TypeFactoryBase<c10::DynamicType>; // Helper functions for constructing DynamicTypes inline. template < typename T, std::enable_if_t<DynamicTypeTrait<T>::isBaseType, int> = 0> C10_ERASE DynamicTypePtr dynT() { return DynamicTypeFactory::get<T>(); } template < typename T, typename... Args, std::enable_if_t<!DynamicTypeTrait<T>::isBaseType, int> = 0> C10_ERASE DynamicTypePtr dynT(Args&&... args) { return DynamicTypeFactory::create<T>(std::forward<Args>(args)...); } template <> struct TORCH_API TypeFactoryBase<c10::Type> { template <typename T, typename... Args> static c10::TypePtr create(TypePtr ty, Args&&... args) { return T::create(std::move(ty), std::forward<Args>(args)...); } template <typename T> static c10::TypePtr create(std::vector<c10::TypePtr> types) { return T::create(std::move(types)); } static c10::TypePtr createNamedTuple( const std::string& name, const std::vector<c10::string_view>& fields, const std::vector<c10::TypePtr>& types); template <typename T> C10_ERASE static c10::TypePtr createNamed(const std::string& name) { return T::create(name); } static const std::unordered_map<std::string, c10::TypePtr>& basePythonTypes(); template <typename T> C10_ERASE static c10::TypePtr get() { return T::get(); } }; using DefaultTypeFactory = TypeFactoryBase<c10::Type>; using PlatformType = #ifdef C10_MOBILE c10::DynamicType #else c10::Type #endif ; using TypeFactory = TypeFactoryBase<PlatformType>; } // namespace c10
3,245
28.779817
80
h
null
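A hedged sketch of the factory above: the point of TypeFactoryBase is that one call site can construct either c10::Type or c10::DynamicType, depending on the PlatformType alias. The List/Optional composition below is an assumed typical use, not taken from the file.

#include <ATen/core/jit_type.h>
#include <ATen/core/type_factory.h>

// Builds List[Optional[Tensor]] through the platform-selected factory, so the
// same code produces DynamicType on mobile builds and Type elsewhere.
c10::TypePtr make_optional_tensor_list() {
  auto opt = c10::TypeFactory::create<c10::OptionalType>(c10::TensorType::get());
  return c10::TypeFactory::create<c10::ListType>(std::move(opt));
}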
pytorch-main/aten/src/ATen/core/type_ptr.h
#pragma once

#include <memory>
#include <type_traits>

#include <c10/util/Exception.h>
#include <c10/util/MaybeOwned.h>

namespace c10 {

// Compatibility wrapper around a raw pointer so that existing code
// written to deal with a shared_ptr can keep working.
template <typename T>
class SingletonTypePtr {
 public:
  /* implicit */ SingletonTypePtr(T* p) : repr_(p) {}

  // We need this to satisfy Pybind11, but it shouldn't be hit.
  explicit SingletonTypePtr(std::shared_ptr<T>) { TORCH_CHECK(false); }

  using element_type = typename std::shared_ptr<T>::element_type;

  template <typename U = T, std::enable_if_t<!std::is_same<std::remove_const_t<U>, void>::value, bool> = true>
  T& operator*() const {
    return *repr_;
  }

  T* get() const {
    return repr_;
  }

  T* operator->() const {
    return repr_;
  }

  operator bool() const {
    return repr_ != nullptr;
  }

 private:
  T* repr_{nullptr};
};

template <typename T, typename U>
bool operator==(SingletonTypePtr<T> lhs, SingletonTypePtr<U> rhs) {
  return (void*)lhs.get() == (void*)rhs.get();
}

template <typename T, typename U>
bool operator!=(SingletonTypePtr<T> lhs, SingletonTypePtr<U> rhs) {
  return !(lhs == rhs);
}

} // namespace c10
1,223
21.254545
110
h
null
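A minimal sketch of the non-owning wrapper above, with a hypothetical singleton type standing in for the real type singletons.

#include <ATen/core/type_ptr.h>

struct MyType {
  int tag = 42;
};

int singleton_demo() {
  static MyType instance;  // singleton storage; never freed, so no ownership is needed
  c10::SingletonTypePtr<MyType> p(&instance);
  // Forwards get()/operator->/operator* like a smart pointer, but never owns.
  return p ? p->tag : 0;  // 42
}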
pytorch-main/aten/src/ATen/core/boxing/BoxedKernel.h
#pragma once #include <ATen/core/boxing/OperatorKernel.h> #include <c10/core/DispatchKeySet.h> #include <c10/util/intrusive_ptr.h> namespace c10 { struct IValue; using Stack = std::vector<IValue>; class OperatorHandle; class KernelFunction; // This kernel implements the behavior of falling through to the next available // registered dispatch key. The implementation of this function is FAST; it is // no overhead to fallthrough to the next key. See cpp file for some more // implementation notes; notably, this does NOT actually go through the // boxing/unboxing codepath. TORCH_API void fallthrough_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*); // Note [Ambiguity in AutogradOther kernel] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // This error-reporting kernel is registered to the AutogradOther entry in the // dispatch table when there is both a CompositeImplicitAutograd kernel and a // backend kernel for ANY backend that maps to AutogradOther. To see why // this is necessary in the AutogradOther case, it's helpful to first see // why everything works out fine for a backend that has a reserved Autograd // entry (see rule 2.2 in [Note] DispatchTable computation): // // CPU AutogradCPU // reg? registers with... // ------------------------------------------------- // y Autograd registration takes precedence // over CompositeImplicitAutograd. // This is good, because the CPU specific backend // implementation is more specialized and typically better; // if we used the composite, we would bypass it. // (NB: the Autograd key is guaranteed to exist because // the autograd codegen requires it!) // // n CompositeImplicitAutograd takes precedence. // This is also good, because the Autograd // registration (if it exists) would try to redispatch // to the (non-existent) CPU implementation; by // using the composite, we ensure the operator // actually works. // // As you can see, when we have a specific Autograd key (AutogradCPU), we can // decide whether or not to use the CompositeImplicitAutograd kernel or the // Autograd kernel based on whether or not the backend kernel exists. // // However, for AutogradOther (which is the catchall autograd kernel for // everything that doesn't have a specific Autograd key), we can't do this // trick because there isn't any unique backend to peek at to disambiguate; // if there are some backends that have implementations they prefer Autograd, // but unimplemented backends would prefer CompositeImplicitAutograd. Rather // than arbitrarily pick one or the other, we just register a kernel that raises // an error and let the user decide how to proceed. TORCH_API void ambiguous_autogradother_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*); // Note [named_not_supported_kernel] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // This kernel implements reporting an error message saying that named tensor is // not supported. This kernel doesn't rely on the Stack, and so it is special // cased in the dispatcher to be triggered before we attempt boxing (so we can // give a good error message in cases when boxing is not supported). When // boxing is universally supported this can be removed. [[noreturn]] TORCH_API void named_not_supported_kernel(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*); /** * BoxedKernel is similar to a std::function storing a boxed kernel. 
*/ class TORCH_API BoxedKernel final { public: // This is how boxed kernels are actually stored // // Note [Plumbing Keys Through The Dispatcher] // Benchmarks have shown that it is expensive for the dispatcher to read from thread-local storage (TLS) // upon every dispatch call into order to compute which kernel to dispatch to. // // To mitigate this, we've updated the calling convention inside the dispatcher to expect every kernel that it stores // to have a first argument of type DispatchKeySet. // // What are the invariants of the DispatchKeySet when it gets passed to a kernel? // - All keys to the left of the current dispatch key have been masked out. // (e.g. a Tracing kernel that takes in the DispatchKeySet will expect the highest bit to be DispatchKey::Tracer) // - All other keys that dispatcher normally would have computed through TLS + global state + op arguments // are still in the set. // // Kernels can then opt into using this keyset to save the dispatcher from doing repeated work during redispatches: // recalculating the highest-priority dispatch key, which involves reading from TLS. Instead, the kernels that opt in will // calculate an updated DispatchKeySet directly from the old one, and pass the updated set directly into the dispatcher // upon redispatching. // // This is an opt-in mechanism: Kernels can automatically opt in by setting the first argument in their signature // to be of type DispatchKeySet. See the kernels in VariableTypeEverything.cpp and TraceTypeEverything.cpp for examples. // // The mechanism for optionally passing that DispatchKeySet into the kernel lives in make_boxed_from_unboxed_functor.h. // See Note [Plumbing Keys Through The Dispatcher 2] for details. using InternalBoxedKernelFunction = void(OperatorKernel*, const OperatorHandle&, DispatchKeySet, Stack*); // This is the public API for how boxed kernels are defined using BoxedKernelFunction = void(const OperatorHandle&, Stack*); using BoxedKernelFunction_withDispatchKeys = void(const OperatorHandle&, DispatchKeySet, Stack*); BoxedKernel(); // Fast path for dispatch to allow not touching the boxed kernel in // the common case where unboxed is available. bool isValid() const; bool isFallthrough() const; /** * Call the function with boxed arguments. */ void callBoxed(const OperatorHandle& opHandle, DispatchKeySet dispatchKeySet, Stack* stack) const; /** * Create a KernelFunction from a boxed function. * * Example: * * > void boxed_func(OperatorKernel*, Stack* stack) {...} * > BoxedFunction func = BoxedKernel::makeFromFunction<&boxed_func>(); */ template<BoxedKernelFunction* func> static BoxedKernel makeFromFunction(); /** * TODO: This will only be useful if we write a backend fallback that plumbs dispatch keys (currently there are none) * See Note [Plumbing Keys Through The Dispatcher] for details. */ template<BoxedKernelFunction_withDispatchKeys* func> static BoxedKernel makeFromFunction(); /** * Create a KernelFunction from a boxed functor. 
* * Example: * * > class MyFunctor final : public c10::OperatorKernel { * > public: * > void operator()(const OperatorHandle&, DispatchKeySet, Stack*) {...} * > }; * > BoxedKernel func = BoxedKernel::makeFromFunctor(std::make_unique<MyFunctor>()); */ template<class KernelFunctor> static BoxedKernel makeFromFunctor(std::unique_ptr<KernelFunctor> kernelFunctor); static BoxedKernel makeFallthrough(); static BoxedKernel makeAmbiguousAutogradOther(); static BoxedKernel makeNamedNotSupported(); private: friend class KernelFunction; template<BoxedKernelFunction* func> static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack); template<BoxedKernelFunction_withDispatchKeys* func> static void make_boxed_function(OperatorKernel*, const OperatorHandle& opHandle, DispatchKeySet, Stack* stack); explicit BoxedKernel(std::unique_ptr<OperatorKernel> functor, InternalBoxedKernelFunction* boxed_kernel_func); OperatorKernel* getFunctor() const; InternalBoxedKernelFunction* getFnPtr() const; c10::intrusive_ptr<OperatorKernel> functor_; InternalBoxedKernelFunction* boxed_kernel_func_; }; } // namespace c10 #include <ATen/core/boxing/BoxedKernel_impl.h>
7,924
43.774011
124
h
null
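An illustrative sketch of a boxed kernel against the API above; add_boxed and its integer semantics are assumptions made for demonstration.

#include <ATen/core/boxing/BoxedKernel.h>
#include <ATen/core/stack.h>

#include <cstdint>

// A boxed kernel sees only the IValue stack, which is what lets one function
// serve operators with arbitrary schemas. This one pops two ints and pushes
// their sum.
void add_boxed(const c10::OperatorHandle& /*op*/, c10::Stack* stack) {
  int64_t b = torch::jit::pop(*stack).toInt();
  int64_t a = torch::jit::pop(*stack).toInt();
  torch::jit::push(*stack, a + b);
}

// Wrap it; callBoxed() then drives it through the internal calling convention
// (see Note [Plumbing Keys Through The Dispatcher] above).
c10::BoxedKernel add_kernel = c10::BoxedKernel::makeFromFunction<&add_boxed>();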
pytorch-main/aten/src/ATen/core/boxing/OperatorKernel.h
#pragma once

#include <c10/util/intrusive_ptr.h>

namespace c10 {

/**
 * Inherit from OperatorKernel to implement a c10 kernel.
 *
 * Example:
 * > namespace {
 * >   class my_kernel_cpu final : public c10::OperatorKernel {
 * >   public:
 * >     Tensor operator()(Tensor a, Tensor b) {...}
 * >   };
 * > }
 *
 * The kernel class is allowed to have members but these are equivalent
 * to global variables. The kernel implementation is responsible for
 * preventing race conditions on them.
 *
 * See below for how to register this kernel with PyTorch.
 */
struct TORCH_API OperatorKernel : public c10::intrusive_ptr_target {
  ~OperatorKernel() override = default;
};

} // namespace c10
692
23.75
71
h
null
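A small sketch expanding on the thread-safety caveat in the comment above: members of a functor kernel behave like globals, so mutable state should be synchronized by the kernel itself.

#include <ATen/core/boxing/OperatorKernel.h>

#include <atomic>
#include <cstdint>

struct counting_kernel final : public c10::OperatorKernel {
  // Shared across all invocations of the kernel; atomic keeps the count safe
  // under concurrent calls.
  std::atomic<int64_t> calls{0};

  int64_t operator()(int64_t x) {
    ++calls;
    return x * 2;
  }
};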
pytorch-main/aten/src/ATen/core/boxing/impl/test_helpers.h
#pragma once #include <gtest/gtest.h> #include <gmock/gmock.h> #include <ATen/core/Tensor.h> #include <ATen/core/dispatch/Dispatcher.h> #include <ATen/core/ivalue.h> #include <c10/core/CPUAllocator.h> #include <c10/util/irange.h> template<class... Inputs> inline std::vector<c10::IValue> makeStack(Inputs&&... inputs) { return {std::forward<Inputs>(inputs)...}; } inline at::Tensor dummyTensor(c10::DispatchKeySet ks, bool requires_grad=false) { auto* allocator = c10::GetCPUAllocator(); int64_t nelements = 1; auto dtype = caffe2::TypeMeta::Make<float>(); int64_t size_bytes = nelements * dtype.itemsize(); auto storage_impl = c10::make_intrusive<c10::StorageImpl>( c10::StorageImpl::use_byte_size_t(), size_bytes, allocator->allocate(size_bytes), allocator, /*resizable=*/true); at::Tensor t = at::detail::make_tensor<c10::TensorImpl>(storage_impl, ks, dtype); // TODO: We add this to simulate the ideal case where we only have Autograd backend keys // on Tensor when it requires grad. But currently Autograd keys are added in TensorImpl // constructor by default. if (!requires_grad) { t.unsafeGetTensorImpl()->remove_autograd_key(); } return t; } inline at::Tensor dummyTensor(c10::DispatchKey dispatch_key, bool requires_grad=false) { return dummyTensor(c10::DispatchKeySet(dispatch_key), requires_grad); } template<class... Args> inline std::vector<c10::IValue> callOp(const c10::OperatorHandle& op, Args... args) { auto stack = makeStack(std::forward<Args>(args)...); op.callBoxed(&stack); return stack; } template<class Result, class... Args> inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) { return op.typed<Result(Args...)>().call(std::forward<Args>(args)...); } template<class Result, class... Args> inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) { return op.typed<Result(Args...)>().callWithDispatchKey(dispatchKey, std::forward<Args>(args)...); } template<class Result, class... Args> inline Result callOpUnboxedWithPrecomputedDispatchKeySet(const c10::OperatorHandle& op, c10::DispatchKeySet ks, Args... 
args) { return op.typed<Result(Args...)>().redispatch(ks, std::forward<Args>(args)...); } inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) { auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); EXPECT_ANY_THROW( callOp(*op, dummyTensor(dispatch_key), 5); ); } inline void expectDoesntFindOperator(const char* op_name) { auto op = c10::Dispatcher::singleton().findSchema({op_name, ""}); EXPECT_FALSE(op.has_value()); } template<class Exception, class Functor> inline void expectThrows(Functor&& functor, const char* expectMessageContains) { try { std::forward<Functor>(functor)(); } catch (const Exception& e) { EXPECT_THAT(e.what(), testing::HasSubstr(expectMessageContains)); return; } ADD_FAILURE() << "Expected to throw exception containing \"" << expectMessageContains << "\" but didn't throw"; } template<class T, size_t N> void expectListEquals(c10::ArrayRef<T> expected, std::array<T, N> actual) { EXPECT_EQ(expected.size(), actual.size()); for (const auto i : c10::irange(expected.size())) { EXPECT_EQ(expected[i], actual[i]); } } template<class T> void expectListEquals(c10::ArrayRef<T> expected, c10::ArrayRef<T> actual) { EXPECT_EQ(expected.size(), actual.size()); for (const auto i : c10::irange(expected.size())) { EXPECT_EQ(expected[i], actual[i]); } } template<class T> void expectListEquals(c10::ArrayRef<T> expected, c10::List<T> actual) { EXPECT_EQ(expected.size(), actual.size()); for (const auto i : c10::irange(expected.size())) { EXPECT_EQ(expected[i], actual.get(i)); } } template<class T> void expectListEquals(c10::ArrayRef<T> expected, std::vector<T> actual) { EXPECT_EQ(expected.size(), actual.size()); for (const auto i : c10::irange(expected.size())) { EXPECT_EQ(expected[i], actual[i]); } } // NB: This is not really sound, but all of the type sets constructed here // are singletons so it's fine static inline c10::DispatchKey extractDispatchKey(const at::Tensor& t) { return legacyExtractDispatchKey(t.key_set()); }
4,296
33.376
127
h
null
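A hedged sketch of the helpers in a gtest body. The operator name, overload, and arguments are stand-ins; a real test would target whatever schema it actually registered.

#include <ATen/core/boxing/impl/test_helpers.h>
#include <gtest/gtest.h>

TEST(DispatcherTest, BoxedCallRoundTrips) {
  at::Tensor t = dummyTensor(c10::DispatchKey::CPU);
  auto op = c10::Dispatcher::singleton().findSchema({"aten::add", "Tensor"});
  ASSERT_TRUE(op.has_value());
  // callOp boxes the arguments onto a Stack, invokes the kernel, and returns
  // the result stack.
  auto results = callOp(*op, t, t, at::Scalar(1));
  EXPECT_EQ(1u, results.size());
  EXPECT_TRUE(results[0].isTensor());
}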
pytorch-main/aten/src/ATen/core/dispatch/CppSignature.h
#pragma once #include <typeindex> #include <c10/core/DispatchKeySet.h> #include <c10/macros/Macros.h> #include <c10/util/Metaprogramming.h> #include <c10/util/Type.h> namespace c10 { namespace impl { // A CppSignature object holds RTTI information about a C++ function signature at runtime // and can compare them or get a debug-printable name. class TORCH_API CppSignature final { public: CppSignature(const CppSignature&) = default; CppSignature(CppSignature&&) noexcept = default; CppSignature& operator=(const CppSignature&) = default; CppSignature& operator=(CppSignature&&) noexcept = default; template<class FuncType> static CppSignature make() { // Normalize functors, lambdas, function pointers, etc. into the plain function type // The first argument of the schema might be of type DispatchKeySet, in which case we remove it. // We do this to guarantee that all CppSignature's for an operator will match, even if they're registered // with different calling conventions. // See Note [Plumbing Keys Through The Dispatcher] using decayed_function_type = typename c10::remove_DispatchKeySet_arg_from_func<std::decay_t<FuncType>>::func_type; return CppSignature(std::type_index(typeid(decayed_function_type))); } std::string name() const { return c10::demangle(signature_.name()); } friend bool operator==(const CppSignature& lhs, const CppSignature& rhs) { if (lhs.signature_ == rhs.signature_) { return true; } // Without RTLD_GLOBAL, the type_index comparison could yield false because // they point to different instances of the RTTI data, but the types would // still be the same. Let's check for that case too. // Note that there still is a case where this might not work, i.e. when // linking libraries of different compilers together, they might have // different ways to serialize a type name. That, together with a missing // RTLD_GLOBAL, would still fail this. if (0 == strcmp(lhs.signature_.name(), rhs.signature_.name())) { return true; } return false; } private: explicit CppSignature(std::type_index signature): signature_(std::move(signature)) {} std::type_index signature_; }; inline bool operator!=(const CppSignature& lhs, const CppSignature& rhs) { return !(lhs == rhs ); } } }
2,455
36.212121
123
h
null
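A minimal sketch of the normalization described above: a leading DispatchKeySet argument is stripped before comparison, so the two signatures below should compare equal.

#include <ATen/core/Tensor.h>
#include <ATen/core/dispatch/CppSignature.h>

at::Tensor f(at::Tensor t);
at::Tensor g(c10::DispatchKeySet ks, at::Tensor t);  // leading keyset is removed

bool signatures_match() {
  auto sig_f = c10::impl::CppSignature::make<decltype(f)>();
  auto sig_g = c10::impl::CppSignature::make<decltype(g)>();
  return sig_f == sig_g;  // expected: true — both normalize to Tensor(Tensor)
}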
pytorch-main/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
#pragma once #include <cstdint> #include <ATen/core/function_schema.h> #include <ATen/core/jit_type.h> #include <c10/util/Bitset.h> #include <c10/core/DispatchKeySet.h> #include <c10/util/irange.h> #include <ATen/core/Variadic.h> #include <ATen/core/stack.h> namespace c10 { namespace impl { // Take a DispatchKeySet for a Tensor and determine what the actual dispatch // DispatchKey should be, taking into account TLS, and skipping backends which // fall through. // // Unlike Tensor::key_set(), the value of this on a tensor can change depending // on TLS. // // NB: If there is no valid dispatch key, this will return Undefined static inline DispatchKeySet computeDispatchKeySet( DispatchKeySet ks, // The key mask lets us eliminate (by zero entries) keys which should not // be considered for dispatch. There are two cases when we use this: // // - If an operator's dispatch table contains a fallthrough entry, we // should bypass it entirely when finding the key // - If a user invokes with redispatch, the mask lets us // zero out the key the user asked us to stop. // // These excluded backends are NOT tracked in the TLS, but must be applied // AFTER TLS (since the backend may have been introduced for consideration // by the included TLS), which is why you have to pass them in to this // function (as opposed to just applying it to the input 'ks'). DispatchKeySet key_mask ) { c10::impl::LocalDispatchKeySet local = c10::impl::tls_local_dispatch_key_set(); // TODO: It's a bit irritating that we have to do logical ORs here, it would // be nice to only do one. Can always_included be folded into the TLS? Well, // it's a bit troublesome, because fastpath TLS access requires the type of // the TLS in question to be zero-initialized, so you don't actually win // anyting in that case. return (((ks | local.included_) - local.excluded_) & key_mask); } } namespace detail { // A small gadget to extract the DispatchKeySet from types which are known // to have it. Used to extract dispatch keys from unboxed calls. struct MultiDispatchKeySet : at::IterArgs<MultiDispatchKeySet> { DispatchKeySet ts; void operator()(const at::Tensor& x) { ts = ts | x.key_set(); } void operator()(const c10::optional<at::Tensor>& x) { if (x.has_value()) { ts = ts | x->key_set(); } } void operator()(at::ArrayRef<at::Tensor> xs) { for (const auto& x : xs) { ts = ts | x.key_set(); } } // Tensor?[] translates to this case. void operator()(const c10::List<c10::optional<at::Tensor>>& xs) { for (c10::optional<at::Tensor> x : xs) { if (x.has_value()) { ts = ts | x.value().key_set(); } } } // Structured Tensor[] translates to this case void operator()(at::ITensorListRef xs) { for (const auto& x : xs) { ts = ts | x.key_set(); } } [[noreturn]] void operator()(at::ArrayRef<c10::optional<at::Tensor>>) { // Just checking that the handling of Tensor?[] didn't change. TORCH_INTERNAL_ASSERT(false); } void operator()(const at::Generator& gen) { if (gen.defined()) { ts = ts | gen.key_set(); } } void operator()(const c10::optional<at::Generator>& gen) { if (gen.has_value() && gen->defined()) { ts = ts | gen->key_set(); } } template <typename T> void operator()(const T&) { // do nothing } }; // NB: take by const reference (Don't do universal forwarding here! You // don't want to move into this function!) template <typename... Args> DispatchKeySet multi_dispatch_key_set(const Args&... 
args) { return MultiDispatchKeySet().apply(args...).ts; } } /** * An instance of DispatchKeyExtractor knows how to get a dispatch key given * a list of arguments for an operator call. * * The instance is specific for a certain operator as: * - In boxed dispatch, different operators have different ways to extract * the dispatch key (e.g. different numbers of arguments), and we precompute * the stack locations we should look at; and * - In all dispatch, some backends should be excluded from dispatch because * they have been registered as fallthrough. The set of excluded backends * varies from operator, as some operators may have overridden the * fallthrough with custom behavior. * * Note - this should maintain identical impl to the py dispatcher key extraction logic * at pytorch/torch/dispatcher.py */ struct TORCH_API DispatchKeyExtractor final { public: static DispatchKeyExtractor make(const FunctionSchema& schema) { return DispatchKeyExtractor(makeBitsetForDispatchArgs(schema)); } static DispatchKeyExtractor makeUninitialized() { return DispatchKeyExtractor(c10::utils::bitset()); } void registerSchema(const FunctionSchema& schema) { TORCH_INTERNAL_ASSERT(dispatch_arg_indices_reverse_.is_entirely_unset()); dispatch_arg_indices_reverse_ = makeBitsetForDispatchArgs(schema); } void deregisterSchema() { dispatch_arg_indices_reverse_ = c10::utils::bitset(); } DispatchKeySet getDispatchKeySetBoxed(const torch::jit::Stack* stack) const { DispatchKeySet ks; dispatch_arg_indices_reverse_.for_each_set_bit([&] (size_t reverse_arg_index) { const auto& ivalue = torch::jit::peek(*stack, 0, reverse_arg_index + 1); if (C10_LIKELY(ivalue.isTensor())) { // NB: Take care not to introduce a refcount bump (there's // no safe toTensorRef method, alas) ks = ks | ivalue.unsafeToTensorImpl()->key_set(); } else if (C10_UNLIKELY(ivalue.isTensorList())) { for (const at::Tensor& tensor : ivalue.toTensorList()) { ks = ks | tensor.key_set(); } } // Tensor?[] translates to a c10::List<IValue> so we need to peek inside else if (C10_UNLIKELY(ivalue.isList())) { for (const auto& elt : ivalue.toListRef()) { if (elt.isTensor()) { ks = ks | elt.toTensor().key_set(); } } } }); // Keys that are fallthrough should be skipped if (requiresBitsetPerBackend_) { auto backend_idx = ks.getBackendIndex(); return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]); } else { return impl::computeDispatchKeySet(ks, nonFallthroughKeys_); } } template<class... Args> DispatchKeySet getDispatchKeySetUnboxed(const Args&... 
args) const { auto ks = detail::multi_dispatch_key_set(args...); // Keys that are fallthrough should be skipped if (requiresBitsetPerBackend_) { auto backend_idx = ks.getBackendIndex(); return impl::computeDispatchKeySet(ks, nonFallthroughKeysPerBackend_[backend_idx]); } else { return impl::computeDispatchKeySet(ks, nonFallthroughKeys_); } } void setOperatorHasFallthroughForKey(DispatchKey k, bool has_fallthrough); std::string dumpState() const; void checkInvariants(const FunctionSchema& schema) const; private: static c10::utils::bitset makeBitsetForDispatchArgs(const FunctionSchema& schema) { TORCH_CHECK(schema.arguments().size() <= c10::utils::bitset::NUM_BITS(), "The function schema has ", schema.arguments().size(), " arguments but this PyTorch build only supports ", c10::utils::bitset::NUM_BITS()); c10::utils::bitset dispatch_arg_indices_reverse; for (const auto index : c10::irange(schema.arguments().size())) { if (schema.arguments()[index].type()->isSubtypeOf(*TensorType::get()) || schema.arguments()[index].type()->isSubtypeOf( *ListType::ofTensors()) || schema.arguments()[index].type()->isSubtypeOf( *ListType::ofOptionalTensors()) || schema.arguments()[index].type()->isSubtypeOf( *OptionalType::ofTensor())) { dispatch_arg_indices_reverse.set(schema.arguments().size() - 1 - index); } } return dispatch_arg_indices_reverse; } explicit DispatchKeyExtractor(c10::utils::bitset dispatch_arg_indices_reverse) : dispatch_arg_indices_reverse_(dispatch_arg_indices_reverse) , nonFallthroughKeys_(DispatchKeySet::FULL) , requiresBitsetPerBackend_(false) { for (const auto i : c10::irange(nonFallthroughKeysPerBackend_.size())) { nonFallthroughKeysPerBackend_[i] = DispatchKeySet::FULL; } } // this is a bitset that has ones for each argument index which has to be // considered for dispatch. This avoids having to iterate over the stack // to find all the tensors. The bits are stored in reverse order, i.e. // dispatch_arg_indices_reverse_[i] == true, then the i-th argument from // the top of the stack (i.e. the i-th last argument of the function) // is relevant for dispatch. // dispatch_arg_indices_reverse_ is allowed to have zero bits set; that just means you must do the // fallthrough c10::utils::bitset dispatch_arg_indices_reverse_; // Set of functionality keys for which the operator does NOT have fallthrough kernel. DispatchKeySet nonFallthroughKeys_; // Set of functionality keys for which the operator does NOT have fallthrough kernel, defined PER BACKEND. // This is only needed if we know that the operator has a different set of fallthroughs defined for some backends. std::array<DispatchKeySet, num_backends> nonFallthroughKeysPerBackend_; // Flag to tell us if we can use the single set of nonFallthroughKeys_ (fast path), // or if we need to fall back to the slower path and check nonFallthroughKeysPerBackend_ bool requiresBitsetPerBackend_; }; }
9,669
38.794239
116
h
null
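An illustrative sketch of the unboxed path above: union the key sets of all tensor-like arguments, then apply TLS and the fallthrough mask. Passing FULL means no per-operator fallthrough information is modeled in this sketch.

#include <ATen/core/dispatch/DispatchKeyExtractor.h>

#include <cstdint>

c10::DispatchKeySet keys_for_call(
    const at::Tensor& a,
    const at::Tensor& b,
    int64_t n) {
  // Non-tensor arguments like `n` hit the catch-all operator() and contribute
  // nothing to the set.
  c10::DispatchKeySet ks = c10::detail::multi_dispatch_key_set(a, b, n);
  // Fold in thread-local included/excluded keys; a FULL mask filters nothing.
  return c10::impl::computeDispatchKeySet(
      ks, c10::DispatchKeySet(c10::DispatchKeySet::FULL));
}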
pytorch-main/aten/src/ATen/core/dispatch/OperatorOptions.h
#pragma once

#include <cstdint>

namespace c10 {

enum class AliasAnalysisKind : uint8_t {
  INTERNAL_SPECIAL_CASE,
  CONSERVATIVE, // The most conservative alias analysis type, assumes
                // side-effects. This is the default analysis.
  FROM_SCHEMA,
  PURE_FUNCTION
};

#if !defined(_MSC_VER)
constexpr // Our current MSVC version has a bug that doesn't allow this to be constexpr.
#endif
inline const char* toString(AliasAnalysisKind aliasAnalysisKind) {
  return (aliasAnalysisKind == AliasAnalysisKind::CONSERVATIVE)
      ? "CONSERVATIVE"
      : (aliasAnalysisKind == AliasAnalysisKind::FROM_SCHEMA)
          ? "FROM_SCHEMA"
          : (aliasAnalysisKind == AliasAnalysisKind::PURE_FUNCTION)
              ? "PURE_FUNCTION"
              : (aliasAnalysisKind == AliasAnalysisKind::INTERNAL_SPECIAL_CASE)
                  ? "INTERNAL_SPECIAL_CASE"
                  : "UNKNOWN";
}

} // namespace c10
923
28.806452
88
h
null
pytorch-main/aten/src/ATen/core/dispatch/RegistrationHandleRAII.h
#pragma once

#include <functional>

namespace c10 {

class RegistrationHandleRAII final {
 public:
  explicit RegistrationHandleRAII(std::function<void()> onDestruction)
      : onDestruction_(std::move(onDestruction)) {}

  ~RegistrationHandleRAII() {
    if (onDestruction_) {
      onDestruction_();
    }
  }

  RegistrationHandleRAII(const RegistrationHandleRAII&) = delete;
  RegistrationHandleRAII& operator=(const RegistrationHandleRAII&) = delete;

  RegistrationHandleRAII(RegistrationHandleRAII&& rhs) noexcept
      : onDestruction_(std::move(rhs.onDestruction_)) {
    rhs.onDestruction_ = nullptr;
  }

  RegistrationHandleRAII& operator=(RegistrationHandleRAII&& rhs) noexcept {
    onDestruction_ = std::move(rhs.onDestruction_);
    rhs.onDestruction_ = nullptr;
    return *this;
  }

 private:
  std::function<void()> onDestruction_;
};

} // namespace c10
858
22.216216
76
h
null
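A minimal sketch of the move-only RAII handle above: the callback fires exactly once, when the final owner is destroyed.

#include <ATen/core/dispatch/RegistrationHandleRAII.h>

#include <iostream>

c10::RegistrationHandleRAII make_handle() {
  // Direct-initialization is required because the constructor is explicit.
  c10::RegistrationHandleRAII h([] { std::cout << "deregistered\n"; });
  return h;  // moved out; the moved-from handle's destructor is a no-op
}

void raii_demo() {
  auto handle = make_handle();
}  // prints "deregistered" exactly once, here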
pytorch-main/aten/src/ATen/cpu/vml.h
#pragma once #include <ATen/Config.h> #include <ATen/Parallel.h> #include <ATen/OpMathType.h> #include <ATen/cpu/vec/functional.h> #include <ATen/cpu/vec/vec.h> #include <c10/util/complex.h> // This header implements various unary operations using a MKL VML style // interface. // It implements various functions with a simple interface // For example it enables the user to call vsin(float* out, const float* in, // size) This functions takes a pointer to a continuous output array of floats and // a constant input array. It will then apply sin to each value in the input // array and write the result into the output array. out and in may point to the // same memory, i.e. this fully supports in-place operations. These functions // also implement their own parallelization, so take precautions when calling // these from threaded functions. // When MKL is available it will call into MKL's VML library similar to NumPy // If MKL is not available it will use SLEEF. // This file might be compiled under AVX or AVX2 when called from e.g. // UnaryOpsKernel.cpp #include <algorithm> #include <cstddef> #include <cstdint> #include <cstring> #include <type_traits> #if AT_MKL_ENABLED() && !defined(__APPLE__) #include <mkl.h> #endif namespace at { namespace vml { inline namespace CPU_CAPABILITY { using namespace vec; template <typename scalar_t> inline void vrsqrt(scalar_t* out, scalar_t* in, int64_t size) { parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) { map( [](const Vectorized<scalar_t>& x) { return Vectorized<scalar_t>((scalar_t)(1)) / x.sqrt(); }, out + begin, in + begin, end - begin); }); } // NB: We ignore numerical errors by convention and leave them to the user #define IMPLEMENT_VML(op) \ template <typename scalar_t> \ inline void v##op(scalar_t* out, const scalar_t* in, int64_t size) { \ using vec_t = Vectorized<vec_scalar_t<scalar_t>>; \ vec::map([](vec_t x) { return x.op(); }, out, in, size); \ } \ IMPLEMENT_VML(abs) IMPLEMENT_VML(acos) IMPLEMENT_VML(asin) IMPLEMENT_VML(atan) IMPLEMENT_VML(ceil) IMPLEMENT_VML(cos) // IMPLEMENT_VML(cosh) IMPLEMENT_VML(erf) IMPLEMENT_VML(erfc) IMPLEMENT_VML(erfinv) IMPLEMENT_VML(exp) IMPLEMENT_VML(expm1) IMPLEMENT_VML(floor) IMPLEMENT_VML(i0) IMPLEMENT_VML(i0e) IMPLEMENT_VML(reciprocal) IMPLEMENT_VML(log) IMPLEMENT_VML(log10) IMPLEMENT_VML(log1p) IMPLEMENT_VML(log2) IMPLEMENT_VML(neg) IMPLEMENT_VML(sin) // IMPLEMENT_VML(sinh) IMPLEMENT_VML(sqrt) IMPLEMENT_VML(round) IMPLEMENT_VML(rsqrt) IMPLEMENT_VML(tan) IMPLEMENT_VML(tanh) IMPLEMENT_VML(trunc) IMPLEMENT_VML(lgamma) #if AT_MKL_ENABLED() && !defined(__APPLE__) // NB: LP64 MKL is the most commonly used and thus we assume it here. That means // we need to expect MKL_INT to be of type int, which implies int32_t in most // cases. 
static_assert( std::is_same<MKL_INT, int32_t>::value, "MKL_INT is assumed to be int32_t"); #define IMPLEMENT_VML_MKL_STUB(op, mklop, type, mkltype) \ template <> \ inline void v##op(type * out, const type * in, int64_t size) { \ int64_t max_mkl_ind = std::numeric_limits<MKL_INT>::max(); \ if (size <= static_cast<int64_t>(max_mkl_ind)) { \ vm##mkltype##mklop( \ size, in, out, VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ } else { \ MKL_INT ind = 0; \ int64_t chunks = size / max_mkl_ind; \ int64_t rest = size % max_mkl_ind; \ for (; ind < chunks; ind++) { \ vm##mkltype##mklop( \ max_mkl_ind, \ in + ind * max_mkl_ind, \ out + ind * max_mkl_ind, \ VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ } \ vm##mkltype##mklop( \ rest, \ in + ind * max_mkl_ind, \ out + ind * max_mkl_ind, \ VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \ } \ } #define IMPLEMENT_VML_MKL(op, mklop) \ IMPLEMENT_VML_MKL_STUB(op, mklop, float, s) \ IMPLEMENT_VML_MKL_STUB(op, mklop, double, d) // NB: abs, cosh and sinh were temporarily disabled due to issues with Apple // NB: expm1 is disabled because on some configs it produces expm1(nan)=-1 IMPLEMENT_VML_MKL(acos, Acos) IMPLEMENT_VML_MKL(asin, Asin) IMPLEMENT_VML_MKL(atan, Atan) IMPLEMENT_VML_MKL(cos, Cos) // IMPLEMENT_VML_MKL(cosh, Cosh) IMPLEMENT_VML_MKL(erf, Erf) IMPLEMENT_VML_MKL(erfc, Erfc) IMPLEMENT_VML_MKL(erfinv, ErfInv) IMPLEMENT_VML_MKL(exp, Exp) // IMPLEMENT_VML_MKL(expm1, Expm1) IMPLEMENT_VML_MKL(log, Ln) IMPLEMENT_VML_MKL(log10, Log10) IMPLEMENT_VML_MKL(sin, Sin) // IMPLEMENT_VML_MKL(sinh, Sinh) IMPLEMENT_VML_MKL(sqrt, Sqrt) IMPLEMENT_VML_MKL(tan, Tan) IMPLEMENT_VML_MKL(tanh, Tanh) IMPLEMENT_VML_MKL(trunc, Trunc) // Not vectorized in MKL version tested // IMPLEMENT_VML_MKL(abs, Abs) // IMPLEMENT_VML_MKL(log1p, Log1p) #if INTEL_MKL_VERSION >= 20180406 IMPLEMENT_VML_MKL(log2, Log2) #endif #endif } // namespace } // namespace vml } // namespace at
6,002
34.311765
82
h
null
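A short sketch of the VML-style interface described above. Note that out and in may alias, and the functions parallelize internally, so avoid calling them from already-parallel regions.

#include <ATen/cpu/vml.h>

#include <cstdint>
#include <vector>

void vml_demo() {
  std::vector<float> x(1024, 0.5f);
  // In place: overwrite x with sin(x). Dispatches to MKL's VML when available,
  // otherwise to the vectorized (SLEEF-based) fallback.
  at::vml::vsin(x.data(), x.data(), static_cast<int64_t>(x.size()));
}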
pytorch-main/aten/src/ATen/cpu/vec/functional_base.h
#pragma once // DO NOT DEFINE STATIC DATA IN THIS HEADER! // See Note [Do not compile initializers with AVX] #include <ATen/cpu/vec/vec.h> #include <c10/util/irange.h> namespace at { namespace vec { // slow path template <typename scalar_t, typename Op> inline scalar_t vec_reduce_all( const Op& vec_fun, vec::Vectorized<scalar_t> acc_vec, int64_t size) { using Vec = vec::Vectorized<scalar_t>; scalar_t acc_arr[Vec::size()]; acc_vec.store(acc_arr); for (const auto i : c10::irange(1, size)) { std::array<scalar_t, Vec::size()> acc_arr_next = {0}; acc_arr_next[0] = acc_arr[i]; Vec acc_vec_next = Vec::loadu(acc_arr_next.data()); acc_vec = vec_fun(acc_vec, acc_vec_next); } acc_vec.store(acc_arr); return acc_arr[0]; } template <typename scalar_t, typename Op> struct VecReduceAllSIMD { static inline scalar_t apply(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) { return vec_reduce_all(vec_fun, acc_vec, Vectorized<scalar_t>::size()); } }; #if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE) #if defined(CPU_CAPABILITY_AVX2) template <typename Op> struct VecReduceAllSIMD<float, Op> { static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) { using Vec = Vectorized<float>; Vec v = acc_vec; // 128-bit shuffle Vec v1 = _mm256_permute2f128_ps(v, v, 0x1); v = vec_fun(v, v1); // 64-bit shuffle v1 = _mm256_shuffle_ps(v, v, 0x4E); v = vec_fun(v, v1); // 32-bit shuffle v1 = _mm256_shuffle_ps(v, v, 0xB1); v = vec_fun(v, v1); return _mm256_cvtss_f32(v); } }; #endif // defined(CPU_CAPABILITY_AVX2) #if defined(CPU_CAPABILITY_AVX512) template <typename Op> struct VecReduceAllSIMD<float, Op> { static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) { using Vec = Vectorized<float>; Vec v = acc_vec; // 256-bit shuffle Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E); v = vec_fun(v, v1); // 128-bit shuffle v1 = _mm512_shuffle_f32x4(v, v, 0xB1); v = vec_fun(v, v1); // 64-bit shuffle v1 = _mm512_shuffle_ps(v, v, 0x4E); v = vec_fun(v, v1); // 32-bit shuffle v1 = _mm512_shuffle_ps(v, v, 0xB1); v = vec_fun(v, v1); return _mm512_cvtss_f32(v); } }; #endif // defined(CPU_CAPABILITY_AVX512) #endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE) template <typename scalar_t, typename Op> inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) { return VecReduceAllSIMD<scalar_t, Op>::apply(vec_fun, acc_vec); } template <typename scalar_t, typename Op, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) { using Vec = vec::Vectorized<scalar_t>; if (size < Vec::size()) return vec_reduce_all(vec_fun, Vec::loadu(data, size), size); int64_t d = Vec::size(); Vec acc_vec = Vec::loadu(data); for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec data_vec = Vec::loadu(data + d); acc_vec = vec_fun(acc_vec, data_vec); } if (size - d > 0) { Vec data_vec = Vec::loadu(data + d, size - d); acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d); } return vec_reduce_all(vec_fun, acc_vec); } // similar to reduce_all, but reduces into two outputs template <typename scalar_t, typename Op1, typename Op2, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2, const scalar_t* data, int64_t size) { using Vec = vec::Vectorized<scalar_t>; if (size < Vec::size()) { auto loaded_data 
= Vec::loadu(data, size); return std::pair<scalar_t, scalar_t>( vec_reduce_all(vec_fun1, loaded_data, size), vec_reduce_all(vec_fun2, loaded_data, size)); } int64_t d = Vec::size(); Vec acc_vec1 = Vec::loadu(data); Vec acc_vec2 = Vec::loadu(data); for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec data_vec = Vec::loadu(data + d); acc_vec1 = vec_fun1(acc_vec1, data_vec); acc_vec2 = vec_fun2(acc_vec2, data_vec); } if (size - d > 0) { Vec data_vec = Vec::loadu(data + d, size - d); acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d); acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d); } return std::pair<scalar_t, scalar_t>( vec_reduce_all(vec_fun1, acc_vec1), vec_reduce_all(vec_fun2, acc_vec2)); } template <typename scalar_t, typename MapOp, typename ReduceOp, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline scalar_t map_reduce_all( const MapOp& map_fun, const ReduceOp& red_fun, const scalar_t* data, int64_t size) { using Vec = vec::Vectorized<scalar_t>; if (size < Vec::size()) return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size); int64_t d = Vec::size(); Vec acc_vec = map_fun(Vec::loadu(data)); for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec data_vec = Vec::loadu(data + d); data_vec = map_fun(data_vec); acc_vec = red_fun(acc_vec, data_vec); } if (size - d > 0) { Vec data_vec = Vec::loadu(data + d, size - d); data_vec = map_fun(data_vec); acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); } return vec_reduce_all(red_fun, acc_vec); } template <typename scalar_t, typename MapOp, typename ReduceOp, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline scalar_t map2_reduce_all( const MapOp& map_fun, const ReduceOp& red_fun, const scalar_t* data, const scalar_t* data2, int64_t size) { using Vec = vec::Vectorized<scalar_t>; if (size < Vec::size()) { Vec data_vec = Vec::loadu(data, size); Vec data2_vec = Vec::loadu(data2, size); data_vec = map_fun(data_vec, data2_vec); return vec_reduce_all(red_fun, data_vec, size); } int64_t d = Vec::size(); Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2)); for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec data_vec = Vec::loadu(data + d); Vec data2_vec = Vec::loadu(data2 + d); data_vec = map_fun(data_vec, data2_vec); acc_vec = red_fun(acc_vec, data_vec); } if (size - d > 0) { Vec data_vec = Vec::loadu(data + d, size - d); Vec data2_vec = Vec::loadu(data2 + d, size - d); data_vec = map_fun(data_vec, data2_vec); acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); } return vec_reduce_all(red_fun, acc_vec); } template <typename scalar_t, typename MapOp, typename ReduceOp, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline scalar_t map3_reduce_all( const MapOp& map_fun, const ReduceOp& red_fun, const scalar_t* data, const scalar_t* data2, const scalar_t* data3, int64_t size) { using Vec = vec::Vectorized<scalar_t>; if (size < Vec::size()) { Vec data_vec = Vec::loadu(data, size); Vec data2_vec = Vec::loadu(data2, size); Vec data3_vec = Vec::loadu(data3, size); data_vec = map_fun(data_vec, data2_vec, data3_vec); return vec_reduce_all(red_fun, data_vec, size); } int64_t d = Vec::size(); Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3)); for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec data_vec = Vec::loadu(data + d); Vec data2_vec = Vec::loadu(data2 + d); Vec data3_vec = Vec::loadu(data3 + d); data_vec 
= map_fun(data_vec, data2_vec, data3_vec); acc_vec = red_fun(acc_vec, data_vec); } if (size - d > 0) { Vec data_vec = Vec::loadu(data + d, size - d); Vec data2_vec = Vec::loadu(data2 + d, size - d); Vec data3_vec = Vec::loadu(data3 + d, size - d); data_vec = map_fun(data_vec, data2_vec, data3_vec); acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d); } return vec_reduce_all(red_fun, acc_vec); } template <typename scalar_t, typename Op, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map( const Op& vec_fun, scalar_t* output_data, const scalar_t* input_data, int64_t size) { using Vec = vec::Vectorized<scalar_t>; int64_t d = 0; for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec output_vec = vec_fun(Vec::loadu(input_data + d)); output_vec.store(output_data + d); } if (size - d > 0) { Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d)); output_vec.store(output_data + d, size - d); } } template <typename scalar_t, typename Op, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map2( const Op& vec_fun, scalar_t* output_data, const scalar_t* input_data, const scalar_t* input_data2, int64_t size) { using Vec = vec::Vectorized<scalar_t>; int64_t d = 0; for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec data_vec = Vec::loadu(input_data + d); Vec data_vec2 = Vec::loadu(input_data2 + d); Vec output_vec = vec_fun(data_vec, data_vec2); output_vec.store(output_data + d); } if (size - d > 0) { Vec data_vec = Vec::loadu(input_data + d, size - d); Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); Vec output_vec = vec_fun(data_vec, data_vec2); output_vec.store(output_data + d, size - d); } } template <typename scalar_t, typename Op, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map3( const Op& vec_fun, scalar_t* output_data, const scalar_t* input_data1, const scalar_t* input_data2, const scalar_t* input_data3, int64_t size) { using Vec = vec::Vectorized<scalar_t>; int64_t d = 0; for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec data_vec1 = Vec::loadu(input_data1 + d); Vec data_vec2 = Vec::loadu(input_data2 + d); Vec data_vec3 = Vec::loadu(input_data3 + d); Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3); output_vec.store(output_data + d); } if (size - d > 0) { Vec data_vec1 = Vec::loadu(input_data1 + d, size - d); Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); Vec data_vec3 = Vec::loadu(input_data3 + d, size - d); Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3); output_vec.store(output_data + d, size - d); } } template <typename scalar_t, typename Op, typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map4( const Op& vec_fun, scalar_t* output_data, const scalar_t* input_data1, const scalar_t* input_data2, const scalar_t* input_data3, const scalar_t* input_data4, int64_t size) { using Vec = vec::Vectorized<scalar_t>; int64_t d = 0; for (; d < size - (size % Vec::size()); d += Vec::size()) { Vec data_vec1 = Vec::loadu(input_data1 + d); Vec data_vec2 = Vec::loadu(input_data2 + d); Vec data_vec3 = Vec::loadu(input_data3 + d); Vec data_vec4 = Vec::loadu(input_data4 + d); Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4); output_vec.store(output_data + d); } if (size - d > 0) { Vec data_vec1 = Vec::loadu(input_data1 + d, size - d); Vec data_vec2 = Vec::loadu(input_data2 + d, size - d); Vec data_vec3 = Vec::loadu(input_data3 + d, size - d); Vec 
data_vec4 = Vec::loadu(input_data4 + d, size - d); Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4); output_vec.store(output_data + d, size - d); } } }} // namespace at::vec
11,772
34.675758
91
h
null
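A hedged sketch using the helpers above in place of hand-written SIMD loops; the doubling map and max reduction are arbitrary examples chosen for illustration.

#include <ATen/cpu/vec/functional.h>

#include <vector>

float scale_and_max(std::vector<float>& buf) {
  using Vec = at::vec::Vectorized<float>;
  // Elementwise buf[i] *= 2: a vectorized main loop plus a masked tail
  // load/store, exactly as map() implements above.
  at::vec::map(
      [](Vec x) { return x * Vec(2.0f); }, buf.data(), buf.data(), buf.size());
  // Horizontal max with the same tail handling, finished by vec_reduce_all.
  return at::vec::reduce_all<float>(
      [](Vec a, Vec b) { return at::vec::maximum(a, b); },
      buf.data(),
      static_cast<int64_t>(buf.size()));
}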
pytorch-main/aten/src/ATen/cpu/vec/functional_bfloat16.h
#pragma once // DO NOT DEFINE STATIC DATA IN THIS HEADER! // See Note [Do not compile initializers with AVX] #include <ATen/cpu/vec/vec.h> namespace at { namespace vec { // BFloat16 specification template <typename scalar_t> struct VecScalarType { using type = scalar_t; }; template <> struct VecScalarType<BFloat16> { using type = float; }; template <> struct VecScalarType<Half> { using type = float; }; // This is different from at::acc_type since we only need to specialize BFloat16 template <typename scalar_t> using vec_scalar_t = typename VecScalarType<scalar_t>::type; // Vector conversion between float and bfloat16/half template <typename scalar_t, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float(const Vectorized<scalar_t>&); template <> inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<BFloat16> (const Vectorized<BFloat16>& a) { return convert_bfloat16_float(a); } template <> inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<Half> (const Vectorized<Half>& a) { return convert_half_float(a); } template <typename scalar_t, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline Vectorized<scalar_t> convert_from_float(const Vectorized<float>&, const Vectorized<float>&); template <> inline Vectorized<BFloat16> convert_from_float<BFloat16>(const Vectorized<float>& a, const Vectorized<float>& b) { return convert_float_bfloat16(a, b); } template <> inline Vectorized<Half> convert_from_float<Half>(const Vectorized<float>& a, const Vectorized<float>& b) { return convert_float_half(a, b); } // Note that we already have specialized member of Vectorized<scalar_t> for BFloat16 // so the following functions would run smoothly: // using Vec = Vectorized<BFloat16>; // Vec one = Vec(BFloat16(1)); // vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N); // // Then why we still need to specialize "functional"? // If we do specialization at Vectorized<> level, the above example would need 3 pairs of // conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/". // If we do specialization at vec::map<>() level, we have only 1 pair of conversion // of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only. // // The following BFloat16 functionality will only do data type conversion for input // and output vector (reduce functionality will only convert the final scalar back to bf16). // Compared to Vectorized<> specialization, // 1. better performance since we have less data type conversion; // 2. less rounding error since immediate results are kept in fp32; // 3. accumulation done on data type of fp32. 
// // If you plan to extend this file, please ensure adding unit tests at // aten/src/ATen/test/vec_test_all_types.cpp // template <typename scalar_t, typename Op, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) { using bVec = vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; if (size < bVec::size()) { bVec data_bvec = bVec::loadu(data, size); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); if (size > fVec::size()) { data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size()); return vec_reduce_all<float>(vec_fun, data_fvec0, fVec::size()); } else { return vec_reduce_all<float>(vec_fun, data_fvec0, size); } } int64_t d = bVec::size(); bVec acc_bvec = bVec::loadu(data); fVec acc_fvec0, acc_fvec1; std::tie(acc_fvec0, acc_fvec1) = convert_to_float<scalar_t>(acc_bvec); for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data_bvec = bVec::loadu(data + d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); acc_fvec0 = vec_fun(acc_fvec0, data_fvec0); acc_fvec1 = vec_fun(acc_fvec1, data_fvec1); } if (size - d > 0) { bVec data_bvec = bVec::loadu(data + d, size - d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); if (size - d > fVec::size()) { acc_fvec0 = vec_fun(acc_fvec0, data_fvec0); acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); } else { acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d); } } acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1); return vec_reduce_all<float>(vec_fun, acc_fvec0); } template <typename scalar_t, typename Op1, typename Op2, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2, const scalar_t* data, int64_t size) { using bVec = vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; if (size < bVec::size()) { bVec data_bvec = bVec::loadu(data, size); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); if (size > fVec::size()) { fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size()); fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size()); return std::pair<scalar_t, scalar_t>( vec_reduce_all<float>(vec_fun1, acc1_fvec, fVec::size()), vec_reduce_all<float>(vec_fun2, acc2_fvec, fVec::size())); } else { return std::pair<scalar_t, scalar_t>( vec_reduce_all<float>(vec_fun1, data_fvec0, size), vec_reduce_all<float>(vec_fun2, data_fvec0, size)); } } int64_t d = bVec::size(); bVec acc_bvec = bVec::loadu(data); fVec acc1_fvec0, acc1_fvec1; std::tie(acc1_fvec0, acc1_fvec1) = convert_to_float<scalar_t>(acc_bvec); fVec acc2_fvec0, acc2_fvec1; std::tie(acc2_fvec0, acc2_fvec1) = convert_to_float<scalar_t>(acc_bvec); for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data_bvec = bVec::loadu(data + d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0); acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1); acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0); acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1); } if (size - d > 0) { bVec data_bvec = bVec::loadu(data + 
d, size - d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); if (size - d > fVec::size()) { acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0); acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size()); acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0); acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size()); } else { acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d); acc2_fvec0 = fVec::set(acc2_fvec0, vec_fun2(acc2_fvec0, data_fvec0), size - d); } } acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1); acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1); return std::pair<scalar_t, scalar_t>( vec_reduce_all<float>(vec_fun1, acc1_fvec0), vec_reduce_all<float>(vec_fun2, acc2_fvec0)); } template <typename scalar_t, typename MapOp, typename ReduceOp, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline scalar_t map_reduce_all( const MapOp& map_fun, const ReduceOp& red_fun, const scalar_t* data, int64_t size) { using bVec = vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; if (size < bVec::size()) { bVec data_bvec = bVec::loadu(data, size); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); if (size > fVec::size()) { data_fvec0 = map_fun(data_fvec0); data_fvec1 = map_fun(data_fvec1); data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size()); } else { data_fvec0 = map_fun(data_fvec0); return vec_reduce_all<float>(red_fun, data_fvec0, size); } } int64_t d = bVec::size(); bVec acc_bvec = bVec::loadu(data); fVec acc_fvec0, acc_fvec1; std::tie(acc_fvec0, acc_fvec1) = convert_to_float<scalar_t>(acc_bvec); acc_fvec0 = map_fun(acc_fvec0); acc_fvec1 = map_fun(acc_fvec1); for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data_bvec = bVec::loadu(data + d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); data_fvec0 = map_fun(data_fvec0); data_fvec1 = map_fun(data_fvec1); acc_fvec0 = red_fun(acc_fvec0, data_fvec0); acc_fvec1 = red_fun(acc_fvec1, data_fvec1); } if (size - d > 0) { bVec data_bvec = bVec::loadu(data + d, size - d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); if (size - d > fVec::size()) { data_fvec0 = map_fun(data_fvec0); data_fvec1 = map_fun(data_fvec1); acc_fvec0 = red_fun(acc_fvec0, data_fvec0); acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); } else { data_fvec0 = map_fun(data_fvec0); acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); } } acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); return vec_reduce_all<float>(red_fun, acc_fvec0); } template <typename scalar_t, typename MapOp, typename ReduceOp, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline scalar_t map2_reduce_all( const MapOp& map_fun, const ReduceOp& red_fun, const scalar_t* data, const scalar_t* data2, int64_t size) { using bVec = vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; if (size < bVec::size()) { bVec data_bvec = bVec::loadu(data, size); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); bVec data2_bvec = bVec::loadu(data2, size); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = 
convert_to_float<scalar_t>(data2_bvec); if (size > fVec::size()) { data_fvec0 = map_fun(data_fvec0, data2_fvec0); data_fvec1 = map_fun(data_fvec1, data2_fvec1); data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size()); } else { data_fvec0 = map_fun(data_fvec0, data2_fvec0); return vec_reduce_all<float>(red_fun, data_fvec0, size); } } int64_t d = bVec::size(); bVec acc_bvec = bVec::loadu(data); fVec acc_fvec0, acc_fvec1; std::tie(acc_fvec0, acc_fvec1) = convert_to_float<scalar_t>(acc_bvec); bVec acc2_bvec = bVec::loadu(data2); fVec acc2_fvec0, acc2_fvec1; std::tie(acc2_fvec0, acc2_fvec1) = convert_to_float<scalar_t>(acc2_bvec); acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0); acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1); for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data_bvec = bVec::loadu(data + d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); bVec data2_bvec = bVec::loadu(data2 + d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); data_fvec0 = map_fun(data_fvec0, data2_fvec0); data_fvec1 = map_fun(data_fvec1, data2_fvec1); acc_fvec0 = red_fun(acc_fvec0, data_fvec0); acc_fvec1 = red_fun(acc_fvec1, data_fvec1); } if (size - d > 0) { bVec data_bvec = bVec::loadu(data + d, size - d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); bVec data2_bvec = bVec::loadu(data2 + d, size - d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); if (size - d > fVec::size()) { data_fvec0 = map_fun(data_fvec0, data2_fvec0); data_fvec1 = map_fun(data_fvec1, data2_fvec1); acc_fvec0 = red_fun(acc_fvec0, data_fvec0); acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); } else { data_fvec0 = map_fun(data_fvec0, data2_fvec0); acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); } } acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); return vec_reduce_all<float>(red_fun, acc_fvec0); } template <typename scalar_t, typename MapOp, typename ReduceOp, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline scalar_t map3_reduce_all( const MapOp& map_fun, const ReduceOp& red_fun, const scalar_t* data, const scalar_t* data2, const scalar_t* data3, int64_t size) { using bVec = vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; if (size < bVec::size()) { bVec data_bvec = bVec::loadu(data, size); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); bVec data2_bvec = bVec::loadu(data2, size); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); bVec data3_bvec = bVec::loadu(data3, size); fVec data3_fvec0, data3_fvec1; std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec); if (size > fVec::size()) { data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size()); return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size()); } else { data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); return vec_reduce_all<float>(red_fun, data_fvec0, size); } } int64_t d = bVec::size(); bVec acc_bvec = bVec::loadu(data); fVec acc_fvec0, 
acc_fvec1; std::tie(acc_fvec0, acc_fvec1) = convert_to_float<scalar_t>(acc_bvec); bVec acc2_bvec = bVec::loadu(data2); fVec acc2_fvec0, acc2_fvec1; std::tie(acc2_fvec0, acc2_fvec1) = convert_to_float<scalar_t>(acc2_bvec); bVec acc3_bvec = bVec::loadu(data3); fVec acc3_fvec0, acc3_fvec1; std::tie(acc3_fvec0, acc3_fvec1) = convert_to_float<scalar_t>(acc3_bvec); acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0); acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1); for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data_bvec = bVec::loadu(data + d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); bVec data2_bvec = bVec::loadu(data2 + d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); bVec data3_bvec = bVec::loadu(data3 + d); fVec data3_fvec0, data3_fvec1; std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec); data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); acc_fvec0 = red_fun(acc_fvec0, data_fvec0); acc_fvec1 = red_fun(acc_fvec1, data_fvec1); } if (size - d > 0) { bVec data_bvec = bVec::loadu(data + d, size - d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); bVec data2_bvec = bVec::loadu(data2 + d, size - d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); bVec data3_bvec = bVec::loadu(data3 + d, size - d); fVec data3_fvec0, data3_fvec1; std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec); if (size - d > fVec::size()) { data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1); acc_fvec0 = red_fun(acc_fvec0, data_fvec0); acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size()); } else { data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0); acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d); } } acc_fvec0 = red_fun(acc_fvec0, acc_fvec1); return vec_reduce_all<float>(red_fun, acc_fvec0); } template <typename scalar_t, typename Op, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map( const Op& vec_fun, scalar_t* output_data, const scalar_t* input_data, int64_t size) { using bVec = vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; int64_t d = 0; for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data_bvec = bVec::loadu(input_data + d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); fVec output_fvec0 = vec_fun(data_fvec0); fVec output_fvec1 = vec_fun(data_fvec1); bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1); output_bvec.store(output_data + d); } if (size - d > 0) { bVec data_bvec = bVec::loadu(input_data + d, size - d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); fVec output_fvec0 = vec_fun(data_fvec0); fVec output_fvec1 = vec_fun(data_fvec1); bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1); output_bvec.store(output_data + d, size - d); } } template <typename scalar_t, typename Op, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map2( const Op& vec_fun, scalar_t* output_data, const scalar_t* input_data, const scalar_t* input_data2, 
int64_t size) { using bVec = vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; int64_t d = 0; for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data_bvec = bVec::loadu(input_data + d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); bVec data2_bvec = bVec::loadu(input_data2 + d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0); fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1); bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1); output_bvec.store(output_data + d); } if (size - d > 0) { bVec data_bvec = bVec::loadu(input_data + d, size - d); fVec data_fvec0, data_fvec1; std::tie(data_fvec0, data_fvec1) = convert_to_float<scalar_t>(data_bvec); bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0); fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1); bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1); output_bvec.store(output_data + d, size - d); } } template <typename scalar_t, typename Op, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map3( const Op& vec_fun, scalar_t* output_data, const scalar_t* input_data1, const scalar_t* input_data2, const scalar_t* input_data3, int64_t size) { using bVec = vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; int64_t d = 0; for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data1_bvec = bVec::loadu(input_data1 + d); fVec data1_fvec0, data1_fvec1; std::tie(data1_fvec0, data1_fvec1) = convert_to_float<scalar_t>(data1_bvec); bVec data2_bvec = bVec::loadu(input_data2 + d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); bVec data3_bvec = bVec::loadu(input_data3 + d); fVec data3_fvec0, data3_fvec1; std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec); fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0); fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1); bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1); output_bvec.store(output_data + d); } if (size - d > 0) { bVec data1_bvec = bVec::loadu(input_data1 + d, size - d); fVec data1_fvec0, data1_fvec1; std::tie(data1_fvec0, data1_fvec1) = convert_to_float<scalar_t>(data1_bvec); bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); bVec data3_bvec = bVec::loadu(input_data3 + d, size - d); fVec data3_fvec0, data3_fvec1; std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec); fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0); fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1); bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1); output_bvec.store(output_data + d, size - d); } } template <typename scalar_t, typename Op, typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0> inline void map4( const Op& vec_fun, scalar_t* output_data, const scalar_t* input_data1, const scalar_t* input_data2, const scalar_t* input_data3, const scalar_t* input_data4, int64_t size) { using bVec = 
vec::Vectorized<scalar_t>; using fVec = vec::Vectorized<float>; int64_t d = 0; for (; d < size - (size % bVec::size()); d += bVec::size()) { bVec data1_bvec = bVec::loadu(input_data1 + d); fVec data1_fvec0, data1_fvec1; std::tie(data1_fvec0, data1_fvec1) = convert_to_float<scalar_t>(data1_bvec); bVec data2_bvec = bVec::loadu(input_data2 + d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); bVec data3_bvec = bVec::loadu(input_data3 + d); fVec data3_fvec0, data3_fvec1; std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec); bVec data4_bvec = bVec::loadu(input_data4 + d); fVec data4_fvec0, data4_fvec1; std::tie(data4_fvec0, data4_fvec1) = convert_to_float<scalar_t>(data4_bvec); fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0); fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1); bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1); output_bvec.store(output_data + d); } if (size - d > 0) { bVec data1_bvec = bVec::loadu(input_data1 + d, size - d); fVec data1_fvec0, data1_fvec1; std::tie(data1_fvec0, data1_fvec1) = convert_to_float<scalar_t>(data1_bvec); bVec data2_bvec = bVec::loadu(input_data2 + d, size - d); fVec data2_fvec0, data2_fvec1; std::tie(data2_fvec0, data2_fvec1) = convert_to_float<scalar_t>(data2_bvec); bVec data3_bvec = bVec::loadu(input_data3 + d, size - d); fVec data3_fvec0, data3_fvec1; std::tie(data3_fvec0, data3_fvec1) = convert_to_float<scalar_t>(data3_bvec); bVec data4_bvec = bVec::loadu(input_data4 + d, size - d); fVec data4_fvec0, data4_fvec1; std::tie(data4_fvec0, data4_fvec1) = convert_to_float<scalar_t>(data4_bvec); fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0); fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1); bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1); output_bvec.store(output_data + d, size - d); } } }} // namespace at::vec
24,270
43.946296
116
h
null
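A minimal usage sketch for the reduced-precision functional helpers above, written as a standalone snippet; the function name sigmoid_then_max and the sigmoid lambda are illustrative, not part of the header, and it assumes a build where Vectorized<BFloat16> is available.
// Hypothetical example: one bf16->fp32 conversion on load, one fp32->bf16 on
// store; all arithmetic and accumulation stays in fp32 lanes, as described above.
#include <ATen/cpu/vec/functional.h>
#include <cstdint>

void sigmoid_then_max(const at::BFloat16* x, at::BFloat16* y, int64_t n) {
  using fVec = at::vec::Vectorized<float>;
  // map() converts bf16->fp32 once per load and fp32->bf16 once per store.
  at::vec::map(
      [](fVec v) { return fVec(1.0f) / (fVec(1.0f) + v.neg().exp()); },
      y, x, n);
  // reduce_all() accumulates in fp32 and rounds only the final scalar to bf16.
  at::BFloat16 m = at::vec::reduce_all(
      [](fVec a, fVec b) { return at::vec::maximum(a, b); }, y, n);
  (void)m;
}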
pytorch-main/aten/src/ATen/cpu/vec/intrinsics.h
#pragma once #if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) /* GCC or clang-compatible compiler, targeting x86/x86-64 */ #include <x86intrin.h> #elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__)) /* Clang-compatible compiler, targeting arm neon */ #include <arm_neon.h> #elif defined(_MSC_VER) /* Microsoft C/C++-compatible compiler */ #include <intrin.h> #if _MSC_VER <= 1900 #define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2)) #define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4)) #define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8)) #define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16)) #endif #elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__)) /* GCC-compatible compiler, targeting ARM with NEON */ #include <arm_neon.h> #if defined (MISSING_ARM_VLD1) #include <ATen/cpu/vec/vec256/missing_vld1_neon.h> #elif defined (MISSING_ARM_VST1) #include <ATen/cpu/vec/vec256/missing_vst1_neon.h> #endif #elif defined(__GNUC__) && defined(__IWMMXT__) /* GCC-compatible compiler, targeting ARM with WMMX */ #include <mmintrin.h> #elif defined(__s390x__) // targets Z/architecture // we will include vecintrin later #elif (defined(__GNUC__) || defined(__xlC__)) && \ (defined(__VEC__) || defined(__ALTIVEC__)) /* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */ #include <altivec.h> /* We need to undef those tokens defined by <altivec.h> to avoid conflicts with the C++ types. => Can still use __bool/__vector */ #undef bool #undef vector #undef pixel #elif defined(__GNUC__) && defined(__SPE__) /* GCC-compatible compiler, targeting PowerPC with SPE */ #include <spe.h> #endif
1,880
41.75
98
h
null
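A small standalone sketch of the index arithmetic behind the MSVC <= VS2015 fallback macros above: a 256-bit register splits into two 128-bit lanes, so epi32 element Y lives in lane (Y >> 2) at offset (Y % 4). The helper names are illustrative.
#include <cassert>

constexpr int lane_epi32(int y) { return y >> 2; }   // which 128-bit lane holds element y
constexpr int offset_epi32(int y) { return y % 4; }  // slot of element y inside that lane

int main() {
  assert(lane_epi32(5) == 1 && offset_epi32(5) == 1);  // element 5 -> lane 1, slot 1
  assert(lane_epi32(3) == 0 && offset_epi32(3) == 3);  // element 3 -> lane 0, slot 3
  return 0;
}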
pytorch-main/aten/src/ATen/cpu/vec/vec.h
#pragma once #if defined(CPU_CAPABILITY_AVX512) #include <ATen/cpu/vec/vec512/vec512.h> #else #include <ATen/cpu/vec/vec256/vec256.h> #endif namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) { __at_align__ bool buffer[x.size()]; x.ne(Vectorized<int8_t>(0)).store(buffer); Vectorized<bool> ret; static_assert(x.size() == ret.size(), ""); std::memcpy(ret, buffer, ret.size() * sizeof(bool)); return ret; } template <> inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) { // See NOTE [Loading boolean values] return convert_to_bool(Vectorized<int8_t>::loadu(ptr)); } template <> inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) { // See NOTE [Loading boolean values] return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count)); } template <typename VT> struct VecHoldType { using hold_type = typename VT::value_type; }; template <> struct VecHoldType<Vectorized<BFloat16>> { using hold_type = BFloat16; }; template <> struct VecHoldType<Vectorized<Half>> { using hold_type = Half; }; template <typename VT> using vechold_type = typename VecHoldType<VT>::hold_type; }}} // namespace at::vec::CPU_CAPABILITY
1,296
25.469388
81
h
null
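Illustrative compile-time checks for the trait above, assuming this header is on the include path: vechold_type recovers the element type of a Vectorized<> instantiation, including the BFloat16/Half specializations.
#include <ATen/cpu/vec/vec.h>
#include <type_traits>

// Each assertion documents what the trait yields for one instantiation.
static_assert(std::is_same<at::vec::vechold_type<at::vec::Vectorized<float>>, float>::value, "float");
static_assert(std::is_same<at::vec::vechold_type<at::vec::Vectorized<at::BFloat16>>, at::BFloat16>::value, "bf16");
static_assert(std::is_same<at::vec::vechold_type<at::vec::Vectorized<at::Half>>, at::Half>::value, "half");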
pytorch-main/aten/src/ATen/cpu/vec/vec256/missing_vld1_neon.h
/* Workaround for missing vld1_*_x2 and vst1_*_x2 intrinsics in gcc-7. */ __extension__ extern __inline uint8x8x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_u8_x2 (const uint8_t *__a) { uint8x8x2_t ret; asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline int8x8x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_s8_x2 (const int8_t *__a) { int8x8x2_t ret; asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline uint16x4x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_u16_x2 (const uint16_t *__a) { uint16x4x2_t ret; asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline int16x4x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_s16_x2 (const int16_t *__a) { int16x4x2_t ret; asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline uint32x2x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_u32_x2 (const uint32_t *__a) { uint32x2x2_t ret; asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline int32x2x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_s32_x2 (const int32_t *__a) { int32x2x2_t ret; asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline uint64x1x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_u64_x2 (const uint64_t *__a) { uint64x1x2_t ret; asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline int64x1x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_s64_x2 (const int64_t *__a) { int64x1x2_t ret; asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline float16x4x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_f16_x2 (const float16_t *__a) { float16x4x2_t ret; asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline float32x2x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_f32_x2 (const float32_t *__a) { float32x2x2_t ret; asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline float64x1x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_f64_x2 (const float64_t *__a) { float64x1x2_t ret; asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline poly8x8x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_p8_x2 (const poly8_t *__a) { poly8x8x2_t ret; asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline poly16x4x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_p16_x2 (const poly16_t *__a) { poly16x4x2_t ret; asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline poly64x1x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1_p64_x2 (const poly64_t *__a) { poly64x1x2_t ret; asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } 
__extension__ extern __inline uint8x16x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_u8_x2 (const uint8_t *__a) { uint8x16x2_t ret; asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline int8x16x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_s8_x2 (const int8_t *__a) { int8x16x2_t ret; asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline uint16x8x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_u16_x2 (const uint16_t *__a) { uint16x8x2_t ret; asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline int16x8x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_s16_x2 (const int16_t *__a) { int16x8x2_t ret; asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline uint32x4x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_u32_x2 (const uint32_t *__a) { uint32x4x2_t ret; asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline int32x4x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_s32_x2 (const int32_t *__a) { int32x4x2_t ret; asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline uint64x2x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_u64_x2 (const uint64_t *__a) { uint64x2x2_t ret; asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline int64x2x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_s64_x2 (const int64_t *__a) { int64x2x2_t ret; asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline float16x8x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_f16_x2 (const float16_t *__a) { float16x8x2_t ret; asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline float32x4x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_f32_x2 (const float32_t *__a) { float32x4x2_t ret; asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline float64x2x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_f64_x2 (const float64_t *__a) { float64x2x2_t ret; asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline poly8x16x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_p8_x2 (const poly8_t *__a) { poly8x16x2_t ret; asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline poly16x8x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_p16_x2 (const poly16_t *__a) { poly16x8x2_t ret; asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } __extension__ extern __inline poly64x2x2_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vld1q_p64_x2 (const poly64_t *__a) { poly64x2x2_t ret; asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a)); return ret; } /* vst1x2 */ __extension__ extern __inline void __attribute__ ((__always_inline__, 
__gnu_inline__, __artificial__)) vst1_s64_x2 (int64_t * __a, int64x1x2_t val) { asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_u64_x2 (uint64_t * __a, uint64x1x2_t val) { asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_f64_x2 (float64_t * __a, float64x1x2_t val) { asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_s8_x2 (int8_t * __a, int8x8x2_t val) { asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_p8_x2 (poly8_t * __a, poly8x8x2_t val) { asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_s16_x2 (int16_t * __a, int16x4x2_t val) { asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_p16_x2 (poly16_t * __a, poly16x4x2_t val) { asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_s32_x2 (int32_t * __a, int32x2x2_t val) { asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_u8_x2 (uint8_t * __a, uint8x8x2_t val) { asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_u16_x2 (uint16_t * __a, uint16x4x2_t val) { asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_u32_x2 (uint32_t * __a, uint32x2x2_t val) { asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_f16_x2 (float16_t * __a, float16x4x2_t val) { asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_f32_x2 (float32_t * __a, float32x2x2_t val) { asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1_p64_x2 (poly64_t * __a, poly64x1x2_t val) { asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_s8_x2 (int8_t * __a, int8x16x2_t val) { asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_p8_x2 (poly8_t * __a, poly8x16x2_t val) { asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ 
((__always_inline__, __gnu_inline__, __artificial__)) vst1q_s16_x2 (int16_t * __a, int16x8x2_t val) { asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_p16_x2 (poly16_t * __a, poly16x8x2_t val) { asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_s32_x2 (int32_t * __a, int32x4x2_t val) { asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_s64_x2 (int64_t * __a, int64x2x2_t val) { asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_u8_x2 (uint8_t * __a, uint8x16x2_t val) { asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_u16_x2 (uint16_t * __a, uint16x8x2_t val) { asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_u32_x2 (uint32_t * __a, uint32x4x2_t val) { asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_u64_x2 (uint64_t * __a, uint64x2x2_t val) { asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_f16_x2 (float16_t * __a, float16x8x2_t val) { asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_f32_x2 (float32_t * __a, float32x4x2_t val) { asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_f64_x2 (float64_t * __a, float64x2x2_t val) { asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); } __extension__ extern __inline void __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vst1q_p64_x2 (poly64_t * __a, poly64x2x2_t val) { asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val)); }
13,559
28.933775
74
h
null
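A hypothetical smoke test for the gcc-7 workaround intrinsics above (aarch64 only; the function name is illustrative): vld1q_f32_x2 loads eight floats into two q-registers at once, and vst1q_f32_x2 stores both back.
#include <arm_neon.h>

void scale_8_floats(float* p, float s) {
  float32x4x2_t v = vld1q_f32_x2(p);    // v.val[0] = p[0..3], v.val[1] = p[4..7]
  v.val[0] = vmulq_n_f32(v.val[0], s);  // multiply each lane by the scalar s
  v.val[1] = vmulq_n_f32(v.val[1], s);
  vst1q_f32_x2(p, v);                   // write p[0..7] back in one store
}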
pytorch-main/aten/src/ATen/cpu/vec/vec256/vec256.h
#pragma once // DO NOT DEFINE STATIC DATA IN THIS HEADER! // See Note [Do not compile initializers with AVX] #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #if !(defined(__VSX__) || defined(CPU_CAPABILITY_VSX) || defined(CPU_CAPABILITY_ZVECTOR)) #include <ATen/cpu/vec/vec256/vec256_float.h> #include <ATen/cpu/vec/vec256/vec256_float_neon.h> #include <ATen/cpu/vec/vec256/vec256_bfloat16.h> #include <ATen/cpu/vec/vec256/vec256_double.h> #include <ATen/cpu/vec/vec256/vec256_int.h> #include <ATen/cpu/vec/vec256/vec256_qint.h> #include <ATen/cpu/vec/vec256/vec256_complex_float.h> #include <ATen/cpu/vec/vec256/vec256_complex_double.h> #elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX) #include <ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h> #else #include <ATen/cpu/vec/vec256/zarch/vec256_zarch.h> #include <ATen/cpu/vec/vec256/vec256_bfloat16.h> #endif #include <algorithm> #include <cstddef> #include <cstdint> #include <cstring> #include <ostream> namespace at { namespace vec { // Note [CPU_CAPABILITY namespace] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // This header, and all of its subheaders, will be compiled with // different architecture flags for each supported set of vector // intrinsics. So we need to make sure they aren't inadvertently // linked together. We do this by declaring objects in an `inline // namespace` which changes the name mangling, but can still be // accessed as `at::vec`. inline namespace CPU_CAPABILITY { inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) { stream << val.val_; return stream; } inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) { stream << static_cast<int>(val.val_); return stream; } inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) { stream << static_cast<unsigned int>(val.val_); return stream; } template <typename T> std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) { T buf[Vectorized<T>::size()]; vec.store(buf); stream << "vec["; for (int i = 0; i != Vectorized<T>::size(); i++) { if (i != 0) { stream << ", "; } stream << buf[i]; } stream << "]"; return stream; } #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<> inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) { return _mm256_castpd_ps(src); } template<> inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) { return _mm256_castps_pd(src); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<int64_t scale = 1> std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>> inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) { return _mm256_i64gather_pd(base_addr, vindex, scale); } template<int64_t scale = 1> std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>> inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) { return _mm256_i32gather_ps(base_addr, vindex, scale); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<int64_t scale = 1> std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>> inline mask_gather(const Vectorized<double>& src, const double* base_addr, const Vectorized<int64_t>& vindex, const Vectorized<double>& mask) { return _mm256_mask_i64gather_pd(src, base_addr, vindex, mask, scale); 
} template<int64_t scale = 1> std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>> inline mask_gather(const Vectorized<float>& src, const float* base_addr, const Vectorized<int32_t>& vindex, const Vectorized<float>& mask) { return _mm256_mask_i32gather_ps(src, base_addr, vindex, mask, scale); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Only works for inputs in the range: [-2^51, 2^51] // From: https://stackoverflow.com/a/41148578 template<> Vectorized<int64_t> inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) { auto x = _mm256_add_pd(src, _mm256_set1_pd(0x0018000000000000)); return _mm256_sub_epi64( _mm256_castpd_si256(x), _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000)) ); } template<> Vectorized<int32_t> inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) { return _mm256_cvttps_epi32(src); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <> std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) { // inputs: // a = {a0, a1, a2, a3} // b = {b0, b1, b2, b3} // swap lanes: // a_swapped = {a0, a1, b0, b1} // b_swapped = {a2, a3, b2, b3} auto a_swapped = _mm256_permute2f128_pd(a, b, 0b0100000); // 0, 2. 4 bits apart auto b_swapped = _mm256_permute2f128_pd(a, b, 0b0110001); // 1, 3. 4 bits apart // group cols crossing lanes: // return {a0, b0, a1, b1} // {a2, b2, a3, b3} return std::make_pair(_mm256_permute4x64_pd(a_swapped, 0b11011000), // 0, 2, 1, 3 _mm256_permute4x64_pd(b_swapped, 0b11011000)); // 0, 2, 1, 3 } template <> std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) { // inputs: // a = {a0, a1, a2, a3, a4, a5, a6, a7} // b = {b0, b1, b2, b3, b4, b5, b6, b7} // swap lanes: // a_swapped = {a0, a1, a2, a3, b0, b1, b2, b3} // b_swapped = {a4, a5, a6, a7, b4, b5, b6, b7} // TODO: can we support caching this? auto a_swapped = _mm256_permute2f128_ps(a, b, 0b0100000); // 0, 2. 4 bits apart auto b_swapped = _mm256_permute2f128_ps(a, b, 0b0110001); // 1, 3. 4 bits apart // group cols crossing lanes: // return {a0, b0, a1, b1, a2, b2, a3, b3} // {a4, b4, a5, b5, a6, b6, a7, b7} const __m256i group_ctrl = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7); return std::make_pair(_mm256_permutevar8x32_ps(a_swapped, group_ctrl), _mm256_permutevar8x32_ps(b_swapped, group_ctrl)); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <> std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) { // inputs: // a = {a0, b0, a1, b1} // b = {a2, b2, a3, b3} // group cols crossing lanes: // a_grouped = {a0, a1, b0, b1} // b_grouped = {a2, a3, b2, b3} auto a_grouped = _mm256_permute4x64_pd(a, 0b11011000); // 0, 2, 1, 3 auto b_grouped = _mm256_permute4x64_pd(b, 0b11011000); // 0, 2, 1, 3 // swap lanes: // return {a0, a1, a2, a3} // {b0, b1, b2, b3} return std::make_pair(_mm256_permute2f128_pd(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart _mm256_permute2f128_pd(a_grouped, b_grouped, 0b0110001)); // 1, 3. 
4 bits apart } template <> std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) { // inputs: // a = {a0, b0, a1, b1, a2, b2, a3, b3} // b = {a4, b4, a5, b5, a6, b6, a7, b7} // group cols crossing lanes: // a_grouped = {a0, a1, a2, a3, b0, b1, b2, b3} // b_grouped = {a4, a5, a6, a7, b4, b5, b6, b7} // TODO: can we support caching this? const __m256i group_ctrl = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7); auto a_grouped = _mm256_permutevar8x32_ps(a, group_ctrl); auto b_grouped = _mm256_permutevar8x32_ps(b, group_ctrl); // swap lanes: // return {a0, a1, a2, a3, a4, a5, a6, a7} // {b0, b1, b2, b3, b4, b5, b6, b7} return std::make_pair(_mm256_permute2f128_ps(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart _mm256_permute2f128_ps(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<> inline Vectorized<float> flip(const Vectorized<float> & v) { const __m256i mask_float = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); return _mm256_permutevar8x32_ps(v, mask_float); } template<> inline Vectorized<double> flip(const Vectorized<double> & v) { return _mm256_permute4x64_pd(v, 27); // 27 == _MM_SHUFFLE(0, 1, 2, 3) } template<> inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) { return _mm256_permute4x64_epi64(v, 27); // 27 == _MM_SHUFFLE(0, 1, 2, 3) } template<> inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) { const __m256i mask_int32 = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7); return _mm256_permutevar8x32_epi32(v, mask_int32); } template<> inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) { const __m256i mask = _mm256_set_epi8( 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 ); auto reversed = _mm256_shuffle_epi8(v, mask); return _mm256_permute2x128_si256(reversed, reversed, 1); } inline __m256i flip8(const __m256i & v) { const __m256i mask_int8 = _mm256_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ); auto reversed = _mm256_shuffle_epi8(v, mask_int8); return _mm256_permute2x128_si256(reversed, reversed, 1); } template<> inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) { return flip8(v); } template<> inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) { return flip8(v); } #endif // (defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) }}}
9,871
34.131673
105
h
null
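An illustrative round trip for the lane-permutation helpers above, assuming an AVX2 build; the function name is illustrative. deinterleave2 inverts interleave2, turning {a0..a7}/{b0..b7} into {a0,b0,a1,b1,...}/{a4,b4,...} and back.
#include <ATen/cpu/vec/vec256/vec256.h>

void interleave_roundtrip() {
  using fVec = at::vec::Vectorized<float>;
  fVec a = fVec::arange(0.0f, 1.0f);             // {0, 1, 2, 3, 4, 5, 6, 7}
  fVec b = fVec::arange(8.0f, 1.0f);             // {8, 9, ..., 15}
  auto ab = at::vec::interleave2(a, b);          // {0,8,1,9,...} and {4,12,5,13,...}
  auto back = at::vec::deinterleave2(ab.first, ab.second);
  // back.first now holds a's lanes again and back.second holds b's lanes.
}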
pytorch-main/aten/src/ATen/cpu/vec/vec256/vec256_double.h
#pragma once // DO NOT DEFINE STATIC DATA IN THIS HEADER! // See Note [Do not compile initializers with AVX] #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <c10/util/irange.h> #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) #include <sleef.h> #endif namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) template <> class Vectorized<double> { private: __m256d values; public: using value_type = double; using size_type = int; static constexpr size_type size() { return 4; } Vectorized() {} Vectorized(__m256d v) : values(v) {} Vectorized(double val) { values = _mm256_set1_pd(val); } Vectorized(double val1, double val2, double val3, double val4) { values = _mm256_setr_pd(val1, val2, val3, val4); } operator __m256d() const { return values; } template <int64_t mask> static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm256_blend_pd(a.values, b.values, mask); } static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& mask) { return _mm256_blendv_pd(a.values, b.values, mask.values); } template<typename step_t> static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) { return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step); } static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b, int64_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b); } return b; } static Vectorized<double> loadu(const void* ptr, int64_t count = size()) { if (count == size()) return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr)); __at_align__ double tmp_values[size()]; // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two // instructions while a loop would be compiled to one instruction. 
for (const auto i : c10::irange(size())) { tmp_values[i] = 0.0; } std::memcpy( tmp_values, reinterpret_cast<const double*>(ptr), count * sizeof(double)); return _mm256_load_pd(tmp_values); } void store(void* ptr, int count = size()) const { if (count == size()) { _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values); } else if (count > 0) { double tmp_values[size()]; _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values); std::memcpy(ptr, tmp_values, count * sizeof(double)); } } const double& operator[](int idx) const = delete; double& operator[](int idx) = delete; int zero_mask() const { // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ); return _mm256_movemask_pd(cmp); } Vectorized<double> isnan() const { return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q); } Vectorized<double> map(double (*const f)(double)) const { __at_align__ double tmp[size()]; store(tmp); for (const auto i : c10::irange(size())) { tmp[i] = f(tmp[i]); } return loadu(tmp); } Vectorized<double> abs() const { auto mask = _mm256_set1_pd(-0.f); return _mm256_andnot_pd(mask, values); } Vectorized<double> angle() const { const auto zero_vec = _mm256_set1_pd(0.f); const auto nan_vec = _mm256_set1_pd(NAN); const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ); const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ); const auto pi = _mm256_set1_pd(c10::pi<double>); const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ); auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask); angle = _mm256_blendv_pd(angle, nan_vec, nan_mask); return angle; } Vectorized<double> real() const { return *this; } Vectorized<double> imag() const { return _mm256_set1_pd(0); } Vectorized<double> conj() const { return *this; } Vectorized<double> acos() const { return Vectorized<double>(Sleef_acosd4_u10(values)); } Vectorized<double> asin() const { return Vectorized<double>(Sleef_asind4_u10(values)); } Vectorized<double> atan() const { return Vectorized<double>(Sleef_atand4_u10(values)); } Vectorized<double> atan2(const Vectorized<double> &b) const { return Vectorized<double>(Sleef_atan2d4_u10(values, b)); } Vectorized<double> copysign(const Vectorized<double> &sign) const { return Vectorized<double>(Sleef_copysignd4(values, sign)); } Vectorized<double> erf() const { return Vectorized<double>(Sleef_erfd4_u10(values)); } Vectorized<double> erfc() const { return Vectorized<double>(Sleef_erfcd4_u15(values)); } Vectorized<double> erfinv() const { return map(calc_erfinv); } Vectorized<double> exp() const { return Vectorized<double>(Sleef_expd4_u10(values)); } Vectorized<double> exp2() const { return Vectorized<double>(Sleef_exp2d4_u10(values)); } Vectorized<double> expm1() const { return Vectorized<double>(Sleef_expm1d4_u10(values)); } Vectorized<double> fmod(const Vectorized<double>& q) const { return Vectorized<double>(Sleef_fmodd4(values, q)); } Vectorized<double> hypot(const Vectorized<double> &b) const { return Vectorized<double>(Sleef_hypotd4_u05(values, b)); } Vectorized<double> i0() const { return map(calc_i0); } Vectorized<double> i0e() const { return map(calc_i0e); } Vectorized<double> igamma(const Vectorized<double> &x) const { __at_align__ double tmp[size()]; __at_align__ double tmp_x[size()]; store(tmp); x.store(tmp_x); for (const auto i : c10::irange(size())) { tmp[i] = calc_igamma(tmp[i], tmp_x[i]); } return loadu(tmp); } Vectorized<double> igammac(const 
Vectorized<double> &x) const { __at_align__ double tmp[size()]; __at_align__ double tmp_x[size()]; store(tmp); x.store(tmp_x); for (const auto i : c10::irange(size())) { tmp[i] = calc_igammac(tmp[i], tmp_x[i]); } return loadu(tmp); } Vectorized<double> log() const { return Vectorized<double>(Sleef_logd4_u10(values)); } Vectorized<double> log2() const { return Vectorized<double>(Sleef_log2d4_u10(values)); } Vectorized<double> log10() const { return Vectorized<double>(Sleef_log10d4_u10(values)); } Vectorized<double> log1p() const { return Vectorized<double>(Sleef_log1pd4_u10(values)); } Vectorized<double> sin() const { return Vectorized<double>(Sleef_sind4_u10(values)); } Vectorized<double> sinh() const { return Vectorized<double>(Sleef_sinhd4_u10(values)); } Vectorized<double> cos() const { return Vectorized<double>(Sleef_cosd4_u10(values)); } Vectorized<double> cosh() const { return Vectorized<double>(Sleef_coshd4_u10(values)); } Vectorized<double> ceil() const { return _mm256_ceil_pd(values); } Vectorized<double> floor() const { return _mm256_floor_pd(values); } Vectorized<double> frac() const; Vectorized<double> neg() const { return _mm256_xor_pd(_mm256_set1_pd(-0.), values); } Vectorized<double> nextafter(const Vectorized<double> &b) const { return Vectorized<double>(Sleef_nextafterd4(values, b)); } Vectorized<double> round() const { return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); } Vectorized<double> tan() const { return Vectorized<double>(Sleef_tand4_u10(values)); } Vectorized<double> tanh() const { return Vectorized<double>(Sleef_tanhd4_u10(values)); } Vectorized<double> trunc() const { return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); } Vectorized<double> lgamma() const { return Vectorized<double>(Sleef_lgammad4_u10(values)); } Vectorized<double> sqrt() const { return _mm256_sqrt_pd(values); } Vectorized<double> reciprocal() const { return _mm256_div_pd(_mm256_set1_pd(1), values); } Vectorized<double> rsqrt() const { return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values)); } Vectorized<double> pow(const Vectorized<double> &b) const { return Vectorized<double>(Sleef_powd4_u10(values, b)); } // Comparison using the _CMP_**_OQ predicate. 
// `O`: get false if an operand is NaN // `Q`: do not raise if an operand is NaN Vectorized<double> operator==(const Vectorized<double>& other) const { return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ); } Vectorized<double> operator!=(const Vectorized<double>& other) const { return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ); } Vectorized<double> operator<(const Vectorized<double>& other) const { return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ); } Vectorized<double> operator<=(const Vectorized<double>& other) const { return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ); } Vectorized<double> operator>(const Vectorized<double>& other) const { return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ); } Vectorized<double> operator>=(const Vectorized<double>& other) const { return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ); } Vectorized<double> eq(const Vectorized<double>& other) const; Vectorized<double> ne(const Vectorized<double>& other) const; Vectorized<double> lt(const Vectorized<double>& other) const; Vectorized<double> le(const Vectorized<double>& other) const; Vectorized<double> gt(const Vectorized<double>& other) const; Vectorized<double> ge(const Vectorized<double>& other) const; }; template <> Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm256_add_pd(a, b); } template <> Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm256_sub_pd(a, b); } template <> Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm256_mul_pd(a, b); } template <> Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm256_div_pd(a, b); } // frac. Implement this here so we can use subtraction. inline Vectorized<double> Vectorized<double>::frac() const { return *this - this->trunc(); } // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if // either input is a NaN. template <> Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) { Vectorized<double> max = _mm256_max_pd(a, b); Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q); // Exploit the fact that all-ones is a NaN. return _mm256_or_pd(max, isnan); } // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if // either input is a NaN. template <> Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) { Vectorized<double> min = _mm256_min_pd(a, b); Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q); // Exploit the fact that all-ones is a NaN. 
return _mm256_or_pd(min, isnan); } template <> Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) { return _mm256_min_pd(max, _mm256_max_pd(min, a)); } template <> Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) { return _mm256_max_pd(min, a); } template <> Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) { return _mm256_min_pd(max, a); } template <> Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm256_and_pd(a, b); } template <> Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm256_or_pd(a, b); } template <> Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm256_xor_pd(a, b); } inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const { return (*this == other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const { return (*this != other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const { return (*this > other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const { return (*this >= other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const { return (*this < other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const { return (*this <= other) & Vectorized<double>(1.0); } template <> inline void convert(const double* src, double* dst, int64_t n) { int64_t i; #pragma unroll for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) { _mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i)); } #pragma unroll for (; i < n; i++) { dst[i] = src[i]; } } #ifdef CPU_CAPABILITY_AVX2 template <> Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) { return _mm256_fmadd_pd(a, b, c); } template <> Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) { return _mm256_fmsub_pd(a, b, c); } #endif #endif }}}
14,128
32.011682
124
h
null
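A minimal usage sketch for the Vectorized<double> wrapper above, assuming CPU_CAPABILITY_AVX2 so that fmadd lowers to _mm256_fmadd_pd; the axpy kernel itself is illustrative. The tail path relies on the counted loadu/store overloads, which zero-fill on load and write only count elements on store.
#include <ATen/cpu/vec/vec256/vec256.h>
#include <cstdint>

void axpy(double a, const double* x, double* y, int64_t n) {
  using dVec = at::vec::Vectorized<double>;
  dVec va(a);
  int64_t d = 0;
  for (; d + dVec::size() <= n; d += dVec::size()) {  // full 4-wide vectors
    at::vec::fmadd(va, dVec::loadu(x + d), dVec::loadu(y + d)).store(y + d);
  }
  if (n - d > 0) {  // remainder handled by counted load/store
    at::vec::fmadd(va, dVec::loadu(x + d, n - d), dVec::loadu(y + d, n - d))
        .store(y + d, n - d);
  }
}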
pytorch-main/aten/src/ATen/cpu/vec/vec256/vec256_float.h
#pragma once // DO NOT DEFINE STATIC DATA IN THIS HEADER! // See Note [Do not compile initializers with AVX] #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <c10/util/irange.h> #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) #include <sleef.h> #endif namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER) template <> class Vectorized<float> { private: __m256 values; public: using value_type = float; using size_type = int; static constexpr size_type size() { return 8; } Vectorized() {} Vectorized(__m256 v) : values(v) {} Vectorized(float val) { values = _mm256_set1_ps(val); } Vectorized(float val1, float val2, float val3, float val4, float val5, float val6, float val7, float val8) { values = _mm256_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8); } operator __m256() const { return values; } template <int64_t mask> static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) { return _mm256_blend_ps(a.values, b.values, mask); } static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& mask) { return _mm256_blendv_ps(a.values, b.values, mask.values); } template<typename step_t> static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) { return Vectorized<float>( base, base + step, base + 2 * step, base + 3 * step, base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); } static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b, int64_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b); case 4: return blend<15>(a, b); case 5: return blend<31>(a, b); case 6: return blend<63>(a, b); case 7: return blend<127>(a, b); } return b; } static Vectorized<float> loadu(const void* ptr, int64_t count = size()) { if (count == size()) return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr)); __at_align__ float tmp_values[size()]; // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two // instructions while a loop would be compiled to one instruction. 
for (const auto i : c10::irange(size())) { tmp_values[i] = 0.0; } std::memcpy( tmp_values, reinterpret_cast<const float*>(ptr), count * sizeof(float)); return _mm256_loadu_ps(tmp_values); } void store(void* ptr, int64_t count = size()) const { if (count == size()) { _mm256_storeu_ps(reinterpret_cast<float*>(ptr), values); } else if (count > 0) { float tmp_values[size()]; _mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values); std::memcpy(ptr, tmp_values, count * sizeof(float)); } } const float& operator[](int idx) const = delete; float& operator[](int idx) = delete; int zero_mask() const { // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit __m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ); return _mm256_movemask_ps(cmp); } Vectorized<float> isnan() const { return _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_UNORD_Q); } Vectorized<float> map(float (*const f)(float)) const { __at_align__ float tmp[size()]; store(tmp); for (const auto i : c10::irange(size())) { tmp[i] = f(tmp[i]); } return loadu(tmp); } Vectorized<float> abs() const { auto mask = _mm256_set1_ps(-0.f); return _mm256_andnot_ps(mask, values); } Vectorized<float> angle() const { const auto zero_vec = _mm256_set1_ps(0.f); const auto nan_vec = _mm256_set1_ps(NAN); const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ); const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ); const auto pi = _mm256_set1_ps(c10::pi<float>); const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ); auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask); angle = _mm256_blendv_ps(angle, nan_vec, nan_mask); return angle; } Vectorized<float> real() const { return *this; } Vectorized<float> imag() const { return _mm256_set1_ps(0); } Vectorized<float> conj() const { return *this; } Vectorized<float> acos() const { return Vectorized<float>(Sleef_acosf8_u10(values)); } Vectorized<float> asin() const { return Vectorized<float>(Sleef_asinf8_u10(values)); } Vectorized<float> atan() const { return Vectorized<float>(Sleef_atanf8_u10(values)); } Vectorized<float> atan2(const Vectorized<float> &b) const { return Vectorized<float>(Sleef_atan2f8_u10(values, b)); } Vectorized<float> copysign(const Vectorized<float> &sign) const { return Vectorized<float>(Sleef_copysignf8(values, sign)); } Vectorized<float> erf() const { // constants const auto neg_zero_vec = _mm256_set1_ps(-0.f); const auto one_vec = _mm256_set1_ps(1.0f); const auto p = _mm256_set1_ps(0.3275911f); const auto p1 = _mm256_set1_ps(0.254829592f); const auto p2 = _mm256_set1_ps(-0.284496736f); const auto p3 = _mm256_set1_ps(1.421413741f); const auto p4 = _mm256_set1_ps(-1.453152027f); const auto p5 = _mm256_set1_ps(1.061405429f); // sign(x) auto sign_mask = _mm256_and_ps(neg_zero_vec, values); auto abs_vec = _mm256_xor_ps(sign_mask, values); // t = 1 / (p * abs(x) + 1) auto tmp0 = _mm256_fmadd_ps(p, abs_vec, one_vec); auto t = _mm256_div_ps(one_vec, tmp0); // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1 auto tmp1 = _mm256_fmadd_ps(p5, t, p4); auto tmp2 = _mm256_fmadd_ps(tmp1, t, p3); auto tmp3 = _mm256_fmadd_ps(tmp2, t, p2); auto r = _mm256_fmadd_ps(tmp3, t, p1); // - exp(- x * x) auto pow_2 = _mm256_mul_ps(values, values); auto neg_pow_2 = _mm256_xor_ps(neg_zero_vec, pow_2); // auto tmp4 = exp(neg_pow_2); auto tmp4 = Vectorized<float>(Sleef_expf8_u10(neg_pow_2)); auto tmp5 = _mm256_xor_ps(neg_zero_vec, tmp4); // erf(x) = sign(x) * (1 - r * t * exp(- x * x)) auto tmp6 = 
_mm256_mul_ps(tmp5, t); auto tmp7 = _mm256_fmadd_ps(tmp6, r, one_vec); return _mm256_xor_ps(sign_mask, tmp7); } Vectorized<float> erfc() const { return Vectorized<float>(Sleef_erfcf8_u15(values)); } Vectorized<float> erfinv() const { return map(calc_erfinv); } Vectorized<float> exp() const { return Vectorized<float>(Sleef_expf8_u10(values)); } Vectorized<float> exp2() const { return Vectorized<float>(Sleef_exp2f8_u10(values)); } Vectorized<float> expm1() const { return Vectorized<float>(Sleef_expm1f8_u10(values)); } Vectorized<float> fmod(const Vectorized<float>& q) const { return Vectorized<float>(Sleef_fmodf8(values, q)); } Vectorized<float> log() const { return Vectorized<float>(Sleef_logf8_u10(values)); } Vectorized<float> log2() const { return Vectorized<float>(Sleef_log2f8_u10(values)); } Vectorized<float> log10() const { return Vectorized<float>(Sleef_log10f8_u10(values)); } Vectorized<float> log1p() const { return Vectorized<float>(Sleef_log1pf8_u10(values)); } Vectorized<float> frac() const; Vectorized<float> sin() const { return Vectorized<float>(Sleef_sinf8_u35(values)); } Vectorized<float> sinh() const { return Vectorized<float>(Sleef_sinhf8_u10(values)); } Vectorized<float> cos() const { return Vectorized<float>(Sleef_cosf8_u35(values)); } Vectorized<float> cosh() const { return Vectorized<float>(Sleef_coshf8_u10(values)); } Vectorized<float> ceil() const { return _mm256_ceil_ps(values); } Vectorized<float> floor() const { return _mm256_floor_ps(values); } Vectorized<float> hypot(const Vectorized<float> &b) const { return Vectorized<float>(Sleef_hypotf8_u05(values, b)); } Vectorized<float> i0() const { return map(calc_i0); } Vectorized<float> i0e() const { return map(calc_i0e); } Vectorized<float> igamma(const Vectorized<float> &x) const { __at_align__ float tmp[size()]; __at_align__ float tmp_x[size()]; store(tmp); x.store(tmp_x); for (const auto i : c10::irange(size())) { tmp[i] = calc_igamma(tmp[i], tmp_x[i]); } return loadu(tmp); } Vectorized<float> igammac(const Vectorized<float> &x) const { __at_align__ float tmp[size()]; __at_align__ float tmp_x[size()]; store(tmp); x.store(tmp_x); for (const auto i : c10::irange(size())) { tmp[i] = calc_igammac(tmp[i], tmp_x[i]); } return loadu(tmp); } Vectorized<float> neg() const { return _mm256_xor_ps(_mm256_set1_ps(-0.f), values); } Vectorized<float> nextafter(const Vectorized<float> &b) const { return Vectorized<float>(Sleef_nextafterf8(values, b)); } Vectorized<float> round() const { return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); } Vectorized<float> tan() const { return Vectorized<float>(Sleef_tanf8_u10(values)); } Vectorized<float> tanh() const { return Vectorized<float>(Sleef_tanhf8_u10(values)); } Vectorized<float> trunc() const { return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); } Vectorized<float> lgamma() const { return Vectorized<float>(Sleef_lgammaf8_u10(values)); } Vectorized<float> sqrt() const { return _mm256_sqrt_ps(values); } Vectorized<float> reciprocal() const { return _mm256_div_ps(_mm256_set1_ps(1), values); } Vectorized<float> rsqrt() const { return _mm256_div_ps(_mm256_set1_ps(1), _mm256_sqrt_ps(values)); } Vectorized<float> pow(const Vectorized<float> &b) const { return Vectorized<float>(Sleef_powf8_u10(values, b)); } // Comparison using the _CMP_**_OQ predicate. 
// `O`: get false if an operand is NaN // `Q`: do not raise if an operand is NaN Vectorized<float> operator==(const Vectorized<float>& other) const { return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ); } Vectorized<float> operator!=(const Vectorized<float>& other) const { return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ); } Vectorized<float> operator<(const Vectorized<float>& other) const { return _mm256_cmp_ps(values, other.values, _CMP_LT_OQ); } Vectorized<float> operator<=(const Vectorized<float>& other) const { return _mm256_cmp_ps(values, other.values, _CMP_LE_OQ); } Vectorized<float> operator>(const Vectorized<float>& other) const { return _mm256_cmp_ps(values, other.values, _CMP_GT_OQ); } Vectorized<float> operator>=(const Vectorized<float>& other) const { return _mm256_cmp_ps(values, other.values, _CMP_GE_OQ); } Vectorized<float> eq(const Vectorized<float>& other) const; Vectorized<float> ne(const Vectorized<float>& other) const; Vectorized<float> gt(const Vectorized<float>& other) const; Vectorized<float> ge(const Vectorized<float>& other) const; Vectorized<float> lt(const Vectorized<float>& other) const; Vectorized<float> le(const Vectorized<float>& other) const; }; template <> Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) { return _mm256_add_ps(a, b); } template <> Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) { return _mm256_sub_ps(a, b); } template <> Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) { return _mm256_mul_ps(a, b); } template <> Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) { return _mm256_div_ps(a, b); } // frac. Implement this here so we can use subtraction inline Vectorized<float> Vectorized<float>::frac() const { return *this - this->trunc(); } // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if // either input is a NaN. template <> Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) { Vectorized<float> max = _mm256_max_ps(a, b); Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q); // Exploit the fact that all-ones is a NaN. return _mm256_or_ps(max, isnan); } // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if // either input is a NaN. template <> Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) { Vectorized<float> min = _mm256_min_ps(a, b); Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q); // Exploit the fact that all-ones is a NaN. 
return _mm256_or_ps(min, isnan); } template <> Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) { return _mm256_min_ps(max, _mm256_max_ps(min, a)); } template <> Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) { return _mm256_min_ps(max, a); } template <> Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) { return _mm256_max_ps(min, a); } template <> Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) { return _mm256_and_ps(a, b); } template <> Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) { return _mm256_or_ps(a, b); } template <> Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) { return _mm256_xor_ps(a, b); } inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const { return (*this == other) & Vectorized<float>(1.0f); } inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const { return (*this != other) & Vectorized<float>(1.0f); } inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const { return (*this > other) & Vectorized<float>(1.0f); } inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const { return (*this >= other) & Vectorized<float>(1.0f); } inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const { return (*this < other) & Vectorized<float>(1.0f); } inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const { return (*this <= other) & Vectorized<float>(1.0f); } template <> inline void convert(const float* src, float* dst, int64_t n) { int64_t i; #pragma unroll for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) { _mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i)); } #pragma unroll for (; i < n; i++) { dst[i] = src[i]; } } template <> Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) { return _mm256_fmadd_ps(a, b, c); } template <> Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) { return _mm256_fmsub_ps(a, b, c); } // Used by Inductor CPP codegen template<> inline void transpose_mxn<float, 8, 8>( const float* src, int64_t ld_src, float* dst, int64_t ld_dst) { // load from src to registers // a: a0 a1 a2 a3 a4 a5 a6 a7 // b: b0 b1 b2 b3 b4 b5 b6 b7 // c: c0 c1 c2 c3 c4 c5 c6 c7 // d: d0 d1 d2 d3 d4 d5 d6 d7 // e: e0 e1 e2 e3 e4 e5 e6 e7 // f: f0 f1 f2 f3 f4 f5 f6 f7 // g: g0 g1 g2 g3 g4 g5 g6 g7 // h: h0 h1 h2 h3 h4 h5 h6 h7 __m256 a = _mm256_loadu_ps(&src[0 * ld_src]); __m256 b = _mm256_loadu_ps(&src[1 * ld_src]); __m256 c = _mm256_loadu_ps(&src[2 * ld_src]); __m256 d = _mm256_loadu_ps(&src[3 * ld_src]); __m256 e = _mm256_loadu_ps(&src[4 * ld_src]); __m256 f = _mm256_loadu_ps(&src[5 * ld_src]); __m256 g = _mm256_loadu_ps(&src[6 * ld_src]); __m256 h = _mm256_loadu_ps(&src[7 * ld_src]); __m256 ta, tb, tc, td, te, tf, tg, th; // unpacking and interleaving 32-bit elements // a0 b0 a1 b1 a4 b4 a5 b5 // a2 b2 a3 b3 a6 b6 a7 b7 // c0 d0 c1 d1 ... // c2 d2 c3 d3 ... // e0 f0 e1 f1 ... // e2 f2 e3 f3 ... // g0 h0 g1 h1 ... // g2 h2 g3 h3 ... 
ta = _mm256_unpacklo_ps(a, b); tb = _mm256_unpackhi_ps(a, b); tc = _mm256_unpacklo_ps(c, d); td = _mm256_unpackhi_ps(c, d); te = _mm256_unpacklo_ps(e, f); tf = _mm256_unpackhi_ps(e, f); tg = _mm256_unpacklo_ps(g, h); th = _mm256_unpackhi_ps(g, h); // unpacking and interleaving 64-bit elements // a0 b0 c0 d0 a4 b4 c4 d4 // a1 b1 c1 d1 ... // a2 b2 c2 d2 ... // a3 b3 c3 d3 ... // e0 f0 g0 h0 e4 f4 g4 h4 // e1 f1 g1 h1 ... // e2 f2 g2 h2 ... // e3 f3 g3 h3 ... a = _mm256_castpd_ps( _mm256_unpacklo_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc))); b = _mm256_castpd_ps( _mm256_unpackhi_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc))); c = _mm256_castpd_ps( _mm256_unpacklo_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td))); d = _mm256_castpd_ps( _mm256_unpackhi_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td))); e = _mm256_castpd_ps( _mm256_unpacklo_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg))); f = _mm256_castpd_ps( _mm256_unpackhi_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg))); g = _mm256_castpd_ps( _mm256_unpacklo_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th))); h = _mm256_castpd_ps( _mm256_unpackhi_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th))); // shuffle 128-bits (composed of 4 32-bit elements) // a0 b0 c0 d0 e0 f0 g0 h0 // a1 b1 c1 d1 ... // a2 b2 c2 d2 ... // a3 b3 c3 d3 ... // a4 b4 c4 d4 ... // a5 b5 c5 d5 ... // a6 b6 c6 d6 ... // a7 b7 c7 d7 ... ta = _mm256_permute2f128_ps(a, e, 0x20); tb = _mm256_permute2f128_ps(b, f, 0x20); tc = _mm256_permute2f128_ps(c, g, 0x20); td = _mm256_permute2f128_ps(d, h, 0x20); te = _mm256_permute2f128_ps(a, e, 0x31); tf = _mm256_permute2f128_ps(b, f, 0x31); tg = _mm256_permute2f128_ps(c, g, 0x31); th = _mm256_permute2f128_ps(d, h, 0x31); // store from registers to dst _mm256_storeu_ps(&dst[0 * ld_dst], ta); _mm256_storeu_ps(&dst[1 * ld_dst], tb); _mm256_storeu_ps(&dst[2 * ld_dst], tc); _mm256_storeu_ps(&dst[3 * ld_dst], td); _mm256_storeu_ps(&dst[4 * ld_dst], te); _mm256_storeu_ps(&dst[5 * ld_dst], tf); _mm256_storeu_ps(&dst[6 * ld_dst], tg); _mm256_storeu_ps(&dst[7 * ld_dst], th); } #endif }}}
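For reference, the erf() member in vec256_float.h above evaluates the Abramowitz & Stegun 7.1.26 polynomial approximation with a chain of fused multiply-adds. A scalar sketch of the same computation (illustrative only; erf_reference is a hypothetical name, not part of the header):

#include <cmath>

// erf(x) ~= sign(x) * (1 - r(t) * t * exp(-x*x)),  t = 1 / (1 + p*|x|)
float erf_reference(float x) {
  const float p  = 0.3275911f;
  const float p1 = 0.254829592f;
  const float p2 = -0.284496736f;
  const float p3 = 1.421413741f;
  const float p4 = -1.453152027f;
  const float p5 = 1.061405429f;
  const float t = 1.0f / (p * std::fabs(x) + 1.0f);
  // Horner evaluation, mirroring the _mm256_fmadd_ps chain above.
  const float r = (((p5 * t + p4) * t + p3) * t + p2) * t + p1;
  const float y = 1.0f - r * t * std::exp(-x * x);
  return std::copysign(y, x);  // reapply sign(x), as the sign_mask XOR does
}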
19,023
32.910873
120
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h
#pragma once
#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
#include <ATen/cpu/vec/vec_base.h>
#include <c10/util/irange.h>
namespace at {
namespace vec {
// See Note [CPU_CAPABILITY namespace]
inline namespace CPU_CAPABILITY {

inline std::tuple<Vectorized<float>, Vectorized<float>> convert_bfloat16_float(
    const Vectorized<BFloat16>& a) {
  constexpr int64_t K = Vectorized<BFloat16>::size();
  __at_align__ float arr[K];
  __at_align__ BFloat16 arr2[K];
  a.store(arr2);
  convert(arr2, arr, K);
  return std::make_tuple(
      Vectorized<float>::loadu(arr),
      Vectorized<float>::loadu(arr + Vectorized<float>::size()));
}

inline Vectorized<BFloat16> convert_float_bfloat16(
    const Vectorized<float>& a,
    const Vectorized<float>& b) {
  constexpr int64_t K = Vectorized<BFloat16>::size();
  __at_align__ float arr[K];
  __at_align__ BFloat16 arr2[K];
  a.store(arr);
  b.store(arr + Vectorized<float>::size());
  convert(arr, arr2, K);
  return Vectorized<BFloat16>::loadu(arr2);
}

inline void load_fp32_from_bf16(const c10::BFloat16* data, Vectorized<float>& out) {
  __at_align__ float values[Vectorized<float>::size()];
  for (const auto k : c10::irange(Vectorized<float>::size())) {
    values[k] = data[k];
  }
  out = Vectorized<float>::loadu(values);
}

inline void load_fp32_from_bf16(
    const c10::BFloat16* data,
    Vectorized<float>& out1,
    Vectorized<float>& out2) {
  load_fp32_from_bf16(data, out1);
  data += Vectorized<float>::size();
  load_fp32_from_bf16(data, out2);
}

} // namespace
} // namespace vec
} // namespace at
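A hypothetical usage sketch for the conversion helpers above (scale_bf16 is an assumed name; it presumes this header and the float Vectorized specialization are available): widen one Vectorized<BFloat16> into two Vectorized<float> halves, compute in float precision, then narrow back:

// Scale a bfloat16 vector by s, doing the arithmetic in float.
at::vec::Vectorized<c10::BFloat16> scale_bf16(
    const at::vec::Vectorized<c10::BFloat16>& a, float s) {
  auto [lo, hi] = at::vec::convert_bfloat16_float(a);  // two float halves
  lo = lo * at::vec::Vectorized<float>(s);
  hi = hi * at::vec::Vectorized<float>(s);
  return at::vec::convert_float_bfloat16(lo, hi);      // pack back to bf16
}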
1,608
27.22807
84
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> // Note: header order is important here #include <ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h> #include <ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h> namespace at { namespace vec { inline namespace CPU_CAPABILITY { DEFINE_CLAMP_FUNCS(c10::quint8) DEFINE_CLAMP_FUNCS(c10::qint8) DEFINE_CLAMP_FUNCS(c10::qint32) DEFINE_CLAMP_FUNCS(int16_t) DEFINE_CLAMP_FUNCS(int32_t) DEFINE_CLAMP_FUNCS(int64_t) DEFINE_CLAMP_FUNCS(float) DEFINE_CLAMP_FUNCS(double) template <> Vectorized<double> C10_ALWAYS_INLINE fmadd( const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) { return Vectorized<double>{ vec_madd(a.vec0(), b.vec0(), c.vec0()), vec_madd(a.vec1(), b.vec1(), c.vec1())}; } template <> Vectorized<int64_t> C10_ALWAYS_INLINE fmadd( const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const Vectorized<int64_t>& c) { return Vectorized<int64_t>{ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; } template <> Vectorized<int32_t> C10_ALWAYS_INLINE fmadd( const Vectorized<int32_t>& a, const Vectorized<int32_t>& b, const Vectorized<int32_t>& c) { return Vectorized<int32_t>{ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; } template <> Vectorized<int16_t> C10_ALWAYS_INLINE fmadd( const Vectorized<int16_t>& a, const Vectorized<int16_t>& b, const Vectorized<int16_t>& c) { return Vectorized<int16_t>{ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()}; } DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float) DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double) DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t) DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t) DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t) template <> Vectorized<int64_t> C10_ALWAYS_INLINE convert_to_int_of_same_size<double>(const Vectorized<double>& src) { return Vectorized<int64_t>{vec_signed(src.vec0()), vec_signed(src.vec1())}; } template <> Vectorized<int32_t> C10_ALWAYS_INLINE convert_to_int_of_same_size<float>( const Vectorized<float>& src) { return Vectorized<int32_t>{vec_signed(src.vec0()), vec_signed(src.vec1())}; } template <> inline void convert(const int32_t* src, float* dst, int64_t n) { // int32_t and float have same size int64_t i; for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) { const int32_t* src_a = src + i; float* dst_a = dst + i; vint32 input_vec0 = vec_vsx_ld(offset0, reinterpret_cast<const vint32*>(src_a)); vint32 input_vec1 = vec_vsx_ld(offset16, reinterpret_cast<const vint32*>(src_a)); vfloat32 c0 = vec_float(input_vec0); vfloat32 c1 = vec_float(input_vec1); vec_vsx_st(c0, offset0, dst_a); vec_vsx_st(c1, offset16, dst_a); } for (; i < n; i++) { dst[i] = static_cast<float>(src[i]); } } template <> inline void convert(const int64_t* src, double* dst, int64_t n) { int64_t i; for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) { const int64_t* src_a = src + i; double* dst_a = dst + 
i; vint64 input_vec0 = vec_vsx_ld(offset0, reinterpret_cast<const vint64*>(src_a)); vint64 input_vec1 = vec_vsx_ld(offset16, reinterpret_cast<const vint64*>(src_a)); vfloat64 c0 = vec_double(input_vec0); vfloat64 c1 = vec_double(input_vec1); vec_vsx_st(c0, offset0, reinterpret_cast<double*>(dst_a)); vec_vsx_st(c1, offset16, reinterpret_cast<double*>(dst_a)); } for (; i < n; i++) { dst[i] = static_cast<double>(src[i]); } } //Generic implementation to fix compiler error //TO-DO : Add optimized version for ppc64 inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float( const Vectorized<Half>& a) { constexpr int64_t K = Vectorized<Half>::size(); __at_align__ float arr[K]; __at_align__ Half arr2[K]; a.store(arr2); convert(arr2, arr, K); return std::make_tuple( Vectorized<float>::loadu(arr), Vectorized<float>::loadu(arr + Vectorized<float>::size())); } inline Vectorized<Half> convert_float_half( const Vectorized<float>& a, const Vectorized<float>& b) { constexpr int64_t K = Vectorized<Half>::size(); __at_align__ float arr[K]; __at_align__ Half arr2[K]; a.store(arr); b.store(arr + Vectorized<float>::size()); convert(arr, arr2, K); return Vectorized<Half>::loadu(arr2); }; template <> std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>( const Vectorized<double>& a, const Vectorized<double>& b) { // inputs: // a = {a0, a1, a2, a3} // b = {b0, b1, b2, b3} vfloat64 ab00 = vec_xxpermdi(a.vec0(), b.vec0(), 0); vfloat64 ab11 = vec_xxpermdi(a.vec0(), b.vec0(), 3); vfloat64 ab2_00 = vec_xxpermdi(a.vec1(), b.vec1(), 0); vfloat64 ab2_11 = vec_xxpermdi(a.vec1(), b.vec1(), 3); // return {a0, b0, a1, b1} // {a2, b2, a3, b3} return std::make_pair( Vectorized<double>{ab00, ab11}, Vectorized<double>{ab2_00, ab2_11}); } template <> std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>( const Vectorized<double>& a, const Vectorized<double>& b) { // inputs: // a = {a0, b0, a1, b1} // b = {a2, b2, a3, b3} vfloat64 aa01 = vec_xxpermdi(a.vec0(), a.vec1(), 0); vfloat64 aa23 = vec_xxpermdi(b.vec0(), b.vec1(), 0); vfloat64 bb_01 = vec_xxpermdi(a.vec0(), a.vec1(), 3); vfloat64 bb_23 = vec_xxpermdi(b.vec0(), b.vec1(), 3); // swap lanes: // return {a0, a1, a2, a3} // {b0, b1, b2, b3} return std::make_pair( Vectorized<double>{aa01, aa23}, Vectorized<double>{bb_01, bb_23}); } template <> std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>( const Vectorized<float>& a, const Vectorized<float>& b) { // inputs: // a = {a0, a1, a2, a3,, a4, a5, a6, a7} // b = {b0, b1, b2, b3,, b4, b5, b6, b7} vfloat32 ab0011 = vec_mergeh(a.vec0(), b.vec0()); vfloat32 ab2233 = vec_mergel(a.vec0(), b.vec0()); vfloat32 ab2_0011 = vec_mergeh(a.vec1(), b.vec1()); vfloat32 ab2_2233 = vec_mergel(a.vec1(), b.vec1()); // group cols crossing lanes: // return {a0, b0, a1, b1,, a2, b2, a3, b3} // {a4, b4, a5, b5,, a6, b6, a7, b7} return std::make_pair( Vectorized<float>{ab0011, ab2233}, Vectorized<float>{ab2_0011, ab2_2233}); } template <> std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>( const Vectorized<float>& a, const Vectorized<float>& b) { // inputs: // a = {a0, b0, a1, b1,, a2, b2, a3, b3} // b = {a4, b4, a5, b5,, a6, b6, a7, b7} // {a0,a2,b0,b2} {a1,a3,b1,b3} vfloat32 a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1()); vfloat32 a1a3b1b3 = vec_mergel(a.vec0(), a.vec1()); vfloat32 aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3); vfloat32 bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3); vfloat32 a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1()); vfloat32 a1a3b1b3_2 = 
vec_mergel(b.vec0(), b.vec1()); vfloat32 aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2); vfloat32 bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2); // it could be done with vec_perm ,too // swap lanes: // return {a0, a1, a2, a3,, a4, a5, a6, a7} // {b0, b1, b2, b3,, b4, b5, b6, b7} return std::make_pair( Vectorized<float>{aa0123, aa0123_2}, Vectorized<float>{bb0123, bb0123_2}); } } // namespace } // namespace vec } // namespace at
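As the lane diagrams in the comments above indicate, deinterleave2 is the inverse of interleave2, so AoS<->SoA conversions compose cleanly. A small illustrative round-trip (interleave_roundtrip is a hypothetical helper built on the specializations above):

void interleave_roundtrip(
    const at::vec::Vectorized<double>& a,    // {a0, a1, a2, a3}
    const at::vec::Vectorized<double>& b) {  // {b0, b1, b2, b3}
  auto ab = at::vec::interleave2(a, b);
  // ab.first  == {a0, b0, a1, b1}, ab.second == {a2, b2, a3, b3}
  auto split = at::vec::deinterleave2(ab.first, ab.second);
  // split.first holds a's lanes again and split.second holds b's.
  (void)split;
}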
8,052
31.603239
87
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> #include <c10/util/complex.h> #include <c10/util/irange.h> namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { using ComplexDbl = c10::complex<double>; template <> class Vectorized<ComplexDbl> { union { struct { vfloat64 _vec0; vfloat64 _vec1; }; struct { vbool64 _vecb0; vbool64 _vecb1; }; } __attribute__((__may_alias__)); public: using value_type = ComplexDbl; using vec_internal_type = vfloat64; using vec_internal_mask_type = vbool64; using size_type = int; static constexpr size_type size() { return 2; } Vectorized() {} C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {} Vectorized(ComplexDbl val) { double real_value = val.real(); double imag_value = val.imag(); _vec0 = vfloat64{real_value, imag_value}; _vec1 = vfloat64{real_value, imag_value}; } Vectorized(ComplexDbl val1, ComplexDbl val2) { _vec0 = vfloat64{val1.real(), val1.imag()}; _vec1 = vfloat64{val2.real(), val2.imag()}; } C10_ALWAYS_INLINE const vec_internal_type& vec0() const { return _vec0; } C10_ALWAYS_INLINE const vec_internal_type& vec1() const { return _vec1; } template <int64_t mask> static std::enable_if_t<blendChoiceComplexDbl(mask) == 0, Vectorized<ComplexDbl>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) { return a; } template <int64_t mask> static std::enable_if_t<blendChoiceComplexDbl(mask) == 1, Vectorized<ComplexDbl>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) { return b; } template <int64_t mask> static std::enable_if_t<blendChoiceComplexDbl(mask) == 2, Vectorized<ComplexDbl>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) { return {b._vec0, a._vec1}; } template <int64_t mask> static std::enable_if_t<blendChoiceComplexDbl(mask) == 3, Vectorized<ComplexDbl>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) { return {a._vec0, b._vec1}; } template <int64_t mask> static Vectorized<ComplexDbl> C10_ALWAYS_INLINE el_blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) { const vbool64 mask_1st = VsxDblMask1(mask); const vbool64 mask_2nd = VsxDblMask2(mask); return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd)}; } static Vectorized<ComplexDbl> blendv( const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b, const Vectorized<ComplexDbl>& mask) { // convert std::complex<V> index mask to V index mask: xy -> xxyy auto mask_complex = Vectorized<ComplexDbl>(vec_splat(mask._vec0, 0), vec_splat(mask._vec1, 0)); return { vec_sel(a._vec0, b._vec0, mask_complex._vecb0), vec_sel(a._vec1, b._vec1, mask_complex._vecb1)}; } static Vectorized<ComplexDbl> C10_ALWAYS_INLINE elwise_blendv( const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b, const Vectorized<ComplexDbl>& mask) { return { vec_sel(a._vec0, b._vec0, mask._vecb0), vec_sel(a._vec1, b._vec1, mask._vecb1)}; } template <typename step_t> static Vectorized<ComplexDbl> arange( ComplexDbl base = 0., step_t step = static_cast<step_t>(1)) { return Vectorized<ComplexDbl>(base, base + step); } 
static Vectorized<ComplexDbl> set( const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b, int64_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); } return b; } static Vectorized<value_type> C10_ALWAYS_INLINE loadu(const void* ptr, int count = size()) { if (count == size()) { return { vec_vsx_ld(offset0, reinterpret_cast<const double*>(ptr)), vec_vsx_ld(offset16, reinterpret_cast<const double*>(ptr))}; } __at_align__ value_type tmp_values[size()] = {}; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return { vec_vsx_ld(offset0, reinterpret_cast<const double*>(tmp_values)), vec_vsx_ld(offset16, reinterpret_cast<const double*>(tmp_values))}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(ptr)); vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(ptr)); } else if (count > 0) { __at_align__ value_type tmp_values[size()]; vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(tmp_values)); vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(tmp_values)); std::memcpy( ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } const ComplexDbl& operator[](int idx) const = delete; ComplexDbl& operator[](int idx) = delete; Vectorized<ComplexDbl> map(ComplexDbl (*const f)(ComplexDbl)) const { __at_align__ ComplexDbl tmp[size()]; store(tmp); for (const auto i : c10::irange(size())) { tmp[i] = f(tmp[i]); } return loadu(tmp); } Vectorized<ComplexDbl> map(ComplexDbl (*const f)(const ComplexDbl&)) const { __at_align__ ComplexDbl tmp[size()]; store(tmp); for (const auto i : c10::irange(size())) { tmp[i] = f(tmp[i]); } return loadu(tmp); } Vectorized<ComplexDbl> el_swapped() const { vfloat64 v0 = vec_xxpermdi(_vec0, _vec0, 2); vfloat64 v1 = vec_xxpermdi(_vec1, _vec1, 2); return {v0, v1}; } Vectorized<ComplexDbl> el_madd( const Vectorized<ComplexDbl>& multiplier, const Vectorized<ComplexDbl>& val) const { return { vec_madd(_vec0, multiplier._vec0, val._vec0), vec_madd(_vec1, multiplier._vec1, val._vec1)}; } Vectorized<ComplexDbl> el_mergeo() const { vfloat64 v0 = vec_splat(_vec0, 1); vfloat64 v1 = vec_splat(_vec1, 1); return {v0, v1}; } Vectorized<ComplexDbl> el_mergee() const { vfloat64 v0 = vec_splat(_vec0, 0); vfloat64 v1 = vec_splat(_vec1, 0); return {v0, v1}; } static Vectorized<ComplexDbl> el_mergee( Vectorized<ComplexDbl>& first, Vectorized<ComplexDbl>& second) { // as mergee phased in , we can use vec_perm with mask return { vec_mergeh(first._vec0, second._vec0), vec_mergeh(first._vec1, second._vec1)}; } Vectorized<ComplexDbl> abs_2_() const { auto a = (*this).elwise_mult(*this); auto permuted = a.el_swapped(); a = a + permuted; return a; } Vectorized<ComplexDbl> abs_() const { auto ret = abs_2_(); return ret.elwise_sqrt(); } Vectorized<ComplexDbl> abs() const { return abs_() & vd_real_mask; } Vectorized<ComplexDbl> angle_() const { // angle = atan2(b/a) // auto b_a = _mm256_permute_pd(values, 0x05); // b a // return Sleef_atan2d4_u10(values, b_a); // 90-angle angle Vectorized<ComplexDbl> ret; ret._vec0[0] = std::atan2(_vec0[1], _vec0[0]); ret._vec1[0] = std::atan2(_vec1[1], _vec1[0]); return ret; } Vectorized<ComplexDbl> angle() const { return angle_() & vd_real_mask; } Vectorized<ComplexDbl> real_() const { return *this & vd_real_mask; } Vectorized<ComplexDbl> real() const { return *this & vd_real_mask; } Vectorized<ComplexDbl> imag_() const { return *this & vd_imag_mask; } Vectorized<ComplexDbl> imag() const { 
return imag_().el_swapped(); } Vectorized<ComplexDbl> conj_() const { return *this ^ vd_isign_mask; } Vectorized<ComplexDbl> conj() const { return *this ^ vd_isign_mask; } Vectorized<ComplexDbl> log() const { // Most trigonomic ops use the log() op to improve complex number // performance. return map(std::log); } Vectorized<ComplexDbl> log2() const { // log2eB_inv auto ret = log(); return ret.elwise_mult(vd_log2e_inv); } Vectorized<ComplexDbl> log10() const { auto ret = log(); return ret.elwise_mult(vd_log10e_inv); } Vectorized<ComplexDbl> log1p() const { return map(std::log1p); } Vectorized<ComplexDbl> asin() const { // asin(x) // = -i*ln(iz + sqrt(1 -z^2)) // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) auto conj = conj_(); auto b_a = conj.el_swapped(); auto ab = conj.elwise_mult(b_a); auto im = ab + ab; auto val_2 = (*this).elwise_mult(*this); auto val_2_swapped = val_2.el_swapped(); auto re = horizontal_sub(val_2, val_2_swapped); re = Vectorized<ComplexDbl>(vd_one) - re; auto root = el_blend<0x0A>(re, im).sqrt(); auto ln = (b_a + root).log(); return ln.el_swapped().conj(); } Vectorized<ComplexDbl> acos() const { // acos(x) = pi/2 - asin(x) return Vectorized(vd_pi_2) - asin(); } Vectorized<ComplexDbl> atan() const { // atan(x) = i/2 * ln((i + z)/(i - z)) auto ione = Vectorized(vd_imag_one); auto sum = ione + *this; auto sub = ione - *this; auto ln = (sum / sub).log(); // ln((i + z)/(i - z)) return ln * vd_imag_half; // i/2*ln() } Vectorized<ComplexDbl> sin() const { return map(std::sin); } Vectorized<ComplexDbl> sinh() const { return map(std::sinh); } Vectorized<ComplexDbl> cos() const { return map(std::cos); } Vectorized<ComplexDbl> cosh() const { return map(std::cosh); } Vectorized<ComplexDbl> tan() const { return map(std::tan); } Vectorized<ComplexDbl> tanh() const { return map(std::tanh); } Vectorized<ComplexDbl> ceil() const { return {vec_ceil(_vec0), vec_ceil(_vec1)}; } Vectorized<ComplexDbl> floor() const { return {vec_floor(_vec0), vec_floor(_vec1)}; } Vectorized<ComplexDbl> neg() const { auto z = Vectorized<ComplexDbl>(vd_zero); return z - *this; } Vectorized<ComplexDbl> round() const { return {vec_rint(_vec0), vec_rint(_vec1)}; } Vectorized<ComplexDbl> trunc() const { return {vec_trunc(_vec0), vec_trunc(_vec1)}; } Vectorized<ComplexDbl> elwise_sqrt() const { return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; } Vectorized<ComplexDbl> sqrt() const { return map(std::sqrt); } Vectorized<ComplexDbl> reciprocal() const { // re + im*i = (a + bi) / (c + di) // re = (ac + bd)/abs_2() = c/abs_2() // im = (bc - ad)/abs_2() = d/abs_2() auto c_d = *this ^ vd_isign_mask; // c -d auto abs = abs_2_(); return c_d.elwise_div(abs); } Vectorized<ComplexDbl> rsqrt() const { return sqrt().reciprocal(); } static Vectorized<ComplexDbl> horizontal_add( Vectorized<ComplexDbl>& first, Vectorized<ComplexDbl>& second) { auto first_perm = first.el_swapped(); // 2perm auto second_perm = second.el_swapped(); // 2perm // summ auto first_ret = first + first_perm; // 2add auto second_ret = second + second_perm; // 2 add // now lets choose evens return el_mergee(first_ret, second_ret); // 2 mergee's } static Vectorized<ComplexDbl> horizontal_sub( Vectorized<ComplexDbl>& first, Vectorized<ComplexDbl>& second) { // we will simulate it differently with 6 instructions total // lets permute second so that we can add it getting horizontal sums auto first_perm = first.el_swapped(); // 2perm auto second_perm = second.el_swapped(); // 2perm // summ auto first_ret = first 
- first_perm; // 2sub auto second_ret = second - second_perm; // 2 sub // now lets choose evens return el_mergee(first_ret, second_ret); // 2 mergee's } Vectorized<ComplexDbl> inline operator*(const Vectorized<ComplexDbl>& b) const { //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i #if 1 // this is more vsx friendly than simulating horizontal from x86 auto vi = b.el_mergeo(); auto vr = b.el_mergee(); vi = vi ^ vd_rsign_mask; auto ret = elwise_mult(vr); auto vx_swapped = el_swapped(); ret = vx_swapped.el_madd(vi, ret); #else auto ac_bd = elwise_mult(b); auto d_c = b.el_swapped(); d_c = d_c ^ vd_isign_mask; auto ad_bc = elwise_mult(d_c); auto ret = horizontal_sub(ac_bd, ad_bc); #endif return ret; } Vectorized<ComplexDbl> inline operator/(const Vectorized<ComplexDbl>& b) const { // re + im*i = (a + bi) / (c + di) // re = (ac + bd)/abs_2() // im = (bc - ad)/abs_2() #if 1 auto vi = b.el_mergeo(); auto vr = b.el_mergee(); auto abs_b = b.abs_2_(); vi = vi ^ vd_isign_mask; auto ret = elwise_mult(vr); auto vx_swapped = el_swapped(); ret = vx_swapped.el_madd(vi, ret); ret = ret.elwise_div(abs_b); #else // Vectorized x86 simulation auto ac_bd = elwise_mult(b); auto d_c = b.el_swapped(); d_c = d_c ^ vd_rsign_mask; auto ad_bc = elwise_mult(d_c); auto abs_b = b.abs_2_(); auto re_im = horizontal_add(ac_bd, ad_bc); auto ret = re_im.elwise_div(abs_b); #endif return ret; } Vectorized<ComplexDbl> exp() const { return map(std::exp); } Vectorized<ComplexDbl> exp2() const { return map(exp2_impl); } Vectorized<ComplexDbl> expm1() const { return map(std::expm1); } Vectorized<ComplexDbl> pow(const Vectorized<ComplexDbl>& exp) const { __at_align__ ComplexDbl x_tmp[size()]; __at_align__ ComplexDbl y_tmp[size()]; store(x_tmp); exp.store(y_tmp); for (const auto i : c10::irange(size())) { x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); } return loadu(x_tmp); } Vectorized<ComplexDbl> sgn() const { return map(at::native::sgn_impl); } Vectorized<ComplexDbl> hypot(const Vectorized<ComplexDbl>& b) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> nextafter(const Vectorized<ComplexDbl>& b) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> igamma(const Vectorized<ComplexDbl>& x) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> igammac(const Vectorized<ComplexDbl>& x) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> atan2(const Vectorized<ComplexDbl>& b) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> erf() const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> erfc() const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> operator<(const Vectorized<ComplexDbl>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> operator<=(const Vectorized<ComplexDbl>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> operator>(const Vectorized<ComplexDbl>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> operator>=(const Vectorized<ComplexDbl>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> eq(const Vectorized<ComplexDbl>& other) const { auto eq = (*this == other); // compares real and imag individually // If both real numbers and imag numbers are equal, then the complex numbers are equal return 
(eq.real() & eq.imag()) & vd_one; } Vectorized<ComplexDbl> ne(const Vectorized<ComplexDbl>& other) const { auto ne = (*this != other); // compares real and imag individually // If either real numbers or imag numbers are not equal, then the complex numbers are not equal return (ne.real() | ne.imag()) & vd_one; } Vectorized<ComplexDbl> lt(const Vectorized<ComplexDbl>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> le(const Vectorized<ComplexDbl>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> gt(const Vectorized<ComplexDbl>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexDbl> ge(const Vectorized<ComplexDbl>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } DEFINE_MEMBER_OP(operator==, ComplexDbl, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, ComplexDbl, vec_cmpne) DEFINE_MEMBER_OP(operator+, ComplexDbl, vec_add) DEFINE_MEMBER_OP(operator-, ComplexDbl, vec_sub) DEFINE_MEMBER_OP(operator&, ComplexDbl, vec_and) DEFINE_MEMBER_OP(operator|, ComplexDbl, vec_or) DEFINE_MEMBER_OP(operator^, ComplexDbl, vec_xor) // elelemtwise helpers DEFINE_MEMBER_OP(elwise_mult, ComplexDbl, vec_mul) DEFINE_MEMBER_OP(elwise_div, ComplexDbl, vec_div) DEFINE_MEMBER_OP(elwise_gt, ComplexDbl, vec_cmpgt) DEFINE_MEMBER_OP(elwise_ge, ComplexDbl, vec_cmpge) DEFINE_MEMBER_OP(elwise_lt, ComplexDbl, vec_cmplt) DEFINE_MEMBER_OP(elwise_le, ComplexDbl, vec_cmple) }; template <> Vectorized<ComplexDbl> inline maximum( const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) { auto abs_a = a.abs_2_(); auto abs_b = b.abs_2_(); // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ); // auto max = _mm256_blendv_ps(a, b, mask); auto mask = abs_a.elwise_lt(abs_b); auto max = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask); return max; // Exploit the fact that all-ones is a NaN. // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); // return _mm256_or_ps(max, isnan); } template <> Vectorized<ComplexDbl> inline minimum( const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) { auto abs_a = a.abs_2_(); auto abs_b = b.abs_2_(); // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ); // auto min = _mm256_blendv_ps(a, b, mask); auto mask = abs_a.elwise_gt(abs_b); auto min = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask); return min; // Exploit the fact that all-ones is a NaN. // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); // return _mm256_or_ps(min, isnan); } } // namespace } // namespace vec } // namespace at
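For clarity, scalar references (illustrative only, not part of the header) for the lane-wise formulas that operator* and operator/ above implement:

#include <complex>

// (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
std::complex<double> mul_reference(std::complex<double> x, std::complex<double> y) {
  const double a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
  return {a * c - b * d, a * d + b * c};
}

// (a + bi) / (c + di) = ((ac + bd) + (bc - ad)i) / (c*c + d*d)
std::complex<double> div_reference(std::complex<double> x, std::complex<double> y) {
  const double a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
  const double abs2 = c * c + d * d;  // what abs_2_() computes per lane pair
  return {(a * c + b * d) / abs2, (b * c - a * d) / abs2};
}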
18,631
30.314286
99
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> #include <c10/util/complex.h> #include <c10/util/irange.h> namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { using ComplexFlt = c10::complex<float>; template <> class Vectorized<ComplexFlt> { private: union { struct { vfloat32 _vec0; vfloat32 _vec1; }; struct { vbool32 _vecb0; vbool32 _vecb1; }; } __attribute__((__may_alias__)); public: using value_type = ComplexFlt; using vec_internal_type = vfloat32; using vec_internal_mask_type = vbool32; using size_type = int; static constexpr size_type size() { return 4; } Vectorized() {} C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {} Vectorized(ComplexFlt val) { float real_value = val.real(); float imag_value = val.imag(); _vec0 = vfloat32{real_value, imag_value, real_value, imag_value}; _vec1 = vfloat32{real_value, imag_value, real_value, imag_value}; } Vectorized(ComplexFlt val1, ComplexFlt val2, ComplexFlt val3, ComplexFlt val4) { _vec0 = vfloat32{val1.real(), val1.imag(), val2.real(), val2.imag()}; _vec1 = vfloat32{val3.real(), val3.imag(), val4.real(), val4.imag()}; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 0, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { return a; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 1, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { return b; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 2, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { return {b._vec0, a._vec1}; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 3, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { return {a._vec0, b._vec1}; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 4, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { const vbool32 mask_1st = VsxComplexMask1(mask); return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1}; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 5, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { const vbool32 mask_1st = VsxComplexMask1(mask); return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1}; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 6, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { const vbool32 mask_2nd = VsxComplexMask2(mask); // generated masks return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 7, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { const vbool32 mask_2nd = VsxComplexMask2(mask); // generated masks 
return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; } template <uint64_t mask> static std::enable_if_t<blendChoiceComplex(mask) == 8, Vectorized<ComplexFlt>> C10_ALWAYS_INLINE blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { const vbool32 mask_1st = VsxComplexMask1(mask); const vbool32 mask_2nd = VsxComplexMask2(mask); return { (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; } template <int64_t mask> static Vectorized<ComplexFlt> C10_ALWAYS_INLINE el_blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { const vbool32 mask_1st = VsxMask1(mask); const vbool32 mask_2nd = VsxMask2(mask); return { (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; } static Vectorized<ComplexFlt> blendv( const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b, const Vectorized<ComplexFlt>& mask) { // convert std::complex<V> index mask to V index mask: xy -> xxyy auto mask_complex = Vectorized<ComplexFlt>( vec_mergeh(mask._vec0, mask._vec0), vec_mergeh(mask._vec1, mask._vec1)); return { vec_sel(a._vec0, b._vec0, mask_complex._vec0), vec_sel(a._vec1, b._vec1, mask_complex._vec1), }; } static Vectorized<ComplexFlt> elwise_blendv( const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b, const Vectorized<ComplexFlt>& mask) { return { vec_sel(a._vec0, b._vec0, mask._vec0), vec_sel(a._vec1, b._vec1, mask._vec1), }; } template <typename step_t> static Vectorized<ComplexFlt> arange( ComplexFlt base = 0., step_t step = static_cast<step_t>(1)) { return Vectorized<ComplexFlt>( base, base + step, base + ComplexFlt(2) * step, base + ComplexFlt(3) * step); } static Vectorized<ComplexFlt> set( const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b, int64_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b); } return b; } static Vectorized<value_type> C10_ALWAYS_INLINE loadu(const void* ptr, int count = size()) { if (count == size()) { return { vec_vsx_ld(offset0, reinterpret_cast<const float*>(ptr)), vec_vsx_ld(offset16, reinterpret_cast<const float*>(ptr))}; } __at_align__ value_type tmp_values[size()] = {}; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return { vec_vsx_ld(offset0, reinterpret_cast<const float*>(tmp_values)), vec_vsx_ld(offset16, reinterpret_cast<const float*>(tmp_values))}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { vec_vsx_st(_vec0, offset0, reinterpret_cast<float*>(ptr)); vec_vsx_st(_vec1, offset16, reinterpret_cast<float*>(ptr)); } else if (count > 0) { __at_align__ value_type tmp_values[size()]; vec_vsx_st(_vec0, offset0, reinterpret_cast<float*>(tmp_values)); vec_vsx_st(_vec1, offset16, reinterpret_cast<float*>(tmp_values)); std::memcpy( ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } const ComplexFlt& operator[](int idx) const = delete; ComplexFlt& operator[](int idx) = delete; Vectorized<ComplexFlt> map(ComplexFlt (*const f)(ComplexFlt)) const { __at_align__ ComplexFlt tmp[size()]; store(tmp); for (const auto i : c10::irange(size())) { tmp[i] = f(tmp[i]); } return loadu(tmp); } Vectorized<ComplexFlt> map(ComplexFlt (*const f)(const ComplexFlt&)) const { __at_align__ ComplexFlt tmp[size()]; store(tmp); for (const auto i : c10::irange(size())) { tmp[i] = f(tmp[i]); } return loadu(tmp); } static Vectorized<ComplexFlt> horizontal_add_permD8( 
Vectorized<ComplexFlt>& first, Vectorized<ComplexFlt>& second) { // we will simulate it differently with 6 instructions total // lets permute second so that we can add it getting horizontal sums auto first_perm = first.el_swapped(); // 2perm auto second_perm = second.el_swapped(); // 2perm // sum auto first_ret = first + first_perm; // 2add auto second_ret = second + second_perm; // 2 add // now lets choose evens return el_mergee(first_ret, second_ret); // 2 mergee's } static Vectorized<ComplexFlt> horizontal_sub_permD8( Vectorized<ComplexFlt>& first, Vectorized<ComplexFlt>& second) { // we will simulate it differently with 6 instructions total // lets permute second so that we can add it getting horizontal sums auto first_perm = first.el_swapped(); // 2perm auto second_perm = second.el_swapped(); // 2perm // sum auto first_ret = first - first_perm; // 2sub auto second_ret = second - second_perm; // 2 sub // now lets choose evens return el_mergee(first_ret, second_ret); // 2 mergee's } Vectorized<ComplexFlt> abs_2_() const { auto a = (*this).elwise_mult(*this); auto permuted = a.el_swapped(); a = a + permuted; return a.el_mergee(); } Vectorized<ComplexFlt> abs_() const { auto ret = abs_2_(); return ret.elwise_sqrt(); } Vectorized<ComplexFlt> abs() const { return abs_() & real_mask; } Vectorized<ComplexFlt> real_() const { return *this & real_mask; } Vectorized<ComplexFlt> real() const { return *this & real_mask; } Vectorized<ComplexFlt> imag_() const { return *this & imag_mask; } Vectorized<ComplexFlt> imag() const { // we can use swap_mask or sldwi auto ret = imag_(); return { vec_sldw(ret._vec0, ret._vec0, 3), vec_sldw(ret._vec1, ret._vec1, 3)}; } Vectorized<ComplexFlt> conj_() const { return *this ^ isign_mask; } Vectorized<ComplexFlt> conj() const { return *this ^ isign_mask; } Vectorized<ComplexFlt> log() const { // Most trigonomic ops use the log() op to improve complex number // performance. 
return map(std::log); } Vectorized<ComplexFlt> log2() const { // log2eB_inv auto ret = log(); return ret.elwise_mult(log2e_inv); } Vectorized<ComplexFlt> log10() const { auto ret = log(); return ret.elwise_mult(log10e_inv); } Vectorized<ComplexFlt> log1p() const { return map(std::log1p); } Vectorized<ComplexFlt> el_swapped() const { vfloat32 v0 = vec_perm(_vec0, _vec0, swap_mask); vfloat32 v1 = vec_perm(_vec1, _vec1, swap_mask); return {v0, v1}; } Vectorized<ComplexFlt> el_mergee() const { // as mergee phased in , we can use vec_perm with mask return {vec_mergee(_vecb0, _vecb0), vec_mergee(_vecb1, _vecb1)}; } Vectorized<ComplexFlt> el_mergeo() const { // as mergeo phased in , we can use vec_perm with mask return {vec_mergeo(_vecb0, _vecb0), vec_mergeo(_vecb1, _vecb1)}; } Vectorized<ComplexFlt> el_madd( const Vectorized<ComplexFlt>& multiplier, const Vectorized<ComplexFlt>& val) const { return { vec_madd(_vec0, multiplier._vec0, val._vec0), vec_madd(_vec1, multiplier._vec1, val._vec1)}; } static Vectorized<ComplexFlt> el_mergee( Vectorized<ComplexFlt>& first, Vectorized<ComplexFlt>& second) { // as mergee phased in , we can use vec_perm with mask return { vec_mergee(first._vecb0, second._vecb0), vec_mergee(first._vecb1, second._vecb1)}; } Vectorized<ComplexFlt> angle_() const { // angle = atan2(b/a) // auto b_a = _mm256_permute_ps(values, 0xB1); // b a // return Sleef_atan2f8_u10(values, b_a); // 90-angle angle Vectorized<ComplexFlt> ret; for (int i = 0; i < 4; i += 2) { ret._vec0[i] = std::atan2(_vec0[i + 1], _vec0[i]); ret._vec1[i] = std::atan2(_vec1[i + 1], _vec1[i]); } return ret; } Vectorized<ComplexFlt> angle() const { return angle_() & real_mask; } Vectorized<ComplexFlt> sin() const { return map(std::sin); } Vectorized<ComplexFlt> sinh() const { return map(std::sinh); } Vectorized<ComplexFlt> cos() const { return map(std::cos); } Vectorized<ComplexFlt> cosh() const { return map(std::cosh); } Vectorized<ComplexFlt> ceil() const { return {vec_ceil(_vec0), vec_ceil(_vec1)}; } Vectorized<ComplexFlt> floor() const { return {vec_floor(_vec0), vec_floor(_vec1)}; } Vectorized<ComplexFlt> neg() const { auto z = Vectorized<ComplexFlt>(zero); return z - *this; } Vectorized<ComplexFlt> round() const { return {vec_round(_vec0), vec_round(_vec1)}; } Vectorized<ComplexFlt> tan() const { return map(std::tan); } Vectorized<ComplexFlt> tanh() const { return map(std::tanh); } Vectorized<ComplexFlt> trunc() const { return {vec_trunc(_vec0), vec_trunc(_vec1)}; } Vectorized<ComplexFlt> elwise_sqrt() const { return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; } Vectorized<ComplexFlt> sqrt() const { return map(std::sqrt); } Vectorized<ComplexFlt> reciprocal() const { // re + im*i = (a + bi) / (c + di) // re = (ac + bd)/abs_2() = c/abs_2() // im = (bc - ad)/abs_2() = d/abs_2() auto c_d = *this ^ isign_mask; // c -d auto abs = abs_2_(); return c_d.elwise_div(abs); } Vectorized<ComplexFlt> rsqrt() const { return sqrt().reciprocal(); } Vectorized<ComplexFlt> pow(const Vectorized<ComplexFlt>& exp) const { __at_align__ ComplexFlt x_tmp[size()]; __at_align__ ComplexFlt y_tmp[size()]; store(x_tmp); exp.store(y_tmp); for (const auto i : c10::irange(size())) { x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]); } return loadu(x_tmp); } Vectorized<ComplexFlt> atan() const { // atan(x) = i/2 * ln((i + z)/(i - z)) auto ione = Vectorized(imag_one); auto sum = ione + *this; auto sub = ione - *this; auto ln = (sum / sub).log(); // ln((i + z)/(i - z)) return ln * imag_half; // i/2*ln() } Vectorized<ComplexFlt> acos() const { // acos(x) = pi/2 
- asin(x) return Vectorized(pi_2) - asin(); } Vectorized<ComplexFlt> inline operator*(const Vectorized<ComplexFlt>& b) const { //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i #if 1 // this is more vsx friendly than simulating horizontal from x86 auto vi = b.el_mergeo(); auto vr = b.el_mergee(); vi = vi ^ rsign_mask; auto ret = elwise_mult(vr); auto vx_swapped = el_swapped(); ret = vx_swapped.el_madd(vi, ret); return ret; #else auto ac_bd = elwise_mult(b); auto d_c = b.el_swapped(); d_c = d_c ^ isign_mask; auto ad_bc = elwise_mult(d_c); auto ret = horizontal_sub_permD8(ac_bd, ad_bc); return ret; #endif } Vectorized<ComplexFlt> inline operator/(const Vectorized<ComplexFlt>& b) const { // re + im*i = (a + bi) / (c + di) // re = (ac + bd)/abs_2() // im = (bc - ad)/abs_2() #if 1 auto vi = b.el_mergeo(); auto vr = b.el_mergee(); auto abs_b = b.abs_2_(); vi = vi ^ isign_mask; auto ret = elwise_mult(vr); auto vx_swapped = el_swapped(); ret = vx_swapped.el_madd(vi, ret); ret = ret.elwise_div(abs_b); #else // Vectorized x86 simulation auto ac_bd = elwise_mult(b); auto d_c = b.el_swapped(); d_c = d_c ^ rsign_mask; auto ad_bc = elwise_mult(d_c); auto abs_b = b.abs_2_(); auto re_im = horizontal_add_permD8(ac_bd, ad_bc); auto ret = re_im.elwise_div(abs_b); #endif return ret; } Vectorized<ComplexFlt> asin() const { // asin(x) // = -i*ln(iz + sqrt(1 -z^2)) // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi))) // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi)) #if 1 auto conj = conj_(); auto b_a = conj.el_swapped(); auto ab = conj.elwise_mult(b_a); auto im = ab + ab; auto val_2 = (*this).elwise_mult(*this); auto val_2_swapped = val_2.el_swapped(); auto re = horizontal_sub_permD8(val_2, val_2_swapped); re = Vectorized<ComplexFlt>(one) - re; auto root = el_blend<0xAA>(re, im).sqrt(); auto ln = (b_a + root).log(); return ln.el_swapped().conj(); #else return map(std::asin); #endif } Vectorized<ComplexFlt> exp() const { return map(std::exp); } Vectorized<ComplexFlt> exp2() const { return map(exp2_impl); } Vectorized<ComplexFlt> expm1() const { return map(std::expm1); } Vectorized<ComplexFlt> eq(const Vectorized<ComplexFlt>& other) const { auto eq = (*this == other); // compares real and imag individually // If both real numbers and imag numbers are equal, then the complex numbers are equal return (eq.real() & eq.imag()) & one; } Vectorized<ComplexFlt> ne(const Vectorized<ComplexFlt>& other) const { auto ne = (*this != other); // compares real and imag individually // If either real numbers or imag numbers are not equal, then the complex numbers are not equal return (ne.real() | ne.imag()) & one; } Vectorized<ComplexFlt> sgn() const { return map(at::native::sgn_impl); } Vectorized<ComplexFlt> hypot(const Vectorized<ComplexFlt>& b) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> nextafter(const Vectorized<ComplexFlt>& b) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> igamma(const Vectorized<ComplexFlt>& x) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> igammac(const Vectorized<ComplexFlt>& x) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> atan2(const Vectorized<ComplexFlt>& b) const { TORCH_CHECK(false,"not supported for complex numbers"); } Vectorized<ComplexFlt> erf() const { TORCH_CHECK(false,"not supported for complex numbers"); } Vectorized<ComplexFlt> erfc() const { TORCH_CHECK(false,"not supported for complex numbers"); } 
Vectorized<ComplexFlt> operator<(const Vectorized<ComplexFlt>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> operator<=(const Vectorized<ComplexFlt>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> operator>(const Vectorized<ComplexFlt>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> operator>=(const Vectorized<ComplexFlt>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> lt(const Vectorized<ComplexFlt>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> le(const Vectorized<ComplexFlt>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> gt(const Vectorized<ComplexFlt>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } Vectorized<ComplexFlt> ge(const Vectorized<ComplexFlt>& other) const { TORCH_CHECK(false, "not supported for complex numbers"); } DEFINE_MEMBER_OP(operator==, ComplexFlt, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, ComplexFlt, vec_cmpne) DEFINE_MEMBER_OP(operator+, ComplexFlt, vec_add) DEFINE_MEMBER_OP(operator-, ComplexFlt, vec_sub) DEFINE_MEMBER_OP(operator&, ComplexFlt, vec_and) DEFINE_MEMBER_OP(operator|, ComplexFlt, vec_or) DEFINE_MEMBER_OP(operator^, ComplexFlt, vec_xor) // elementwise helpers DEFINE_MEMBER_OP(elwise_mult, ComplexFlt, vec_mul) DEFINE_MEMBER_OP(elwise_div, ComplexFlt, vec_div) DEFINE_MEMBER_OP(elwise_gt, ComplexFlt, vec_cmpgt) DEFINE_MEMBER_OP(elwise_ge, ComplexFlt, vec_cmpge) DEFINE_MEMBER_OP(elwise_lt, ComplexFlt, vec_cmplt) DEFINE_MEMBER_OP(elwise_le, ComplexFlt, vec_cmple) }; template <> Vectorized<ComplexFlt> inline maximum( const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { auto abs_a = a.abs_2_(); auto abs_b = b.abs_2_(); // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ); // auto max = _mm256_blendv_ps(a, b, mask); auto mask = abs_a.elwise_lt(abs_b); auto max = Vectorized<ComplexFlt>::elwise_blendv(a, b, mask); return max; // Exploit the fact that all-ones is a NaN. // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); // return _mm256_or_ps(max, isnan); } template <> Vectorized<ComplexFlt> inline minimum( const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) { auto abs_a = a.abs_2_(); auto abs_b = b.abs_2_(); // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ); // auto min = _mm256_blendv_ps(a, b, mask); auto mask = abs_a.elwise_gt(abs_b); auto min = Vectorized<ComplexFlt>::elwise_blendv(a, b, mask); return min; // Exploit the fact that all-ones is a NaN. // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q); // return _mm256_or_ps(min, isnan); } } // namespace } // namespace vec } // namespace at
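The operator* defined above avoids x86-style horizontal sums by combining el_mergee/el_mergeo with a single fused multiply-add. A minimal scalar sketch of that data flow (an illustration, not code from the header; the {real, imag} lane pairing and the helper name are assumptions):

#include <cassert>
#include <complex>

// One {a, b} lane pair models a + bi. Mirrors the VSX flow:
//   vr  = el_mergee(y)              -> {c, c}
//   vi  = el_mergeo(y) ^ rsign_mask -> {-d, d}
//   ret = {a, b} * vr; then ret = el_swapped({a, b}) * vi + ret
static std::complex<float> mul_like_vsx(std::complex<float> x,
                                        std::complex<float> y) {
  float a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
  float re = a * c;    // elwise_mult with vr
  float im = b * c;
  re = b * -d + re;    // el_madd with the swapped input: a*c - b*d
  im = a * d + im;     //                                 a*d + b*c
  return {re, im};
}

int main() {
  std::complex<float> x{1.5f, -2.0f}, y{0.5f, 3.0f};
  assert(std::abs(mul_like_vsx(x, y) - x * y) < 1e-5f);
  return 0;
}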
21,047
30.508982
99
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> #include <c10/util/irange.h> #include <sleef.h> namespace at { namespace vec { inline namespace CPU_CAPABILITY { template <> class Vectorized<double> { private: union { struct { vfloat64 _vec0; vfloat64 _vec1; }; struct { vbool64 _vecb0; vbool64 _vecb1; }; } __attribute__((__may_alias__)); public: using value_type = double; using vec_internal_type = vfloat64; using vec_internal_mask_type = vbool64; using size_type = int; static constexpr size_type size() { return 4; } Vectorized() {} C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {} C10_ALWAYS_INLINE Vectorized(double scalar) : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} C10_ALWAYS_INLINE Vectorized( double scalar1, double scalar2, double scalar3, double scalar4) : _vec0{vfloat64{scalar1, scalar2}}, _vec1{vfloat64{scalar3, scalar4}} {} C10_ALWAYS_INLINE const vec_internal_type& vec0() const { return _vec0; } C10_ALWAYS_INLINE const vec_internal_type& vec1() const { return _vec1; } int zero_mask() const { auto cmp = (*this == vd_zero); return (cmp._vecb0[0] & 1) | (cmp._vecb0[1] & 2) | (cmp._vecb1[0] & 4) | (cmp._vecb1[1] & 8); } template <int64_t mask> static std::enable_if_t<blendChoiceDbl(mask) == 0, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { return a; } template <int64_t mask> static std::enable_if_t<blendChoiceDbl(mask) == 1, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { return b; } template <int64_t mask> static std::enable_if_t<blendChoiceDbl(mask) == 2, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { return { b._vec0, a._vec1 }; } template <int64_t mask> static std::enable_if_t<blendChoiceDbl(mask) == 3, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { return { a._vec0, b._vec1 }; } template <int64_t mask> static std::enable_if_t<blendChoiceDbl(mask) == 4, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { const vbool64 mask_1st = VsxDblMask1(mask); return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1 }; } template <int64_t mask> static std::enable_if_t<blendChoiceDbl(mask) == 5, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { const vbool64 mask_1st = VsxDblMask1(mask); return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1 }; } template <int64_t mask> static std::enable_if_t<blendChoiceDbl(mask) == 6, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { const vbool64 mask_2nd = VsxDblMask2(mask); // generated masks return { a._vec0, (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) }; } template <int64_t mask> static std::enable_if_t<blendChoiceDbl(mask) == 7, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { const vbool64 mask_2nd = VsxDblMask2(mask); // generated masks return { b._vec0, (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) }; } template <int64_t mask> static 
std::enable_if_t<blendChoiceDbl(mask) == 8, Vectorized<double>> C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) { const vbool64 mask_1st = VsxDblMask1(mask); const vbool64 mask_2nd = VsxDblMask2(mask); return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) }; } static Vectorized<double> C10_ALWAYS_INLINE blendv( const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& mask) { // the mask used here returned by comparision of vec256 return { vec_sel(a._vec0, b._vec0, mask._vecb0), vec_sel(a._vec1, b._vec1, mask._vecb1)}; } template <typename step_t> static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) { return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step); } static Vectorized<double> C10_ALWAYS_INLINE set(const Vectorized<double>& a, const Vectorized<double>& b, size_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b); } return b; } static Vectorized<value_type> C10_ALWAYS_INLINE loadu(const void* ptr, int count = size()) { if (count == size()) { return { vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)), vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))}; } __at_align__ value_type tmp_values[size()] = {}; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr)); vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr)); } else if (count > 0) { __at_align__ value_type tmp_values[size()]; vec_vsx_st(_vec0, offset0, tmp_values); vec_vsx_st(_vec1, offset16, tmp_values); std::memcpy( ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } const double& operator[](int idx) const = delete; double& operator[](int idx) = delete; Vectorized<double> map(double (*const f)(double)) const { Vectorized<double> ret; for (const auto i : c10::irange(size()/2)) { ret._vec0[i] = f(_vec0[i]); } for (const auto i : c10::irange(size()/2)) { ret._vec1[i] = f(_vec1[i]); } return ret; } Vectorized<double> mapbi(double (*const f)(double, double), const Vectorized<double>& other) const { Vectorized<double> ret; for (const auto i : c10::irange(size()/2)) { ret._vec0[i] = f(_vec0[i], other._vec0[i]); } for (const auto i : c10::irange(size()/2)) { ret._vec1[i] = f(_vec1[i], other._vec1[i]); } return ret; } Vectorized<double> C10_ALWAYS_INLINE abs() const { return {vec_abs(_vec0), vec_abs(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE acos() const { return {Sleef_acosd2_u10(_vec0), Sleef_acosd2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE asin() const { return {Sleef_asind2_u10(_vec0), Sleef_asind2_u10(_vec1)}; } Vectorized<double> atan() const { return {Sleef_atand2_u10(_vec0), Sleef_atand2_u10(_vec1)}; } Vectorized<double> atan2(const Vectorized<double>& b) const { return {Sleef_atan2d2_u10(_vec0, b._vec0), Sleef_atan2d2_u10(_vec1, b._vec1)}; } Vectorized<double> copysign(const Vectorized<double> &sign) const { return {Sleef_copysignd2(_vec0, sign._vec0), Sleef_copysignd2(_vec1, sign._vec1)}; } Vectorized<double> erf() const { return {Sleef_erfd2_u10(_vec0), Sleef_erfd2_u10(_vec1)}; } Vectorized<double> erfc() const { return {Sleef_erfcd2_u15(_vec0), 
Sleef_erfcd2_u15(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE exp() const { return {Sleef_expd2_u10(_vec0), Sleef_expd2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE exp2() const { return {Sleef_exp2d2_u10(_vec0), Sleef_exp2d2_u10(_vec1)}; } Vectorized<double> expm1() const { return {Sleef_expm1d2_u10(_vec0), Sleef_expm1d2_u10(_vec1)}; } Vectorized<double> lgamma() const __ubsan_ignore_undefined__ { return {Sleef_lgammad2_u10(_vec0), Sleef_lgammad2_u10(_vec1)}; } Vectorized<double> erfinv() const { return map(calc_erfinv); } Vectorized<double> angle() const { auto tmp = blendv( Vectorized<double>(0), Vectorized<double>(c10::pi<double>), *this < Vectorized<double>(0)); return blendv(tmp, *this, isnan()); } Vectorized<double> real() const { return *this; } Vectorized<double> imag() const { return Vectorized<double>{0}; } Vectorized<double> conj() const { return *this; } Vectorized<double> C10_ALWAYS_INLINE log() const { return {Sleef_logd2_u10(_vec0), Sleef_logd2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE log10() const { return {Sleef_log10d2_u10(_vec0), Sleef_log10d2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE log1p() const { return {Sleef_log1pd2_u10(_vec0), Sleef_log1pd2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE log2() const { return {Sleef_log2d2_u10(_vec0), Sleef_log2d2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE ceil() const { return {vec_ceil(_vec0), vec_ceil(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE cos() const { return {Sleef_cosd2_u10(_vec0), Sleef_cosd2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE cosh() const { return {Sleef_coshd2_u10(_vec0), Sleef_coshd2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE floor() const { return {vec_floor(_vec0), vec_floor(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE neg() const { return {vec_neg(_vec0), vec_neg(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE round() const { return {vec_rint(_vec0), vec_rint(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE sin() const { return {Sleef_sind2_u10(_vec0), Sleef_sind2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE sinh() const { return {Sleef_sinhd2_u10(_vec0), Sleef_sinhd2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE tan() const { return {Sleef_tand2_u10(_vec0), Sleef_tand2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE tanh() const { return {Sleef_tanhd2_u10(_vec0), Sleef_tanhd2_u10(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE trunc() const { return {vec_trunc(_vec0), vec_trunc(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE frac() const { return *this - trunc(); } Vectorized<double> C10_ALWAYS_INLINE sqrt() const { return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; } Vectorized<double> C10_ALWAYS_INLINE reciprocal() const { return { vec_div(vd_one, _vec0), // vec_re(_vec0) is estimated one. 
vec_div(vd_one, _vec1)}; } Vectorized<double> C10_ALWAYS_INLINE rsqrt() const { return sqrt().reciprocal(); } Vectorized<double> C10_ALWAYS_INLINE pow(const Vectorized<double>& b) const { return {Sleef_powd2_u10(_vec0, b._vec0), Sleef_powd2_u10(_vec1, b._vec1)}; } Vectorized<double> C10_ALWAYS_INLINE fmod(const Vectorized<double>& b) const { return {Sleef_fmodd2(_vec0, b._vec0),Sleef_fmodd2(_vec1, b._vec1)}; } Vectorized<double> hypot(const Vectorized<double>& b) const { return {Sleef_hypotd2_u05(_vec0, b._vec0), Sleef_hypotd2_u05(_vec1, b._vec1)}; } Vectorized<double> nextafter(const Vectorized<double>& b) const { return {Sleef_nextafterd2(_vec0, b._vec0), Sleef_nextafterd2(_vec1, b._vec1)}; } Vectorized<double> igamma(const Vectorized<double>& x) const { return mapbi(calc_igamma, x); } Vectorized<double> igammac(const Vectorized<double>& x) const { return mapbi(calc_igammac, x); } Vectorized<double> i0() const { return map(calc_i0); } Vectorized<double> i0e() const { return map(calc_i0e); } Vectorized<double> _nor() const { return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)}; } Vectorized<double> isnan() const { auto x = *this; auto ret = (x == x); return ret._nor(); } DEFINE_MEMBER_OP(operator==, double, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, double, vec_cmpne) DEFINE_MEMBER_OP(operator<, double, vec_cmplt) DEFINE_MEMBER_OP(operator<=, double, vec_cmple) DEFINE_MEMBER_OP(operator>, double, vec_cmpgt) DEFINE_MEMBER_OP(operator>=, double, vec_cmpge) DEFINE_MEMBER_OP_AND_ONE(eq, double, vec_cmpeq) DEFINE_MEMBER_OP_AND_ONE(ne, double, vec_cmpne) DEFINE_MEMBER_OP_AND_ONE(lt, double, vec_cmplt) DEFINE_MEMBER_OP_AND_ONE(le, double, vec_cmple) DEFINE_MEMBER_OP_AND_ONE(gt, double, vec_cmpgt) DEFINE_MEMBER_OP_AND_ONE(ge, double, vec_cmpge) DEFINE_MEMBER_OP(operator+, double, vec_add) DEFINE_MEMBER_OP(operator-, double, vec_sub) DEFINE_MEMBER_OP(operator*, double, vec_mul) DEFINE_MEMBER_OP(operator/, double, vec_div) DEFINE_MEMBER_OP(maximum, double, vec_max_nan2) DEFINE_MEMBER_OP(minimum, double, vec_min_nan2) DEFINE_MEMBER_OP(operator&, double, vec_and) DEFINE_MEMBER_OP(operator|, double, vec_or) DEFINE_MEMBER_OP(operator^, double, vec_xor) DEFINE_MEMBER_TERNARY_OP(madd, double, vec_madd) }; template <> Vectorized<double> inline maximum( const Vectorized<double>& a, const Vectorized<double>& b) { return a.maximum(b); } template <> Vectorized<double> inline minimum( const Vectorized<double>& a, const Vectorized<double>& b) { return a.minimum(b); } } // namespace } // namespace vec } // namespace at
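A short usage sketch for the class above: the count-aware loadu/store overloads let the scalar tail reuse the vector path. The driver function is hypothetical, and this assumes a POWER build where the VSX specialization is the active CPU_CAPABILITY:

#include <ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h>

// Hypothetical helper: elementwise exp over a buffer of arbitrary length.
void exp_buffer(const double* in, double* out, int n) {
  using Vec = at::vec::Vectorized<double>;
  int i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    Vec::loadu(in + i).exp().store(out + i);  // full 4-lane chunks
  }
  if (i < n) {
    // Partial chunk: loadu zero-fills past `count` and store writes back
    // only `count` elements, so neither end runs out of bounds.
    Vec::loadu(in + i, n - i).exp().store(out + i, n - i);
  }
}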
13,709
31.956731
97
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> #include <sleef.h> namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { template <> class Vectorized<float> { private: union { struct { vfloat32 _vec0; vfloat32 _vec1; }; struct { vbool32 _vecb0; vbool32 _vecb1; }; } __attribute__((__may_alias__)); public: using value_type = float; using vec_internal_type = vfloat32; using vec_internal_mask_type = vbool32; using size_type = int; static constexpr size_type size() { return 8; } Vectorized() {} C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {} C10_ALWAYS_INLINE Vectorized(float scalar) : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} C10_ALWAYS_INLINE Vectorized( float scalar1, float scalar2, float scalar3, float scalar4, float scalar5, float scalar6, float scalar7, float scalar8) : _vec0{vfloat32{scalar1, scalar2, scalar3, scalar4}}, _vec1{vfloat32{scalar5, scalar6, scalar7, scalar8}} {} C10_ALWAYS_INLINE const vec_internal_type& vec0() const { return _vec0; } C10_ALWAYS_INLINE const vec_internal_type& vec1() const { return _vec1; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 0, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { return a; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 1, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { return b; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 2, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { return {b._vec0, a._vec1}; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 3, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { return {a._vec0, b._vec1}; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 4, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { const vbool32 mask_1st = VsxMask1(mask); return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1}; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 5, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { const vbool32 mask_1st = VsxMask1(mask); return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1}; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 6, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { const vbool32 mask_2nd = VsxMask2(mask); // generated masks return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 7, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { const vbool32 mask_2nd = VsxMask2(mask); // generated masks return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; } template <int64_t mask> static std::enable_if_t<blendChoice(mask) == 8, Vectorized<float>> C10_ALWAYS_INLINE blend(const Vectorized<float>& a, const Vectorized<float>& b) { 
const vbool32 mask_1st = VsxMask1(mask); const vbool32 mask_2nd = VsxMask2(mask); return { (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)}; } static Vectorized<float> C10_ALWAYS_INLINE blendv( const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& mask) { // the mask used here returned by comparision of vec256 // assuming this we can use the same mask directly with vec_sel return { vec_sel(a._vec0, b._vec0, mask._vecb0), vec_sel(a._vec1, b._vec1, mask._vecb1)}; } template <typename step_t> static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) { return Vectorized<float>( base, base + step, base + 2 * step, base + 3 * step, base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); } static Vectorized<float> set( const Vectorized<float>& a, const Vectorized<float>& b, size_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b); case 4: return blend<15>(a, b); case 5: return blend<31>(a, b); case 6: return blend<63>(a, b); case 7: return blend<127>(a, b); } return b; } static Vectorized<value_type> C10_ALWAYS_INLINE loadu(const void* ptr, int count = size()) { if (count == size()) { return { vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)), vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))}; } __at_align__ value_type tmp_values[size()] = {}; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr)); vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr)); } else if (count > 0) { __at_align__ value_type tmp_values[size()]; vec_vsx_st(_vec0, offset0, tmp_values); vec_vsx_st(_vec1, offset16, tmp_values); std::memcpy( ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } const float& operator[](int idx) const = delete; float& operator[](int idx) = delete; Vectorized<float> map(float (*const f)(float)) const { Vectorized<float> ret; for (int i = 0; i < size() / 2; i++) { ret._vec0[i] = f(_vec0[i]); } for (int i = 0; i < size() / 2; i++) { ret._vec1[i] = f(_vec1[i]); } return ret; } Vectorized<float> mapbi(float (*const f)(float, float), const Vectorized<float>& other) const { Vectorized<float> ret; for (int i = 0; i < size() / 2; i++) { ret._vec0[i] = f(_vec0[i], other._vec0[i]); } for (int i = 0; i < size() / 2; i++) { ret._vec1[i] = f(_vec1[i], other._vec1[i]); } return ret; } Vectorized<float> _nor() const { return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)}; } Vectorized<float> isnan() const { auto x = *this; auto ret = (x == x); return ret._nor(); } Vectorized<float> _isinf() const { auto x = *this; return (x == v_inf) | (x == v_minus_inf); } int zero_mask() const { // returns an integer mask where all zero elements are translated to 1-bit // and others are translated to 0-bit //__m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ); auto cmp = (*this == zero); // return _mm256_movemask_ps(cmp); // possible simulation //mask= lvsl ( 0 ) vbpermq( vec, mask <<5) vuint64 result0 = vec_vbpermq((vuint8)cmp._vecb0, mask_zero_bits); vuint64 result1 = vec_vbpermq((vuint8)cmp._vecb1, mask_zero_bits); return (result0[1] >> 12 | (result1[1] >> 8)); } Vectorized<float> C10_ALWAYS_INLINE 
abs() const { return {vec_abs(_vec0), vec_abs(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE acos() const { return {Sleef_acosf4_u10(_vec0), Sleef_acosf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE asin() const { return {Sleef_asinf4_u10(_vec0), Sleef_asinf4_u10(_vec1)}; } Vectorized<float> atan() const { return {Sleef_atanf4_u10(_vec0), Sleef_atanf4_u10(_vec1)}; } Vectorized<float> atan2(const Vectorized<float>& b) const { return {Sleef_atan2f4_u10(_vec0, b._vec0), Sleef_atan2f4_u10(_vec1, b._vec1)}; } Vectorized<float> copysign(const Vectorized<float> &sign) const { return {Sleef_copysignf4(_vec0, sign._vec0), Sleef_copysignf4(_vec1, sign._vec1)}; } Vectorized<float> lgamma() const { return {Sleef_lgammaf4_u10(_vec0), Sleef_lgammaf4_u10(_vec1)}; } Vectorized<float> erf() const { return {Sleef_erff4_u10(_vec0), Sleef_erff4_u10(_vec1)}; } Vectorized<float> erfc() const { return {Sleef_erfcf4_u15(_vec0), Sleef_erfcf4_u15(_vec1)}; } Vectorized<float> erfinv() const { return map(calc_erfinv); } Vectorized<float> angle() const { auto tmp = blendv( Vectorized<float>(0), Vectorized<float>(c10::pi<float>), *this < Vectorized<float>(0)); return blendv(tmp, *this, isnan()); } Vectorized<float> real() const { return *this; } Vectorized<float> imag() const { return Vectorized<float>{0}; } Vectorized<float> conj() const { return *this; } Vectorized<float> C10_ALWAYS_INLINE exp() const { return {Sleef_expf4_u10(_vec0), Sleef_expf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE exp2() const { return {Sleef_exp2f4_u10(_vec0), Sleef_exp2f4_u10(_vec1)}; } Vectorized<float> expm1() const { return {Sleef_expm1f4_u10(_vec0), Sleef_expm1f4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE log() const { return {Sleef_logf4_u10(_vec0), Sleef_logf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE log10() const { return {Sleef_log10f4_u10(_vec0), Sleef_log10f4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE log1p() const { return {Sleef_log1pf4_u10(_vec0), Sleef_log1pf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE log2() const { return {Sleef_log2f4_u10(_vec0), Sleef_log2f4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE ceil() const { return {vec_ceil(_vec0), vec_ceil(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE cos() const { return {Sleef_cosf4_u10(_vec0), Sleef_cosf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE cosh() const { return {Sleef_coshf4_u10(_vec0), Sleef_coshf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE floor() const { return {vec_floor(_vec0), vec_floor(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE neg() const { return {vec_neg(_vec0), vec_neg(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE round() const { return {vec_round(_vec0), vec_round(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE sin() const { return {Sleef_sinf4_u10(_vec0), Sleef_sinf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE sinh() const { return {Sleef_sinhf4_u10(_vec0), Sleef_sinhf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE tan() const { return {Sleef_tanf4_u10(_vec0), Sleef_tanf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE tanh() const { return {Sleef_tanhf4_u10(_vec0), Sleef_tanhf4_u10(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE trunc() const { return {vec_trunc(_vec0), vec_trunc(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE frac() const { return *this - trunc(); } Vectorized<float> C10_ALWAYS_INLINE sqrt() const { return {vec_sqrt(_vec0), vec_sqrt(_vec1)}; } Vectorized<float> C10_ALWAYS_INLINE reciprocal() const { return Vectorized<float>(one) / (*this); } 
Vectorized<float> C10_ALWAYS_INLINE rsqrt() const { return sqrt().reciprocal(); } Vectorized<float> C10_ALWAYS_INLINE pow(const Vectorized<float>& exp) const { return {Sleef_powf4_u10(_vec0, exp._vec0), Sleef_powf4_u10(_vec1, exp._vec1)}; } Vectorized<float> fmod(const Vectorized<float>& b) const { return {Sleef_fmodf4(_vec0, b._vec0),Sleef_fmodf4(_vec1, b._vec1)}; } Vectorized<float> hypot(const Vectorized<float>& b) const { return {Sleef_hypotf4_u05(_vec0, b._vec0), Sleef_hypotf4_u05(_vec1, b._vec1)}; } Vectorized<float> nextafter(const Vectorized<float>& b) const { return {Sleef_nextafterf4(_vec0, b._vec0), Sleef_nextafterf4(_vec1, b._vec1)}; } Vectorized<float> igamma(const Vectorized<float>& x) const { return mapbi(calc_igamma, x); } Vectorized<float> igammac(const Vectorized<float>& x) const { return mapbi(calc_igammac, x); } Vectorized<float> i0() const { return map(calc_i0); } Vectorized<float> i0e() const { return map(calc_i0e); } DEFINE_MEMBER_OP(operator==, float, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, float, vec_cmpne) DEFINE_MEMBER_OP(operator<, float, vec_cmplt) DEFINE_MEMBER_OP(operator<=, float, vec_cmple) DEFINE_MEMBER_OP(operator>, float, vec_cmpgt) DEFINE_MEMBER_OP(operator>=, float, vec_cmpge) DEFINE_MEMBER_OP_AND_ONE(eq, float, vec_cmpeq) DEFINE_MEMBER_OP_AND_ONE(ne, float, vec_cmpne) DEFINE_MEMBER_OP_AND_ONE(lt, float, vec_cmplt) DEFINE_MEMBER_OP_AND_ONE(le, float, vec_cmple) DEFINE_MEMBER_OP_AND_ONE(gt, float, vec_cmpgt) DEFINE_MEMBER_OP_AND_ONE(ge, float, vec_cmpge) DEFINE_MEMBER_OP(operator+, float, vec_add) DEFINE_MEMBER_OP(operator-, float, vec_sub) DEFINE_MEMBER_OP(operator*, float, vec_mul) DEFINE_MEMBER_OP(operator/, float, vec_div) DEFINE_MEMBER_OP(maximum, float, vec_max_nan2) DEFINE_MEMBER_OP(minimum, float, vec_min_nan2) DEFINE_MEMBER_OP(operator&, float, vec_and) DEFINE_MEMBER_OP(operator|, float, vec_or) DEFINE_MEMBER_OP(operator^, float, vec_xor) DEFINE_MEMBER_TERNARY_OP(madd, float, vec_madd) }; template <> Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) { return a.maximum(b); } template <> Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) { return a.minimum(b); } } // namespace } // namespace vec } // namespace at
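For reference, every blend<mask> specialization above realizes the same per-lane selection; blendChoice() only picks the arrangement with the fewest vec_sel instructions for a given constant mask. A scalar statement of that contract (illustrative, not code from the header):

#include <cstdint>

// Bit i of `mask` selects b[i], otherwise a[i], across all 8 float lanes.
static void blend_ref(const float a[8], const float b[8], uint32_t mask,
                      float out[8]) {
  for (int i = 0; i < 8; ++i) {
    out[i] = ((mask >> i) & 1u) ? b[i] : a[i];
  }
}
// set(a, b, count) is blend with the low `count` bits set, e.g.
// set(a, b, 3) behaves like blend<0b00000111>(a, b).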
14,249
31.167043
93
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { template <> class Vectorized<int16_t> { private: union { struct { vint16 _vec0; vint16 _vec1; }; struct { vbool16 _vecb0; vbool16 _vecb1; }; } __attribute__((__may_alias__)); public: using value_type = int16_t; using vec_internal_type = vint16; using vec_internal_mask_type = vbool16; using size_type = int; static constexpr size_type size() { return 16; } Vectorized() {} C10_ALWAYS_INLINE Vectorized(vint16 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool16 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vint16 v1, vint16 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool16 v1, vbool16 v2) : _vecb0{v1}, _vecb1{v2} {} C10_ALWAYS_INLINE Vectorized(int16_t scalar) : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} C10_ALWAYS_INLINE Vectorized( int16_t scalar1, int16_t scalar2, int16_t scalar3, int16_t scalar4, int16_t scalar5, int16_t scalar6, int16_t scalar7, int16_t scalar8, int16_t scalar9, int16_t scalar10, int16_t scalar11, int16_t scalar12, int16_t scalar13, int16_t scalar14, int16_t scalar15, int16_t scalar16) : _vec0{vint16{ scalar1, scalar2, scalar3, scalar4, scalar5, scalar6, scalar7, scalar8}}, _vec1{vint16{ scalar9, scalar10, scalar11, scalar12, scalar13, scalar14, scalar15, scalar16}} {} C10_ALWAYS_INLINE const vec_internal_type& vec0() const { return _vec0; } C10_ALWAYS_INLINE const vec_internal_type& vec1() const { return _vec1; } template <uint64_t mask> static std::enable_if_t<mask == 0, Vectorized<int16_t>> C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { return a; } template <uint64_t mask> static std::enable_if_t<(mask & 65535) == 65535, Vectorized<int16_t>> C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { return b; } template <uint64_t mask> static std::enable_if_t<mask == 255, Vectorized<int16_t>> C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { return {b._vec0, a._vec1}; } template <uint64_t mask> static std::enable_if_t<(mask > 0 && mask < 255), Vectorized<int16_t>> C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { constexpr int16_t g0 = (mask & 1) * 0xffff; constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff; constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff; constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff; constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff; constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff; constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff; constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff; const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7}; return {(vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st), a._vec1}; } template <uint64_t mask> static std::enable_if_t< (mask > 255 && (mask & 65535) != 65535 && ((mask & 255) == 255)), Vectorized<int16_t>> C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { // second half is selected by mask bits 8-15 constexpr uint64_t mask2 = (mask >> 8) & 255; constexpr int16_t g0_2 = (mask2 & 1) * 0xffff; constexpr int16_t g1_2 = ((mask2 & 2) >> 1) * 0xffff; constexpr int16_t g2_2 = ((mask2 & 4) >> 2) * 0xffff; constexpr int16_t g3_2 = ((mask2 & 8) >> 3) * 0xffff; constexpr int16_t g4_2 = ((mask2 & 16) >> 4) * 0xffff; constexpr int16_t g5_2 = ((mask2 & 32) >> 5) * 0xffff; constexpr int16_t g6_2 = ((mask2 & 64) >> 6) * 0xffff; constexpr
int16_t g7_2 = ((mask2 & 128) >> 7) * 0xffff; const vint16 mask_2nd = vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2}; // generated masks return {b._vec0, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)}; } template <uint64_t mask> static std::enable_if_t< (mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) == 0)), Vectorized<int16_t>> C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { // second half is selected by mask bits 8-15 constexpr uint64_t mask2 = (mask >> 8) & 255; constexpr int16_t g0_2 = (mask2 & 1) * 0xffff; constexpr int16_t g1_2 = ((mask2 & 2) >> 1) * 0xffff; constexpr int16_t g2_2 = ((mask2 & 4) >> 2) * 0xffff; constexpr int16_t g3_2 = ((mask2 & 8) >> 3) * 0xffff; constexpr int16_t g4_2 = ((mask2 & 16) >> 4) * 0xffff; constexpr int16_t g5_2 = ((mask2 & 32) >> 5) * 0xffff; constexpr int16_t g6_2 = ((mask2 & 64) >> 6) * 0xffff; constexpr int16_t g7_2 = ((mask2 & 128) >> 7) * 0xffff; const vint16 mask_2nd = vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2}; // generated masks return {a._vec0, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)}; } template <uint64_t mask> static std::enable_if_t< (mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) != 0) && ((mask & 255) != 255)), Vectorized<int16_t>> C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { constexpr int16_t g0 = (mask & 1) * 0xffff; constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff; constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff; constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff; constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff; constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff; constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff; constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff; // second half is selected by mask bits 8-15 constexpr uint64_t mask2 = (mask >> 8) & 255; constexpr int16_t g0_2 = (mask2 & 1) * 0xffff; constexpr int16_t g1_2 = ((mask2 & 2) >> 1) * 0xffff; constexpr int16_t g2_2 = ((mask2 & 4) >> 2) * 0xffff; constexpr int16_t g3_2 = ((mask2 & 8) >> 3) * 0xffff; constexpr int16_t g4_2 = ((mask2 & 16) >> 4) * 0xffff; constexpr int16_t g5_2 = ((mask2 & 32) >> 5) * 0xffff; constexpr int16_t g6_2 = ((mask2 & 64) >> 6) * 0xffff; constexpr int16_t g7_2 = ((mask2 & 128) >> 7) * 0xffff; const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7}; const vint16 mask_2nd = vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2}; // generated masks return { (vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st), (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)}; } static Vectorized<int16_t> C10_ALWAYS_INLINE blendv( const Vectorized<int16_t>& a, const Vectorized<int16_t>& b, const Vectorized<int16_t>& mask) { // the mask used here is returned by a vec256 comparison, // so it can be fed directly to vec_sel // warning: an intel-style bit mask will not work properly return { vec_sel(a._vec0, b._vec0, mask._vecb0), vec_sel(a._vec1, b._vec1, mask._vecb1)}; } template <typename step_t> static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) { return Vectorized<int16_t>( base, base + step, base + 2 * step, base + 3 * step, base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step, base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step, base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step); } static Vectorized<int16_t> set( const Vectorized<int16_t>& a, const Vectorized<int16_t>& b, size_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b);
case 4: return blend<15>(a, b); case 5: return blend<31>(a, b); case 6: return blend<63>(a, b); case 7: return blend<127>(a, b); case 8: return blend<255>(a, b); case 9: return blend<511>(a, b); case 10: return blend<1023>(a, b); case 11: return blend<2047>(a, b); case 12: return blend<4095>(a, b); case 13: return blend<8191>(a, b); case 14: return blend<16383>(a, b); case 15: return blend<32767>(a, b); } return b; } static Vectorized<value_type> C10_ALWAYS_INLINE loadu(const void* ptr, int count = size()) { if (count == size()) { return { vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)), vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))}; } __at_align__ value_type tmp_values[size()] = {}; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr)); vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr)); } else if (count > 0) { __at_align__ value_type tmp_values[size()]; vec_vsx_st(_vec0, offset0, tmp_values); vec_vsx_st(_vec1, offset16, tmp_values); std::memcpy(ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } const int16_t& operator[](int idx) const = delete; int16_t& operator[](int idx) = delete; Vectorized<int16_t> angle() const { return blendv( Vectorized<int16_t>(0), Vectorized<int16_t>(c10::pi<int16_t>), *this < Vectorized<int16_t>(0)); } Vectorized<int16_t> real() const { return *this; } Vectorized<int16_t> imag() const { return Vectorized<int16_t>{0}; } Vectorized<int16_t> conj() const { return *this; } Vectorized<int16_t> C10_ALWAYS_INLINE abs() const { return {vec_abs(_vec0), vec_abs(_vec1)}; } Vectorized<int16_t> C10_ALWAYS_INLINE neg() const { return {vec_neg(_vec0), vec_neg(_vec1)}; } DEFINE_MEMBER_UNARY_OP(operator~, int16_t, vec_not) DEFINE_MEMBER_OP(operator==, int16_t, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, int16_t, vec_cmpne) DEFINE_MEMBER_OP(operator<, int16_t, vec_cmplt) DEFINE_MEMBER_OP(operator<=, int16_t, vec_cmple) DEFINE_MEMBER_OP(operator>, int16_t, vec_cmpgt) DEFINE_MEMBER_OP(operator>=, int16_t, vec_cmpge) DEFINE_MEMBER_OP_AND_ONE(eq, int16_t, vec_cmpeq) DEFINE_MEMBER_OP_AND_ONE(ne, int16_t, vec_cmpne) DEFINE_MEMBER_OP_AND_ONE(lt, int16_t, vec_cmplt) DEFINE_MEMBER_OP_AND_ONE(le, int16_t, vec_cmple) DEFINE_MEMBER_OP_AND_ONE(gt, int16_t, vec_cmpgt) DEFINE_MEMBER_OP_AND_ONE(ge, int16_t, vec_cmpge) DEFINE_MEMBER_OP(operator+, int16_t, vec_add) DEFINE_MEMBER_OP(operator-, int16_t, vec_sub) DEFINE_MEMBER_OP(operator*, int16_t, vec_mul) DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int16_t, /) DEFINE_MEMBER_OP(maximum, int16_t, vec_max) DEFINE_MEMBER_OP(minimum, int16_t, vec_min) DEFINE_MEMBER_OP(operator&, int16_t, vec_and) DEFINE_MEMBER_OP(operator|, int16_t, vec_or) DEFINE_MEMBER_OP(operator^, int16_t, vec_xor) }; template <> Vectorized<int16_t> inline maximum( const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { return a.maximum(b); } template <> Vectorized<int16_t> inline minimum( const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) { return a.minimum(b); } } // namespace } // namespace vec } // namespace at
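The int16 blend spells the bit-to-lane expansion out longhand because each lane mask must be a separate constant in the vector literal. The intended contract, as a compact scalar model (an illustrative assumption, not part of the header):

#include <cstdint>

// Bit i of the 16-bit mask picks b[i]; bits 0-7 drive _vec0, bits 8-15 _vec1.
static void blend16_ref(const int16_t a[16], const int16_t b[16],
                        uint32_t mask, int16_t out[16]) {
  for (int i = 0; i < 16; ++i) {
    out[i] = ((mask >> i) & 1u) ? b[i] : a[i];
  }
}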
11,902
32.529577
101
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { template <> class Vectorized<int32_t> { private: union { struct { vint32 _vec0; vint32 _vec1; }; struct { vbool32 _vecb0; vbool32 _vecb1; }; } __attribute__((__may_alias__)); public: using value_type = int32_t; using vec_internal_type = vint32; using vec_internal_mask_type = vbool32; using size_type = int; static constexpr size_type size() { return 8; } Vectorized() {} C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {} C10_ALWAYS_INLINE Vectorized(int32_t scalar) : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} C10_ALWAYS_INLINE Vectorized( int32_t scalar1, int32_t scalar2, int32_t scalar3, int32_t scalar4, int32_t scalar5, int32_t scalar6, int32_t scalar7, int32_t scalar8) : _vec0{vint32{scalar1, scalar2, scalar3, scalar4}}, _vec1{vint32{scalar5, scalar6, scalar7, scalar8}} {} C10_ALWAYS_INLINE const vec_internal_type& vec0() const { return _vec0; } C10_ALWAYS_INLINE const vec_internal_type& vec1() const { return _vec1; } template <uint64_t mask> static std::enable_if_t<mask == 0, Vectorized<int32_t>> C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { return a; } template <uint64_t mask> static std::enable_if_t<(mask & 255) == 255, Vectorized<int32_t>> C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { return b; } template <uint64_t mask> static std::enable_if_t<mask == 15, Vectorized<int32_t>> C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { return {b._vec0, a._vec1}; } template <uint64_t mask> static std::enable_if_t<(mask > 0 && mask < 15), Vectorized<int32_t>> C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { constexpr uint32_t g0 = (mask & 1) * 0xffffffff; constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff; constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff; constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff; const vbool32 mask_1st = (vbool32){g0, g1, g2, g3}; return {(vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st), a._vec1}; } template <uint64_t mask> static std::enable_if_t< (mask > 15 && (mask & 255) != 255 && ((mask & 15) == 15)), Vectorized<int32_t>> C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { constexpr uint32_t mask2 = (mask & 255) >> 4; constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff; constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff; constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff; constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff; const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2}; // generated masks return {b._vec0, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)}; } template <uint64_t mask> static std::enable_if_t< (mask > 15 && ((mask & 255) != 255) && ((mask & 15) == 0)), Vectorized<int32_t>> C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { constexpr uint32_t mask2 = (mask & 255) >> 4; constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff; constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff; constexpr 
uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff; constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff; const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2}; // generated masks return {a._vec0, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)}; } template <uint64_t mask> static std::enable_if_t< (mask > 15 && ((mask & 255) != 255) && ((mask & 15) != 0) && ((mask & 15) != 15)), Vectorized<int32_t>> C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { constexpr uint32_t g0 = (mask & 1) * 0xffffffff; constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff; constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff; constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff; constexpr uint32_t mask2 = (mask & 255) >> 4; constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff; constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff; constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff; constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff; const vbool32 mask_1st = (vbool32){g0, g1, g2, g3}; const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2}; // generated masks return { (vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st), (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)}; } static Vectorized<int32_t> C10_ALWAYS_INLINE blendv( const Vectorized<int32_t>& a, const Vectorized<int32_t>& b, const Vectorized<int32_t>& mask) { // the mask used here is returned by a vec256 comparison, // so it can be fed directly to vec_sel // warning: an intel-style bit mask will not work properly return { vec_sel(a._vec0, b._vec0, mask._vecb0), vec_sel(a._vec1, b._vec1, mask._vecb1)}; } template <typename step_t> static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) { return Vectorized<int32_t>( base, base + step, base + 2 * step, base + 3 * step, base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); } static Vectorized<int32_t> set( const Vectorized<int32_t>& a, const Vectorized<int32_t>& b, size_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b); case 4: return blend<15>(a, b); case 5: return blend<31>(a, b); case 6: return blend<63>(a, b); case 7: return blend<127>(a, b); } return b; } static Vectorized<value_type> C10_ALWAYS_INLINE loadu(const void* ptr, int count = size()) { if (count == size()) { return { vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)), vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))}; } __at_align__ value_type tmp_values[size()] = {}; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr)); vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr)); } else if (count > 0) { __at_align__ value_type tmp_values[size()]; vec_vsx_st(_vec0, offset0, tmp_values); vec_vsx_st(_vec1, offset16, tmp_values); std::memcpy( ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } const int32_t& operator[](int idx) const = delete; int32_t& operator[](int idx) = delete; Vectorized<int32_t> angle() const { return blendv( Vectorized<int32_t>(0), Vectorized<int32_t>(c10::pi<int32_t>), *this < Vectorized<int32_t>(0)); } Vectorized<int32_t> real() const { return *this; } Vectorized<int32_t> imag() const { return
Vectorized<int32_t>{0}; } Vectorized<int32_t> conj() const { return *this; } Vectorized<int32_t> C10_ALWAYS_INLINE abs() const { return {vec_abs(_vec0), vec_abs(_vec1)}; } Vectorized<int32_t> C10_ALWAYS_INLINE neg() const { return {vec_neg(_vec0), vec_neg(_vec1)}; } DEFINE_MEMBER_UNARY_OP(operator~, int32_t, vec_not) DEFINE_MEMBER_OP(operator==, int32_t, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, int32_t, vec_cmpne) DEFINE_MEMBER_OP(operator<, int32_t, vec_cmplt) DEFINE_MEMBER_OP(operator<=, int32_t, vec_cmple) DEFINE_MEMBER_OP(operator>, int32_t, vec_cmpgt) DEFINE_MEMBER_OP(operator>=, int32_t, vec_cmpge) DEFINE_MEMBER_OP_AND_ONE(eq, int32_t, vec_cmpeq) DEFINE_MEMBER_OP_AND_ONE(ne, int32_t, vec_cmpne) DEFINE_MEMBER_OP_AND_ONE(lt, int32_t, vec_cmplt) DEFINE_MEMBER_OP_AND_ONE(le, int32_t, vec_cmple) DEFINE_MEMBER_OP_AND_ONE(gt, int32_t, vec_cmpgt) DEFINE_MEMBER_OP_AND_ONE(ge, int32_t, vec_cmpge) DEFINE_MEMBER_OP(operator+, int32_t, vec_add) DEFINE_MEMBER_OP(operator-, int32_t, vec_sub) DEFINE_MEMBER_OP(operator*, int32_t, vec_mul) DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int32_t, /) DEFINE_MEMBER_OP(maximum, int32_t, vec_max) DEFINE_MEMBER_OP(minimum, int32_t, vec_min) DEFINE_MEMBER_OP(operator&, int32_t, vec_and) DEFINE_MEMBER_OP(operator|, int32_t, vec_or) DEFINE_MEMBER_OP(operator^, int32_t, vec_xor) }; template <> Vectorized<int32_t> inline maximum( const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { return a.maximum(b); } template <> Vectorized<int32_t> inline minimum( const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) { return a.minimum(b); } } // namespace } // namespace vec } // namespace at
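Each g-constant above turns one mask bit into an all-ones or all-zeros 32-bit lane. The same computation as a generic constexpr helper (a sketch; the header keeps the values as individual named constants because the vector literal needs them one by one):

#include <cstdint>

constexpr uint32_t lane_mask(uint64_t mask, int bit) {
  return ((mask >> bit) & 1u) ? 0xffffffffu : 0u;
}
static_assert(lane_mask(0b0101, 0) == 0xffffffffu, "set bit -> all-ones lane");
static_assert(lane_mask(0b0101, 1) == 0u, "clear bit -> all-zeros lane");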
9,723
33.119298
101
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { template <> class Vectorized<int64_t> { private: union { struct { vint64 _vec0; vint64 _vec1; }; struct { vbool64 _vecb0; vbool64 _vecb1; }; } __attribute__((__may_alias__)); public: using value_type = int64_t; using vec_internal_type = vint64; using vec_internal_mask_type = vbool64; using size_type = int; static constexpr size_type size() { return 4; } Vectorized() {} C10_ALWAYS_INLINE Vectorized(vint64 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vint64 v1, vint64 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {} C10_ALWAYS_INLINE Vectorized(int64_t scalar) : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {} C10_ALWAYS_INLINE Vectorized( int64_t scalar1, int64_t scalar2, int64_t scalar3, int64_t scalar4) : _vec0{vint64{scalar1, scalar2}}, _vec1{vint64{scalar3, scalar4}} {} C10_ALWAYS_INLINE const vec_internal_type& vec0() const { return _vec0; } C10_ALWAYS_INLINE const vec_internal_type& vec1() const { return _vec1; } template <uint64_t mask> static std::enable_if_t<mask == 0, Vectorized<int64_t>> C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) { return a; } template <uint64_t mask> static std::enable_if_t<mask == 3, Vectorized<int64_t>> C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) { return {b._vec0, a._vec1}; } template <uint64_t mask> static std::enable_if_t<(mask & 15) == 15, Vectorized<int64_t>> C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) { return b; } template <uint64_t mask> static std::enable_if_t<(mask > 0 && mask < 3), Vectorized<int64_t>> C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) { constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff; constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff; const vbool64 mask_1st = (vbool64){g0, g1}; return {(vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), a._vec1}; } template <uint64_t mask> static std::enable_if_t<(mask > 3) && (mask & 3) == 0, Vectorized<int64_t>> C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) { constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff; constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff; const vbool64 mask_2nd = (vbool64){g0_2, g1_2}; return {a._vec0, (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)}; } template <uint64_t mask> static std::enable_if_t< (mask > 3) && (mask & 3) != 0 && (mask & 15) != 15, Vectorized<int64_t>> C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) { constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff; constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff; constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff; constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff; const vbool64 mask_1st = (vbool64){g0, g1}; const vbool64 mask_2nd = (vbool64){g0_2, g1_2}; return { (vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)}; } static Vectorized<int64_t> C10_ALWAYS_INLINE blendv( const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const 
Vectorized<int64_t>& mask) { // the mask used here returned by comparision of vec256 return { vec_sel(a._vec0, b._vec0, mask._vecb0), vec_sel(a._vec1, b._vec1, mask._vecb1)}; } template <typename step_t> static Vectorized<int64_t> arange(int64_t base = 0., step_t step = static_cast<step_t>(1)) { return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step); } static Vectorized<int64_t> C10_ALWAYS_INLINE set(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, size_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b); } return b; } static Vectorized<value_type> C10_ALWAYS_INLINE loadu(const void* ptr, int count = size()) { if (count == size()) { static_assert(sizeof(double) == sizeof(value_type)); const double* dptr = reinterpret_cast<const double*>(ptr); return {// treat it as double load (vint64)vec_vsx_ld(offset0, dptr), (vint64)vec_vsx_ld(offset16, dptr)}; } __at_align__ double tmp_values[size()] = {}; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return { (vint64)vec_vsx_ld(offset0, tmp_values), (vint64)vec_vsx_ld(offset16, tmp_values)}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { double* dptr = reinterpret_cast<double*>(ptr); vec_vsx_st((vfloat64)_vec0, offset0, dptr); vec_vsx_st((vfloat64)_vec1, offset16, dptr); } else if (count > 0) { __at_align__ double tmp_values[size()]; vec_vsx_st((vfloat64)_vec0, offset0, tmp_values); vec_vsx_st((vfloat64)_vec1, offset16, tmp_values); std::memcpy( ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } const int64_t& operator[](int idx) const = delete; int64_t& operator[](int idx) = delete; Vectorized<int64_t> angle() const { return blendv( Vectorized<int64_t>(0), Vectorized<int64_t>(c10::pi<int64_t>), *this < Vectorized<int64_t>(0)); } Vectorized<int64_t> real() const { return *this; } Vectorized<int64_t> imag() const { return Vectorized<int64_t>{0}; } Vectorized<int64_t> conj() const { return *this; } Vectorized<int64_t> C10_ALWAYS_INLINE abs() const { return {vec_abs(_vec0), vec_abs(_vec1)}; } Vectorized<int64_t> C10_ALWAYS_INLINE neg() const { return {vec_neg(_vec0), vec_neg(_vec1)}; } DEFINE_MEMBER_UNARY_OP(operator~, int64_t, vec_not) DEFINE_MEMBER_OP(operator==, int64_t, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, int64_t, vec_cmpne) DEFINE_MEMBER_OP(operator<, int64_t, vec_cmplt) DEFINE_MEMBER_OP(operator<=, int64_t, vec_cmple) DEFINE_MEMBER_OP(operator>, int64_t, vec_cmpgt) DEFINE_MEMBER_OP(operator>=, int64_t, vec_cmpge) DEFINE_MEMBER_OP_AND_ONE(eq, int64_t, vec_cmpeq) DEFINE_MEMBER_OP_AND_ONE(ne, int64_t, vec_cmpne) DEFINE_MEMBER_OP_AND_ONE(lt, int64_t, vec_cmplt) DEFINE_MEMBER_OP_AND_ONE(le, int64_t, vec_cmple) DEFINE_MEMBER_OP_AND_ONE(gt, int64_t, vec_cmpgt) DEFINE_MEMBER_OP_AND_ONE(ge, int64_t, vec_cmpge) DEFINE_MEMBER_OP(operator+, int64_t, vec_add) DEFINE_MEMBER_OP(operator-, int64_t, vec_sub) DEFINE_MEMBER_OP(operator*, int64_t, vec_mul) DEFINE_MEMBER_OP(operator/, int64_t, vec_div) DEFINE_MEMBER_OP(maximum, int64_t, vec_max) DEFINE_MEMBER_OP(minimum, int64_t, vec_min) DEFINE_MEMBER_OP(operator&, int64_t, vec_and) DEFINE_MEMBER_OP(operator|, int64_t, vec_or) DEFINE_MEMBER_OP(operator^, int64_t, vec_xor) }; template <> Vectorized<int64_t> inline maximum( const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) { return a.maximum(b); } template <> Vectorized<int64_t> inline minimum( const Vectorized<int64_t>& a, const 
Vectorized<int64_t>& b) { return a.minimum(b); } } // namespace } // namespace vec } // namespace at
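Note that loadu/store above move int64 lanes through vec_vsx_ld/vec_vsx_st on double pointers; only raw 64-bit moves occur, so bit patterns survive the round trip, and the static_assert pins the size assumption. A small hypothetical usage sketch, assuming a VSX build:

#include <ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h>
#include <cstdint>

// Hypothetical driver: fill out[0..3] with {base, base+1, base+2, base+3}.
void iota4(int64_t* out, int64_t base) {
  auto v = at::vec::Vectorized<int64_t>::arange(base, 1);
  v.store(out);  // round-trips through vfloat64 bit-casts, values unchanged
}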
7,922
32.43038
101
h
null
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h
#pragma once

#include <ATen/cpu/vec/intrinsics.h>
#include <ATen/cpu/vec/vec_base.h>
#include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
#include <c10/util/qint32.h>
#include <array>

// This file defines Vectorized<> for the quantized types.
//
//
// Currently, we simply use these classes as efficient converters between
// the quantized types and Vectorized<float>, usually in bandwidth-bound cases
// where doing the arithmetic in full-precision is acceptable (e.g.
// elementwise operators).
//
//
// Conversions are as follows:
//  Vectorized<qint32> -> 1x Vectorized<float>
//
// The size of the returned float vector is specified by the special
// constexpr function float_num_vecs. The type of the value returned
// from dequantize (and expected as an argument to quantize) is
// specified by float_vec_return_type.
//
// When writing kernels with these vectors, it is expected that floating-
// point operations will be carried out in a loop over
// Vectorized<T>::float_num_vecs iterations.

namespace at {
namespace vec {
inline namespace CPU_CAPABILITY {

template <>
struct Vectorized<c10::qint32> {
 private:
  union {
    struct {
      vint32 _vec0;
      vint32 _vec1;
    };
    struct {
      vbool32 _vecb0;
      vbool32 _vecb1;
    };
  } __attribute__((__may_alias__));

 public:
  Vectorized() {}

  using size_type = int;
  static constexpr size_type size() {
    return 8;
  }

  static constexpr size_t float_num_vecs() {
    return 1;
  }
  static constexpr int int_num_vecs() {
    return 1;
  }
  using float_vec_return_type = std::array<Vectorized<float>, 1>;
  using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
  using value_type = c10::qint32::underlying;
  using vec_internal_type = vint32;
  using vec_internal_mask_type = vbool32;

  C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
  C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
  C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
  C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2)
      : _vecb0{v1}, _vecb1{v2} {}

  Vectorized(const c10::qint32& val)
      : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {}

  static Vectorized<c10::qint32> C10_ALWAYS_INLINE
  loadu(const void* ptr, int count = size()) {
    if (count == size()) {
      return {
          vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
          vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
    }

    __at_align__ value_type tmp_values[size()] = {};
    std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));

    return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
  }

  void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
    if (count == size()) {
      vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
      vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
    } else if (count > 0) {
      __at_align__ value_type tmp_values[size()];
      vec_vsx_st(_vec0, offset0, tmp_values);
      vec_vsx_st(_vec1, offset16, tmp_values);
      std::memcpy(
          ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
    }
  }

  C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
    return _vec0;
  }
  C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
    return _vec1;
  }

  float_vec_return_type dequantize(
      Vectorized<float> scale,
      Vectorized<float> zero_point,
      Vectorized<float> scale_zp_premul) const {
    vfloat32 float_vals0 = vec_float(_vec0);
    vfloat32 float_vals1 = vec_float(_vec1);
    vfloat32 scale_vec0 = scale.vec0();
    vfloat32 scale_vec1 = scale.vec1();
    vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
    vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
    return {Vectorized<float>{
        vec_madd(scale_vec0, float_vals0, scale_zp_premul0),
        vec_madd(scale_vec1, float_vals1, scale_zp_premul1)}};
  }

  static Vectorized<c10::qint32> quantize(
      const float_vec_return_type& rhs,
      float scale,
      int32_t zero_point,
      float inverse_scale) {
    Vectorized<c10::qint32> retval;
    const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
    const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
    vfloat32 inverse_scale_v = vec_splats(inverse_scale);
    vfloat32 vec_zero_point = vec_splats((float)(zero_point));
    Vectorized<float> vf0 = rhs[0];

    vfloat32 vecf0 = vf0.vec0();
    vfloat32 vecf1 = vf0.vec1();
    vecf0 = vec_mul(vecf0, inverse_scale_v);
    vecf1 = vec_mul(vecf1, inverse_scale_v);
    vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
    vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
    vint32 veci0 = vec_signed(vecf0);
    vint32 veci1 = vec_signed(vecf1);

    veci0 = vec_max(veci0, vmin);
    veci1 = vec_max(veci1, vmin);
    veci0 = vec_min(veci0, vmax);
    veci1 = vec_min(veci1, vmax);

    return {veci0, veci1};
  }

  Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
    return {
        vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
  }

  Vectorized<c10::qint32> relu6(
      Vectorized<c10::qint32> zero_point,
      Vectorized<c10::qint32> q_six) const {
    vint32 max0 = vec_max(_vec0, zero_point._vec0);
    vint32 max1 = vec_max(_vec1, zero_point._vec1);
    return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
  }

  int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
    return {*this - b};
  }

  static Vectorized<c10::qint32> requantize_from_int(
      const int_vec_return_type& inp,
      float multiplier,
      int32_t zero_point) {
    const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
    const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
    vfloat32 vec_mult = vec_splats(multiplier);
    vint32 vec_zero_point = vec_splats(zero_point);
    Vectorized<c10::qint32> vi = inp[0];

    vfloat32 vecf0 = vec_float(vi.vec0());
    vfloat32 vecf1 = vec_float(vi.vec1());

    vecf0 = vec_mul(vecf0, vec_mult);
    vecf1 = vec_mul(vecf1, vec_mult);

    vecf0 = vec_rint(vecf0);
    vecf1 = vec_rint(vecf1);

    vint32 veci0 = vec_add(vec_signed(vecf0), vec_zero_point);
    vint32 veci1 = vec_add(vec_signed(vecf1), vec_zero_point);

    veci0 = vec_max(veci0, vmin);
    veci1 = vec_max(veci1, vmin);
    veci0 = vec_min(veci0, vmax);
    veci1 = vec_min(veci1, vmax);

    return {veci0, veci1};
  }

  DEFINE_MEMBER_OP(operator==, c10::qint32, vec_cmpeq)
  DEFINE_MEMBER_OP(operator!=, c10::qint32, vec_cmpne)
  DEFINE_MEMBER_OP(operator<, c10::qint32, vec_cmplt)
  DEFINE_MEMBER_OP(operator<=, c10::qint32, vec_cmple)
  DEFINE_MEMBER_OP(operator>, c10::qint32, vec_cmpgt)
  DEFINE_MEMBER_OP(operator>=, c10::qint32, vec_cmpge)
  DEFINE_MEMBER_OP(operator+, c10::qint32, vec_add)
  DEFINE_MEMBER_OP(operator-, c10::qint32, vec_sub)
  DEFINE_MEMBER_OP(operator*, c10::qint32, vec_mul)
  DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint32, /)
  DEFINE_MEMBER_OP(maximum, c10::qint32, vec_max)
  DEFINE_MEMBER_OP(minimum, c10::qint32, vec_min)
  DEFINE_MEMBER_OP(operator&, c10::qint32, vec_and)
  DEFINE_MEMBER_OP(operator|, c10::qint32, vec_or)
  DEFINE_MEMBER_OP(operator^, c10::qint32, vec_xor)
};

template <>
Vectorized<c10::qint32> inline maximum(
    const Vectorized<c10::qint32>& a,
    const Vectorized<c10::qint32>& b) {
  return a.maximum(b);
}

template <>
Vectorized<c10::qint32> inline minimum(
    const Vectorized<c10::qint32>& a,
    const Vectorized<c10::qint32>& b) {
  return a.minimum(b);
}

} // namespace
} // namespace vec
} // namespace at
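The dequantize/quantize pair above implements the usual affine quantization scheme. A minimal scalar sketch of the same math, assuming the caller's convention that scale_zp_premul is precomputed as -scale * zero_point (which is what lets dequantization collapse into a single vec_madd per lane); the function names are illustrative, not part of the header:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Mirrors vec_madd(scale, float(q), scale_zp_premul):
// scale * q - scale * zero_point == scale * (q - zero_point).
inline float dequantize_one(int32_t q, float scale, float scale_zp_premul) {
  return scale * static_cast<float>(q) + scale_zp_premul;
}

// Mirrors quantize() above: scale by 1/scale, round to nearest (vec_rint),
// add the zero point, then clamp to the qint32 range as the vec_max/vec_min
// pair does. The scalar version works in double so the clamp-then-cast is
// well defined; the vector path can instead rely on vec_signed saturating
// in hardware.
inline int32_t quantize_one(float x, float inverse_scale, int32_t zero_point) {
  double r = std::nearbyint(static_cast<double>(x) * inverse_scale) +
      static_cast<double>(zero_point);
  r = std::min(
      std::max(r, static_cast<double>(std::numeric_limits<int32_t>::min())),
      static_cast<double>(std::numeric_limits<int32_t>::max()));
  return static_cast<int32_t>(r);
}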
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> #include <c10/util/qint8.h> #include <array> // This file defines Vectorized<> for the quantized types. // // // Currently, we simply use these classes as efficient converters between // the quantized types and Vectorized<float>, usually in bandwidth-bound cases // where doing the arithmetic in full-precision is acceptable (e.g. // elementwise operators). // // // Conversions are as follows: // Vectorized<qint8> -> 4x Vectorized<float> // // The size of the returned float vector is specified by the special // constexpr function float_num_vecs. The type of the value returned // from dequantize (and expected as an argument to quantize) is // specified by float_vec_return_type. // // When writing kernels with these vectors, it is expected that floating- // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs // iterations. namespace at { namespace vec { inline namespace CPU_CAPABILITY { template <> struct Vectorized<c10::qint8> { private: union { struct { vint8 _vec0; vint8 _vec1; }; struct { vbool8 _vecb0; vbool8 _vecb1; }; } __attribute__((__may_alias__)); public: Vectorized() {} using size_type = int; static constexpr size_type size() { return 32; } static constexpr size_t float_num_vecs() { return 4; } static constexpr int int_num_vecs() { return 4; } using float_vec_return_type = std::array<Vectorized<float>, 4>; using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>; using value_type = typename c10::qint8::underlying; using vec_internal_type = vint8; using vec_internal_mask_type = vbool8; // Broadcast constructor C10_ALWAYS_INLINE Vectorized(const c10::qint8& val) : _vec0{vec_splats(val.val_)}, _vec1{vec_splats(val.val_)} {} C10_ALWAYS_INLINE Vectorized(const Vectorized<c10::qint8>& other) : _vec0{other._vec0}, _vec1(other._vec1) {} C10_ALWAYS_INLINE Vectorized(vint8 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool8 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vint8 v1, vint8 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool8 v1, vbool8 v2) : _vecb0{v1}, _vecb1{v2} {} C10_ALWAYS_INLINE const vec_internal_type& vec0() const { return _vec0; } C10_ALWAYS_INLINE const vec_internal_type& vec1() const { return _vec1; } static C10_ALWAYS_INLINE Vectorized<c10::qint8> loadu( const void* ptr, int count = size()) { if (count == size()) { return { vec_vsx_ld(offset0, reinterpret_cast<const vint8*>(ptr)), vec_vsx_ld(offset16, reinterpret_cast<const vint8*>(ptr))}; } __at_align__ value_type tmp_values[size()]; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr)); vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr)); } else if (count > 0) { __at_align__ value_type tmp_values[size()]; vec_vsx_st(_vec0, offset0, tmp_values); vec_vsx_st(_vec1, offset16, tmp_values); std::memcpy( ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } public: float_vec_return_type C10_ALWAYS_INLINE dequantize( Vectorized<float> scale, Vectorized<float> zero_point, Vectorized<float> scale_zp_premul) const { vint16 vecshi0 = vec_unpackh(_vec0); vint16 vecshi1 = vec_unpackl(_vec0); vint16 vecshi2 = vec_unpackh(_vec1); vint16 
vecshi3 = vec_unpackl(_vec1); vint32 veci0 = vec_unpackh(vecshi0); vint32 veci1 = vec_unpackl(vecshi0); vint32 veci2 = vec_unpackh(vecshi1); vint32 veci3 = vec_unpackl(vecshi1); vint32 veci4 = vec_unpackh(vecshi2); vint32 veci5 = vec_unpackl(vecshi2); vint32 veci6 = vec_unpackh(vecshi3); vint32 veci7 = vec_unpackl(vecshi3); vfloat32 vecf0_0 = vec_float(veci0); vfloat32 vecf1_0 = vec_float(veci1); vfloat32 vecf0_1 = vec_float(veci2); vfloat32 vecf1_1 = vec_float(veci3); vfloat32 vecf0_2 = vec_float(veci4); vfloat32 vecf1_2 = vec_float(veci5); vfloat32 vecf0_3 = vec_float(veci6); vfloat32 vecf1_3 = vec_float(veci7); vfloat32 scale_vec0 = scale.vec0(); vfloat32 scale_vec1 = scale.vec1(); vfloat32 scale_zp_premul0 = scale_zp_premul.vec0(); vfloat32 scale_zp_premul1 = scale_zp_premul.vec1(); return { Vectorized<float>{ vec_madd(scale_vec0, vecf0_0, scale_zp_premul0), vec_madd(scale_vec1, vecf1_0, scale_zp_premul1)}, Vectorized<float>{ vec_madd(scale_vec0, vecf0_1, scale_zp_premul0), vec_madd(scale_vec1, vecf1_1, scale_zp_premul1)}, Vectorized<float>{ vec_madd(scale_vec0, vecf0_2, scale_zp_premul0), vec_madd(scale_vec1, vecf1_2, scale_zp_premul1)}, Vectorized<float>{ vec_madd(scale_vec0, vecf0_3, scale_zp_premul0), vec_madd(scale_vec1, vecf1_3, scale_zp_premul1)}}; } static Vectorized<c10::qint8> quantize( const float_vec_return_type& rhs, float scale, int32_t zero_point, float inverse_scale) { // constexpr int32_t min_val = std::numeric_limits<value_type>::min(); // constexpr int32_t max_val = std::numeric_limits<value_type>::max(); vfloat32 inverse_scale_v = vec_splats(inverse_scale); vfloat32 vec_zero_point = vec_splats((float)zero_point); // vint32 vmin = vec_splats(min_val); // vint32 vmax = vec_splats(max_val); Vectorized<float> vf0 = rhs[0]; Vectorized<float> vf1 = rhs[1]; Vectorized<float> vf2 = rhs[2]; Vectorized<float> vf3 = rhs[3]; vfloat32 vecf0 = vf0.vec0(); vfloat32 vecf1 = vf0.vec1(); vfloat32 vecf2 = vf1.vec0(); vfloat32 vecf3 = vf1.vec1(); vfloat32 vecf4 = vf2.vec0(); vfloat32 vecf5 = vf2.vec1(); vfloat32 vecf6 = vf3.vec0(); vfloat32 vecf7 = vf3.vec1(); vecf0 = vec_mul(vecf0, inverse_scale_v); vecf1 = vec_mul(vecf1, inverse_scale_v); vecf2 = vec_mul(vecf2, inverse_scale_v); vecf3 = vec_mul(vecf3, inverse_scale_v); vecf4 = vec_mul(vecf4, inverse_scale_v); vecf5 = vec_mul(vecf5, inverse_scale_v); vecf6 = vec_mul(vecf6, inverse_scale_v); vecf7 = vec_mul(vecf7, inverse_scale_v); vecf0 = vec_add(vec_rint(vecf0), vec_zero_point); vecf1 = vec_add(vec_rint(vecf1), vec_zero_point); vecf2 = vec_add(vec_rint(vecf2), vec_zero_point); vecf3 = vec_add(vec_rint(vecf3), vec_zero_point); vecf4 = vec_add(vec_rint(vecf4), vec_zero_point); vecf5 = vec_add(vec_rint(vecf5), vec_zero_point); vecf6 = vec_add(vec_rint(vecf6), vec_zero_point); vecf7 = vec_add(vec_rint(vecf7), vec_zero_point); vint32 veci0 = vec_signed(vecf0); vint32 veci1 = vec_signed(vecf1); vint32 veci2 = vec_signed(vecf2); vint32 veci3 = vec_signed(vecf3); vint32 veci4 = vec_signed(vecf4); vint32 veci5 = vec_signed(vecf5); vint32 veci6 = vec_signed(vecf6); vint32 veci7 = vec_signed(vecf7); // veci0 = vec_min(vmax, vec_max( vmin, vecf0)) ; // veci1 = vec_min(vmax, vec_max( vmin, vecf1)) ; // veci2 = vec_min(vmax, vec_max( vmin, vecf2)) ; // veci3 = vec_min(vmax, vec_max( vmin, vecf3)) ; // veci4 = vec_min(vmax, vec_max( vmin, vecf4)) ; // veci5 = vec_min(vmax, vec_max( vmin, vecf5)) ; // veci6 = vec_min(vmax, vec_max( vmin, vecf6)) ; // veci7 = vec_min(vmax, vec_max( vmin, vecf7)) ; // vec_packs CLAMP already vint16 vecshi0 = 
vec_packs(veci0, veci1); vint16 vecshi1 = vec_packs(veci2, veci3); vint16 vecshi2 = vec_packs(veci4, veci5); vint16 vecshi3 = vec_packs(veci6, veci7); vint8 vec0 = vec_packs(vecshi0, vecshi1); vint8 vec1 = vec_packs(vecshi2, vecshi3); return {vec0, vec1}; } Vectorized<c10::qint8> C10_ALWAYS_INLINE relu(Vectorized<c10::qint8> zero_point) const { return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)}; } Vectorized<c10::qint8> C10_ALWAYS_INLINE relu6(Vectorized<c10::qint8> zero_point, Vectorized<c10::qint8> q_six) const { vint8 max0 = vec_max(_vec0, zero_point._vec0); vint8 max1 = vec_max(_vec1, zero_point._vec1); return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)}; } int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const { vint16 vecshi0 = vec_unpackh(_vec0); vint16 vecBshi0 = vec_unpackh(b._vec0); vint16 vecshi1 = vec_unpackl(_vec0); vint16 vecBshi1 = vec_unpackl(b._vec0); vint16 vecshi2 = vec_unpackh(_vec1); vint16 vecBshi2 = vec_unpackh(b._vec1); vint16 vecshi3 = vec_unpackl(_vec1); vint16 vecBshi3 = vec_unpackl(b._vec1); vint32 veci0 = vec_unpackh(vecshi0); vint32 vecBi0 = vec_unpackh(vecBshi0); vint32 veci1 = vec_unpackl(vecshi0); vint32 vecBi1 = vec_unpackl(vecBshi0); vint32 veci2 = vec_unpackh(vecshi1); vint32 vecBi2 = vec_unpackh(vecBshi1); vint32 veci3 = vec_unpackl(vecshi1); vint32 vecBi3 = vec_unpackl(vecBshi1); vint32 veci4 = vec_unpackh(vecshi2); vint32 vecBi4 = vec_unpackh(vecBshi2); vint32 veci5 = vec_unpackl(vecshi2); vint32 vecBi5 = vec_unpackl(vecBshi2); vint32 veci6 = vec_unpackh(vecshi3); vint32 vecBi6 = vec_unpackh(vecBshi3); vint32 veci7 = vec_unpackl(vecshi3); vint32 vecBi7 = vec_unpackl(vecBshi3); return { Vectorized<c10::qint32>(veci0 - vecBi0, veci1 - vecBi1), Vectorized<c10::qint32>(veci2 - vecBi2, veci3 - vecBi3), Vectorized<c10::qint32>(veci4 - vecBi4, veci5 - vecBi5), Vectorized<c10::qint32>(veci6 - vecBi6, veci7 - vecBi7)}; } static Vectorized<c10::qint8> requantize_from_int( const int_vec_return_type& inp, float multiplier, int32_t zero_point) { vfloat32 vec_multiplier = vec_splats(multiplier); vint32 vec_zero_point = vec_splats(zero_point); Vectorized<c10::qint32> vi0 = inp[0]; Vectorized<c10::qint32> vi1 = inp[1]; Vectorized<c10::qint32> vi2 = inp[2]; Vectorized<c10::qint32> vi3 = inp[3]; vfloat32 vecf0 = vec_float(vi0.vec0()); vfloat32 vecf1 = vec_float(vi0.vec1()); vfloat32 vecf2 = vec_float(vi1.vec0()); vfloat32 vecf3 = vec_float(vi1.vec1()); vfloat32 vecf4 = vec_float(vi2.vec0()); vfloat32 vecf5 = vec_float(vi2.vec1()); vfloat32 vecf6 = vec_float(vi3.vec0()); vfloat32 vecf7 = vec_float(vi3.vec1()); vecf0 = vec_mul(vecf0, vec_multiplier); vecf1 = vec_mul(vecf1, vec_multiplier); vecf2 = vec_mul(vecf2, vec_multiplier); vecf3 = vec_mul(vecf3, vec_multiplier); vecf4 = vec_mul(vecf4, vec_multiplier); vecf5 = vec_mul(vecf5, vec_multiplier); vecf6 = vec_mul(vecf6, vec_multiplier); vecf7 = vec_mul(vecf7, vec_multiplier); vecf0 = vec_rint(vecf0); vecf1 = vec_rint(vecf1); vecf2 = vec_rint(vecf2); vecf3 = vec_rint(vecf3); vecf4 = vec_rint(vecf4); vecf5 = vec_rint(vecf5); vecf6 = vec_rint(vecf6); vecf7 = vec_rint(vecf7); vint32 veci0 = vec_signed(vecf0); vint32 veci1 = vec_signed(vecf1); vint32 veci2 = vec_signed(vecf2); vint32 veci3 = vec_signed(vecf3); vint32 veci4 = vec_signed(vecf4); vint32 veci5 = vec_signed(vecf5); vint32 veci6 = vec_signed(vecf6); vint32 veci7 = vec_signed(vecf7); veci0 = vec_add(veci0, vec_zero_point); veci1 = vec_add(veci1, vec_zero_point); veci2 = vec_add(veci2, vec_zero_point); veci3 = 
vec_add(veci3, vec_zero_point); veci4 = vec_add(veci4, vec_zero_point); veci5 = vec_add(veci5, vec_zero_point); veci6 = vec_add(veci6, vec_zero_point); veci7 = vec_add(veci7, vec_zero_point); vint16 vecshi0 = vec_packs(veci0, veci1); vint16 vecshi1 = vec_packs(veci2, veci3); vint16 vecshi2 = vec_packs(veci4, veci5); vint16 vecshi3 = vec_packs(veci6, veci7); vint8 vec0 = vec_packs(vecshi0, vecshi1); vint8 vec1 = vec_packs(vecshi2, vecshi3); return {vec0, vec1}; } DEFINE_MEMBER_OP(operator==, c10::qint8, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, c10::qint8, vec_cmpne) DEFINE_MEMBER_OP(operator<, c10::qint8, vec_cmplt) DEFINE_MEMBER_OP(operator<=, c10::qint8, vec_cmple) DEFINE_MEMBER_OP(operator>, c10::qint8, vec_cmpgt) DEFINE_MEMBER_OP(operator>=, c10::qint8, vec_cmpge) DEFINE_MEMBER_OP(operator+, c10::qint8, vec_add) DEFINE_MEMBER_OP(operator-, c10::qint8, vec_sub) DEFINE_MEMBER_OP(operator*, c10::qint8, vec_mul) DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint8, /) DEFINE_MEMBER_OP(maximum, c10::qint8, vec_max) DEFINE_MEMBER_OP(minimum, c10::qint8, vec_min) DEFINE_MEMBER_OP(operator&, c10::qint8, vec_and) DEFINE_MEMBER_OP(operator|, c10::qint8, vec_or) DEFINE_MEMBER_OP(operator^, c10::qint8, vec_xor) }; template <> Vectorized<c10::qint8> inline maximum( const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) { return a.maximum(b); } template <> Vectorized<c10::qint8> inline minimum( const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) { return a.minimum(b); } } // namespace } // namespace vec } // namespace at
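Note that the explicit range clamps in the qint8 quantize() above are commented out on purpose: the vec_packs narrowing chain (int32 -> int16 -> int8) already saturates each lane, as the "vec_packs CLAMP already" comment records. A minimal scalar sketch of the equivalent per-element behavior, with an illustrative name:

#include <algorithm>
#include <cmath>
#include <cstdint>

inline int8_t quantize_to_qint8(float x, float inverse_scale, int32_t zero_point) {
  double r = std::nearbyint(static_cast<double>(x) * inverse_scale) +
      static_cast<double>(zero_point);
  // Saturating narrow to [-128, 127], as the two vec_packs steps do.
  r = std::min(std::max(r, -128.0), 127.0);
  return static_cast<int8_t>(r);
}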
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h
#pragma once #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h> #include <c10/util/irange.h> #include <c10/util/quint8.h> #include <array> // This file defines Vectorized<> for the quantized types. // // // Currently, we simply use these classes as efficient converters between // the quantized types and Vectorized<float>, usually in bandwidth-bound cases // where doing the arithmetic in full-precision is acceptable (e.g. // elementwise operators). // // // Conversions are as follows: // Vectorized<quint8> -> 4x Vectorized<float> // // The size of the returned float vector is specified by the special // constexpr function float_num_vecs. The type of the value returned // from dequantize (and expected as an argument to quantize) is // specified by float_vec_return_type. // // When writing kernels with these vectors, it is expected that floating- // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs // iterations. namespace at { namespace vec { inline namespace CPU_CAPABILITY { const vint16 mask_unsigned = vec_splats((short int)0xFF); template <> struct Vectorized<c10::quint8> { private: union { struct { vuint8 _vec0; vuint8 _vec1; }; struct { vbool8 _vecb0; vbool8 _vecb1; }; } __attribute__((__may_alias__)); public: Vectorized() {} using size_type = int; static constexpr size_type size() { return 32; } static constexpr size_t float_num_vecs() { return 4; } static constexpr int int_num_vecs() { return 4; } using float_vec_return_type = std::array<Vectorized<float>, 4>; using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>; using value_type = typename c10::quint8::underlying; using vec_internal_type = vuint8; using vec_internal_mask_type = vbool8; // Broadcast constructor C10_ALWAYS_INLINE Vectorized(const c10::quint8& val) : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {} C10_ALWAYS_INLINE Vectorized(const Vectorized<c10::quint8>& other) : _vec0{other._vec0}, _vec1(other._vec1) {} C10_ALWAYS_INLINE Vectorized(vuint8 v) : _vec0{v}, _vec1{v} {} C10_ALWAYS_INLINE Vectorized(vbool8 vmask) : _vecb0{vmask}, _vecb1{vmask} {} C10_ALWAYS_INLINE Vectorized(vuint8 v1, vuint8 v2) : _vec0{v1}, _vec1{v2} {} C10_ALWAYS_INLINE Vectorized(vbool8 v1, vbool8 v2) : _vecb0{v1}, _vecb1{v2} {} C10_ALWAYS_INLINE const vec_internal_type& vec0() const { return _vec0; } C10_ALWAYS_INLINE const vec_internal_type& vec1() const { return _vec1; } static C10_ALWAYS_INLINE Vectorized<c10::quint8> loadu( const void* ptr, int count = size()) { if (count == size()) { return { vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)), vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))}; } __at_align__ value_type tmp_values[size()]; std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type)); return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)}; } void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const { if (count == size()) { vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr)); vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr)); } else if (count > 0) { __at_align__ value_type tmp_values[size()]; vec_vsx_st(_vec0, offset0, tmp_values); vec_vsx_st(_vec1, offset16, tmp_values); std::memcpy( ptr, tmp_values, std::min(count, size()) * sizeof(value_type)); } } public: float_vec_return_type C10_ALWAYS_INLINE dequantize( Vectorized<float> scale, Vectorized<float> zero_point, Vectorized<float> scale_zp_premul) const { // 
unpacking unsigned as signed vint16 vecshi0 = vec_unpackh((vint8)_vec0); vint16 vecshi1 = vec_unpackl((vint8)_vec0); vint16 vecshi2 = vec_unpackh((vint8)_vec1); vint16 vecshi3 = vec_unpackl((vint8)_vec1); // signed -> unsigned vecshi0 = vec_and(vecshi0, mask_unsigned); vecshi1 = vec_and(vecshi1, mask_unsigned); vecshi2 = vec_and(vecshi2, mask_unsigned); vecshi3 = vec_and(vecshi3, mask_unsigned); vint32 veci0 = vec_unpackh(vecshi0); vint32 veci1 = vec_unpackl(vecshi0); vint32 veci2 = vec_unpackh(vecshi1); vint32 veci3 = vec_unpackl(vecshi1); vint32 veci4 = vec_unpackh(vecshi2); vint32 veci5 = vec_unpackl(vecshi2); vint32 veci6 = vec_unpackh(vecshi3); vint32 veci7 = vec_unpackl(vecshi3); vfloat32 vecf0_0 = vec_float(veci0); vfloat32 vecf1_0 = vec_float(veci1); vfloat32 vecf0_1 = vec_float(veci2); vfloat32 vecf1_1 = vec_float(veci3); vfloat32 vecf0_2 = vec_float(veci4); vfloat32 vecf1_2 = vec_float(veci5); vfloat32 vecf0_3 = vec_float(veci6); vfloat32 vecf1_3 = vec_float(veci7); vfloat32 scale_vec0 = scale.vec0(); vfloat32 scale_vec1 = scale.vec1(); vfloat32 scale_zp_premul0 = scale_zp_premul.vec0(); vfloat32 scale_zp_premul1 = scale_zp_premul.vec1(); return { Vectorized<float>{ vec_madd(scale_vec0, vecf0_0, scale_zp_premul0), vec_madd(scale_vec1, vecf1_0, scale_zp_premul1)}, Vectorized<float>{ vec_madd(scale_vec0, vecf0_1, scale_zp_premul0), vec_madd(scale_vec1, vecf1_1, scale_zp_premul1)}, Vectorized<float>{ vec_madd(scale_vec0, vecf0_2, scale_zp_premul0), vec_madd(scale_vec1, vecf1_2, scale_zp_premul1)}, Vectorized<float>{ vec_madd(scale_vec0, vecf0_3, scale_zp_premul0), vec_madd(scale_vec1, vecf1_3, scale_zp_premul1)}}; } static Vectorized<c10::quint8> quantize( const float_vec_return_type& rhs, float scale, int32_t zero_point, float inverse_scale) { // constexpr int32_t min_val = std::numeric_limits<value_type>::min(); // constexpr int32_t max_val = std::numeric_limits<value_type>::max(); vfloat32 vec_inverse = vec_splats(inverse_scale); vfloat32 vec_zero_point = vec_splats((float)zero_point); // vuint32 vmin = vec_splats(min_val); // vuint32 vmax = vec_splats(max_val); Vectorized<float> vf0 = rhs[0]; Vectorized<float> vf1 = rhs[1]; Vectorized<float> vf2 = rhs[2]; Vectorized<float> vf3 = rhs[3]; vfloat32 vecf0 = vf0.vec0(); vfloat32 vecf1 = vf0.vec1(); vfloat32 vecf2 = vf1.vec0(); vfloat32 vecf3 = vf1.vec1(); vfloat32 vecf4 = vf2.vec0(); vfloat32 vecf5 = vf2.vec1(); vfloat32 vecf6 = vf3.vec0(); vfloat32 vecf7 = vf3.vec1(); vecf0 = vec_mul(vecf0, vec_inverse); vecf1 = vec_mul(vecf1, vec_inverse); vecf2 = vec_mul(vecf2, vec_inverse); vecf3 = vec_mul(vecf3, vec_inverse); vecf4 = vec_mul(vecf4, vec_inverse); vecf5 = vec_mul(vecf5, vec_inverse); vecf6 = vec_mul(vecf6, vec_inverse); vecf7 = vec_mul(vecf7, vec_inverse); vecf0 = vec_add(vec_rint(vecf0), vec_zero_point); vecf1 = vec_add(vec_rint(vecf1), vec_zero_point); vecf2 = vec_add(vec_rint(vecf2), vec_zero_point); vecf3 = vec_add(vec_rint(vecf3), vec_zero_point); vecf4 = vec_add(vec_rint(vecf4), vec_zero_point); vecf5 = vec_add(vec_rint(vecf5), vec_zero_point); vecf6 = vec_add(vec_rint(vecf6), vec_zero_point); vecf7 = vec_add(vec_rint(vecf7), vec_zero_point); vint32 veci0 = vec_signed(vecf0); vint32 veci1 = vec_signed(vecf1); vint32 veci2 = vec_signed(vecf2); vint32 veci3 = vec_signed(vecf3); vint32 veci4 = vec_signed(vecf4); vint32 veci5 = vec_signed(vecf5); vint32 veci6 = vec_signed(vecf6); vint32 veci7 = vec_signed(vecf7); vint16 vecshi0 = vec_packs(veci0, veci1); vint16 vecshi1 = vec_packs(veci2, veci3); vint16 vecshi2 = vec_packs(veci4, 
veci5); vint16 vecshi3 = vec_packs(veci6, veci7); vuint8 vec0 = vec_packsu(vecshi0, vecshi1); vuint8 vec1 = vec_packsu(vecshi2, vecshi3); return {vec0, vec1}; } Vectorized<c10::quint8> C10_ALWAYS_INLINE relu(Vectorized<c10::quint8> zero_point) const { return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)}; } Vectorized<c10::quint8> C10_ALWAYS_INLINE relu6(Vectorized<c10::quint8> zero_point, Vectorized<c10::quint8> q_six) const { vuint8 max0 = vec_max(_vec0, zero_point._vec0); vuint8 max1 = vec_max(_vec1, zero_point._vec1); return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)}; } int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const { vint16 vecshi0 = vec_unpackh((vint8)_vec0); vint16 vecBshi0 = vec_unpackh((vint8)b._vec0); vint16 vecshi1 = vec_unpackl((vint8)_vec0); vint16 vecBshi1 = vec_unpackl((vint8)b._vec0); vint16 vecshi2 = vec_unpackh((vint8)_vec1); vint16 vecBshi2 = vec_unpackh((vint8)b._vec1); vint16 vecshi3 = vec_unpackl((vint8)_vec1); vint16 vecBshi3 = vec_unpackl((vint8)b._vec1); vecshi0 = vec_and(vecshi0, mask_unsigned); vecBshi0 = vec_and(vecBshi0, mask_unsigned); vecshi1 = vec_and(vecshi1, mask_unsigned); vecBshi1 = vec_and(vecBshi1, mask_unsigned); vecshi2 = vec_and(vecshi2, mask_unsigned); vecBshi2 = vec_and(vecBshi2, mask_unsigned); vecshi3 = vec_and(vecshi3, mask_unsigned); vecBshi3 = vec_and(vecBshi3, mask_unsigned); vint32 veci0 = vec_unpackh(vecshi0); vint32 vecBi0 = vec_unpackh(vecBshi0); vint32 veci1 = vec_unpackl(vecshi0); vint32 vecBi1 = vec_unpackl(vecBshi0); vint32 veci2 = vec_unpackh(vecshi1); vint32 vecBi2 = vec_unpackh(vecBshi1); vint32 veci3 = vec_unpackl(vecshi1); vint32 vecBi3 = vec_unpackl(vecBshi1); vint32 veci4 = vec_unpackh(vecshi2); vint32 vecBi4 = vec_unpackh(vecBshi2); vint32 veci5 = vec_unpackl(vecshi2); vint32 vecBi5 = vec_unpackl(vecBshi2); vint32 veci6 = vec_unpackh(vecshi3); vint32 vecBi6 = vec_unpackh(vecBshi3); vint32 veci7 = vec_unpackl(vecshi3); vint32 vecBi7 = vec_unpackl(vecBshi3); return { Vectorized<c10::qint32>(veci0 - vecBi0, veci1 - vecBi1), Vectorized<c10::qint32>(veci2 - vecBi2, veci3 - vecBi3), Vectorized<c10::qint32>(veci4 - vecBi4, veci5 - vecBi5), Vectorized<c10::qint32>(veci6 - vecBi6, veci7 - vecBi7)}; } static Vectorized<c10::quint8> requantize_from_int( const int_vec_return_type& inp, float multiplier, int32_t zero_point) { vfloat32 vec_multiplier = vec_splats(multiplier); vint32 vec_zero_point = vec_splats(zero_point); Vectorized<c10::qint32> vi0 = inp[0]; Vectorized<c10::qint32> vi1 = inp[1]; Vectorized<c10::qint32> vi2 = inp[2]; Vectorized<c10::qint32> vi3 = inp[3]; vfloat32 vecf0 = vec_float(vi0.vec0()); vfloat32 vecf1 = vec_float(vi0.vec1()); vfloat32 vecf2 = vec_float(vi1.vec0()); vfloat32 vecf3 = vec_float(vi1.vec1()); vfloat32 vecf4 = vec_float(vi2.vec0()); vfloat32 vecf5 = vec_float(vi2.vec1()); vfloat32 vecf6 = vec_float(vi3.vec0()); vfloat32 vecf7 = vec_float(vi3.vec1()); vecf0 = vec_mul(vecf0, vec_multiplier); vecf1 = vec_mul(vecf1, vec_multiplier); vecf2 = vec_mul(vecf2, vec_multiplier); vecf3 = vec_mul(vecf3, vec_multiplier); vecf4 = vec_mul(vecf4, vec_multiplier); vecf5 = vec_mul(vecf5, vec_multiplier); vecf6 = vec_mul(vecf6, vec_multiplier); vecf7 = vec_mul(vecf7, vec_multiplier); vecf0 = vec_rint(vecf0); vecf1 = vec_rint(vecf1); vecf2 = vec_rint(vecf2); vecf3 = vec_rint(vecf3); vecf4 = vec_rint(vecf4); vecf5 = vec_rint(vecf5); vecf6 = vec_rint(vecf6); vecf7 = vec_rint(vecf7); vint32 veci0 = vec_signed(vecf0); vint32 veci1 = vec_signed(vecf1); vint32 veci2 = 
vec_signed(vecf2); vint32 veci3 = vec_signed(vecf3); vint32 veci4 = vec_signed(vecf4); vint32 veci5 = vec_signed(vecf5); vint32 veci6 = vec_signed(vecf6); vint32 veci7 = vec_signed(vecf7); veci0 = vec_add(veci0, vec_zero_point); veci1 = vec_add(veci1, vec_zero_point); veci2 = vec_add(veci2, vec_zero_point); veci3 = vec_add(veci3, vec_zero_point); veci4 = vec_add(veci4, vec_zero_point); veci5 = vec_add(veci5, vec_zero_point); veci6 = vec_add(veci6, vec_zero_point); veci7 = vec_add(veci7, vec_zero_point); vint16 vecshi0 = vec_packs(veci0, veci1); vint16 vecshi1 = vec_packs(veci2, veci3); vint16 vecshi2 = vec_packs(veci4, veci5); vint16 vecshi3 = vec_packs(veci6, veci7); vuint8 vec0 = vec_packsu(vecshi0, vecshi1); vuint8 vec1 = vec_packsu(vecshi2, vecshi3); return {vec0, vec1}; } DEFINE_MEMBER_OP(operator==, c10::quint8, vec_cmpeq) DEFINE_MEMBER_OP(operator!=, c10::quint8, vec_cmpne) DEFINE_MEMBER_OP(operator<, c10::quint8, vec_cmplt) DEFINE_MEMBER_OP(operator<=, c10::quint8, vec_cmple) DEFINE_MEMBER_OP(operator>, c10::quint8, vec_cmpgt) DEFINE_MEMBER_OP(operator>=, c10::quint8, vec_cmpge) DEFINE_MEMBER_OP(operator+, c10::quint8, vec_add) DEFINE_MEMBER_OP(operator-, c10::quint8, vec_sub) DEFINE_MEMBER_OP(operator*, c10::quint8, vec_mul) DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::quint8, /) DEFINE_MEMBER_OP(maximum, c10::quint8, vec_max) DEFINE_MEMBER_OP(minimum, c10::quint8, vec_min) DEFINE_MEMBER_OP(operator&, c10::quint8, vec_and) DEFINE_MEMBER_OP(operator|, c10::quint8, vec_or) DEFINE_MEMBER_OP(operator^, c10::quint8, vec_xor) }; template <> Vectorized<c10::quint8> inline maximum( const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) { return a.maximum(b); } template <> Vectorized<c10::quint8> inline minimum( const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) { return a.minimum(b); } } // namespace } // namespace vec } // namespace at
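dequantize() and widening_subtract() above unpack the unsigned bytes with the signed vec_unpackh/vec_unpackl and then AND each 16-bit lane with mask_unsigned (0x00FF) to drop the replicated sign bits. A minimal scalar sketch of that zero-extension trick (illustrative name):

#include <cstdint>

// VSX only provides sign-extending unpacks, so an unsigned byte is first
// sign-extended to 16 bits and then masked back into the 0..255 range.
inline int16_t zero_extend_u8(uint8_t b) {
  int16_t sign_extended = static_cast<int16_t>(static_cast<int8_t>(b)); // 0xFF -> -1 (0xFFFF)
  return static_cast<int16_t>(sign_extended & 0x00FF);                  // -> 255
}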
pytorch-main/aten/src/ATen/cpu/vec/vec256/vsx/vsx_helpers.h
#pragma once #include <cstdint> #include <c10/macros/Macros.h> #include <ATen/cpu/vec/intrinsics.h> using vbool8 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) char; using vbool16 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) short; using vbool32 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) int; using vbool64 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) long long; using vint8 = __attribute__((altivec(vector__))) signed char; using vint16 = __attribute__((altivec(vector__))) signed short; using vint32 = __attribute__((altivec(vector__))) signed int; using vint64 = __attribute__((altivec(vector__))) signed long long; using vuint8 = __attribute__((altivec(vector__))) unsigned char; using vuint16 = __attribute__((altivec(vector__))) unsigned short; using vuint32 = __attribute__((altivec(vector__))) unsigned int; using vuint64 = __attribute__((altivec(vector__))) unsigned long long; using vfloat32 = __attribute__((altivec(vector__))) float; using vfloat64 = __attribute__((altivec(vector__))) double; #if !defined(vec_float) C10_ALWAYS_INLINE vfloat32 vec_float(const vint32& vec_in) { vfloat32 vec_out; __asm__("xvcvsxwsp %x0,%x1" : "=wf"(vec_out) : "wa"(vec_in)); return vec_out; } #endif #if !defined(vec_signed) C10_ALWAYS_INLINE vint32 vec_signed(const vfloat32& vec_in) { vint32 vec_out; __asm__("xvcvspsxws %x0,%x1" : "=wa"(vec_out) : "wf"(vec_in)); return vec_out; } C10_ALWAYS_INLINE vint64 vec_signed(const vfloat64& vec_in) { vint64 vec_out; __asm__("xvcvdpsxds %x0,%x1" : "=wa"(vec_out) : "wd"(vec_in)); return vec_out; } #endif #if !defined(vec_neg) C10_ALWAYS_INLINE vfloat32 vec_neg(const vfloat32& vec_in) { vfloat32 vec_out; __asm__("xvnegsp %x0,%x1" : "=wf"(vec_out) : "wf"(vec_in)); return vec_out; } C10_ALWAYS_INLINE vfloat64 vec_neg(const vfloat64& vec_in) { vfloat64 vec_out; __asm__("xvnegdp %x0,%x1" : "=wd"(vec_out) : "wd"(vec_in)); return vec_out; } C10_ALWAYS_INLINE vint16 vec_neg(const vint16& vec_in) { vint16 vint0 = {0, 0, 0, 0 ,0, 0, 0, 0}; return vec_vsubuhm(vint0, vec_in); } C10_ALWAYS_INLINE vint32 vec_neg(const vint32& vec_in) { vint32 vint0 = {0, 0, 0, 0}; return vec_vsubuwm(vint0, vec_in); } C10_ALWAYS_INLINE vint64 vec_neg(const vint64& vec_in) { vint64 vint0 = {0, 0}; return vec_vsubudm(vint0, vec_in); } #endif #if !defined(vec_sldw) template <unsigned int C> C10_ALWAYS_INLINE vfloat32 vec_sldw_aux(const vfloat32& vec_in0, const vfloat32& vec_in1) { vfloat32 vec_out; __asm("xxsldwi %x0, %x1, %x2, %3 " : "=wa"(vec_out) : "wa"(vec_in0), "wa"(vec_in1), "I"(C)); return vec_out; } #define vec_sldw(a, b, c) vec_sldw_aux<c>(a, b) #endif #define vec_not(a) vec_nor(a, a) // Vectorized min/max which return a if any operand is nan template <class T> C10_ALWAYS_INLINE T vec_min_nan(const T& a, const T& b) { return vec_min(a, b); } template <class T> C10_ALWAYS_INLINE T vec_max_nan(const T& a, const T& b) { return vec_max(a, b); } // Specializations for float/double taken from Eigen template<> C10_ALWAYS_INLINE vfloat32 vec_min_nan<vfloat32>(const vfloat32& a, const vfloat32& b) { // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN vfloat32 ret; __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); return ret; } // Specializations for float/double taken from Eigen template<> C10_ALWAYS_INLINE vfloat32 vec_max_nan<vfloat32>(const vfloat32& a, const vfloat32& b) { // NOTE: about 10% slower than vec_max, but 
consistent with std::min and SSE regarding NaN vfloat32 ret; __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); return ret; } template<> C10_ALWAYS_INLINE vfloat64 vec_min_nan<vfloat64>(const vfloat64& a, const vfloat64& b) { // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN vfloat64 ret; __asm__ ("xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); return ret; } template<> C10_ALWAYS_INLINE vfloat64 vec_max_nan<vfloat64>(const vfloat64& a, const vfloat64& b) { // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN vfloat64 ret; __asm__ ("xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b)); return ret; } // Vectorizes min/max function which returns nan if any side is nan #define C10_VSX_VEC_NAN_PROPAG(name, type, btype, func) \ C10_ALWAYS_INLINE type name(const type& a, const type& b) { \ type tmp = func(a, b); \ btype nan_a = vec_cmpne(a, a); \ btype nan_b = vec_cmpne(b, b); \ tmp = vec_sel(tmp, a, nan_a); \ return vec_sel(tmp, b, nan_b); \ } C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, vfloat32, vbool32, vec_min) C10_VSX_VEC_NAN_PROPAG(vec_max_nan2, vfloat32, vbool32, vec_max) C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, vfloat64, vbool64, vec_min) C10_VSX_VEC_NAN_PROPAG(vec_max_nan2, vfloat64, vbool64, vec_max) #undef C10_VSX_VEC_NAN_PROPAG #define DEFINE_MEMBER_UNARY_OP(op, op_type, func) \ Vectorized<op_type> C10_ALWAYS_INLINE op() const { \ return Vectorized<op_type>{func(_vec0), func(_vec1)}; \ } #define DEFINE_MEMBER_OP(op, op_type, func) \ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \ return Vectorized<op_type>{ \ func(_vec0, other._vec0), func(_vec1, other._vec1)}; \ } #define DEFINE_MEMBER_BITWISE_OP(op, op_type, func) \ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \ return Vectorized<op_type>{ \ func(_vecb0, other._vecb0), func(_vecb1, other._vecb1)}; \ } #define DEFINE_MEMBER_TERNARY_OP(op, op_type, func) \ Vectorized<op_type> C10_ALWAYS_INLINE op( \ const Vectorized<op_type>& b, const Vectorized<op_type>& c) const { \ return Vectorized<op_type>{ \ func(_vec0, b._vec0, c._vec0), func(_vec1, b._vec1, c._vec1)}; \ } #define DEFINE_MEMBER_EMULATE_BINARY_OP(op, op_type, binary_op) \ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& b) const { \ Vectorized<op_type>::vec_internal_type ret_0; \ Vectorized<op_type>::vec_internal_type ret_1; \ for (int i = 0; i < Vectorized<op_type>::size() / 2; i++) { \ ret_0[i] = _vec0[i] binary_op b._vec0[i]; \ ret_1[i] = _vec1[i] binary_op b._vec1[i]; \ } \ return Vectorized<op_type>{ret_0, ret_1}; \ } #define DEFINE_MEMBER_OP_AND_ONE(op, op_type, func) \ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \ using vvtype = Vectorized<op_type>::vec_internal_type; \ const vvtype v_one = vec_splats(static_cast<op_type>(1.0)); \ vvtype ret0 = (vvtype)func(_vec0, other._vec0); \ vvtype ret1 = (vvtype)func(_vec1, other._vec1); \ return Vectorized<op_type>{vec_and(ret0, v_one), vec_and(ret1, v_one)}; \ } #define DEFINE_CLAMP_FUNCS(operand_type) \ template <> \ Vectorized<operand_type> C10_ALWAYS_INLINE clamp( \ const Vectorized<operand_type>& a, \ const Vectorized<operand_type>& min, \ const Vectorized<operand_type>& max) { \ return Vectorized<operand_type>{ \ vec_min_nan(vec_max_nan(a.vec0(), min.vec0()), max.vec0()), \ vec_min_nan(vec_max_nan(a.vec1(), 
min.vec1()), max.vec1())}; \ } \ template <> \ Vectorized<operand_type> C10_ALWAYS_INLINE clamp_min( \ const Vectorized<operand_type>& a, const Vectorized<operand_type>& min) { \ return Vectorized<operand_type>{ \ vec_max_nan(a.vec0(), min.vec0()), \ vec_max_nan(a.vec1(), min.vec1())}; \ } \ template <> \ Vectorized<operand_type> C10_ALWAYS_INLINE clamp_max( \ const Vectorized<operand_type>& a, const Vectorized<operand_type>& max) { \ return Vectorized<operand_type>{ \ vec_min_nan(a.vec0(), max.vec0()), \ vec_min_nan(a.vec1(), max.vec1())}; \ } #define DEFINE_REINTERPRET_CAST_FUNCS( \ first_type, cast_type, cast_inner_vector_type) \ template <> \ C10_ALWAYS_INLINE Vectorized<cast_type> cast<cast_type, first_type>( \ const Vectorized<first_type>& src) { \ return Vectorized<cast_type>{(cast_inner_vector_type)src.vec0(), \ (cast_inner_vector_type)src.vec1()}; \ } #define DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(first_type) \ DEFINE_REINTERPRET_CAST_FUNCS(first_type, double, vfloat64) \ DEFINE_REINTERPRET_CAST_FUNCS(first_type, float, vfloat32) \ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int64_t, vint64) \ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int32_t, vint32) \ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int16_t, vint16) // it can be used to emulate blend faster constexpr int blendChoice(uint32_t mask, uint32_t half1 = 0xF, uint32_t half2 = 0xF0) { uint32_t none = 0; uint32_t both = half1 | half2; // clamp it between 0 and both mask = mask & both; // return (a._vec0, a._vec1) if (mask == none) return 0; // return (b._vec0,b._vec1) else if (mask == both) return 1; // return (b._vec0,a._vec1) else if (mask == half1) return 2; // return (a._vec0,b._vec1) else if (mask == half2) return 3; // return (*_vec0,a._vec1) else if (mask > 0 && mask < half1) return 4; // return (*_vec0,b._vec1) else if ((mask & half2) == half2) return 5; // return (a._vec0,*_vec1) else if ((mask & half1) == 0 && mask > half1) return 6; // return (b._vec0,*_vec1) else if ((mask & half1) == half1 && mask > half1) return 7; // return (*_vec0,*_vec1) return 8; } // it can be used to emulate blend faster constexpr int blendChoiceDbl(uint32_t mask) { // clamp it 0 and 0xF return blendChoice(mask, 0x3, 0xC); } constexpr vbool32 VsxMask1(uint32_t mask) { uint32_t g0 = (mask & 1) * 0xffffffff; uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff; uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff; uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff; return (vbool32){g0, g1, g2, g3}; } constexpr vbool32 VsxMask2(uint32_t mask) { uint32_t mask2 = (mask & 0xFF) >> 4; return VsxMask1(mask2); } constexpr vbool64 VsxDblMask1(uint32_t mask) { uint64_t g0 = (mask & 1) * 0xffffffffffffffff; uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff; return (vbool64){g0, g1}; } constexpr vbool64 VsxDblMask2(uint32_t mask) { uint32_t mask2 = (mask & 0xF) >> 2; return VsxDblMask1(mask2); } constexpr int maskForComplex(uint32_t mask) { mask = mask & 0xF; int complex_mask = 0; if (mask & 1) complex_mask |= 3; if (mask & 2) complex_mask |= (3 << 2); if (mask & 4) complex_mask |= (3 << 4); if (mask & 8) complex_mask |= (3 << 6); return complex_mask; } constexpr int maskForComplexDbl(uint32_t mask) { mask = mask & 0x3; int complex_mask = 0; if (mask & 1) complex_mask |= 3; if (mask & 2) complex_mask |= (3 << 2); return complex_mask; } constexpr int blendChoiceComplex(uint32_t mask) { return blendChoice(maskForComplex(mask)); } constexpr int blendChoiceComplexDbl(uint32_t mask) { return blendChoiceDbl(maskForComplexDbl(mask)); } constexpr vbool32 VsxComplexMask1(uint32_t mask) 
{ return VsxMask1(maskForComplex(mask)); } constexpr vbool32 VsxComplexMask2(uint32_t mask) { uint32_t mask2 = (mask & 0xF) >> 2; return VsxMask1(maskForComplex(mask2)); } constexpr vbool64 VsxComplexDblMask1(uint32_t mask) { return VsxDblMask1(mask); } constexpr vbool64 VsxComplexDblMask2(uint32_t mask) { uint32_t mask2 = (mask & 0xF) >> 2; return VsxDblMask1(mask2); } // constants namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { // constexpr int offset0 = 0; constexpr int offset16 = 16; // #Constants const vuint8 mask_zero_bits = vuint8{128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 96, 64, 32, 0}; const vuint8 swap_mask = vuint8{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}; const vint32 v0x7f = vec_splats(0x7f); const vint32 vi_0 = vec_splats((int)(0)); const vint32 vi_1 = vec_splats((int)1); const vint32 vi_2 = vec_splats((int)2); const vint32 vi_4 = vec_splats((int)4); const vint32 vi_inv1 = vec_splats((int)~1); const vuint32 vu_29 = vec_splats(29u); const vuint32 vu_23 = vec_splats(23u); const vbool32 inv_mant_mask = (vbool32)vec_splats((unsigned int)~0xff800000); const vbool32 sign_mask = (vbool32)vec_splats((int)0x80000000); const vbool32 real_mask = vbool32{0xFFFFFFFF, 0x0, 0xFFFFFFFF, 0x0}; const vbool32 imag_mask = vbool32{0x0, 0xFFFFFFFF, 0x0, 0xFFFFFFFF}; const vbool32 isign_mask = vbool32{0x0, 0x80000000, 0x0, 0x80000000}; const vbool32 rsign_mask = vbool32{0x80000000, 0x0, 0x80000000, 0x0}; const vbool64 vd_imag_mask = vbool64{0x0, 0xFFFFFFFFFFFFFFFF}; const vbool64 vd_real_mask = vbool64{0xFFFFFFFFFFFFFFFF, 0x0}; const vbool64 vd_isign_mask = vbool64{0x0, 0x8000000000000000}; const vbool64 vd_rsign_mask = vbool64{0x8000000000000000, 0x0}; const vfloat32 zero = vec_splats(0.f); const vfloat32 half = vec_splats(0.5f); const vfloat32 one = vec_splats(1.f); const vfloat32 two = vec_splats(2.0f); const vfloat32 _4div_pi = vec_splats(1.27323954473516f); const vfloat32 v_inf = (vfloat32)vec_splats(0x7f800000u); const vfloat32 v_minus_inf = vfloat32{ 0xff800000u, 0xff800000u, 0xff800000u, 0xff800000u }; const vfloat32 v_nan = (vfloat32)vec_splats(0x7fffffff); const vfloat32 log10e_inv = vec_splats(0.43429448190325176f); const vfloat32 log2e_inv = vec_splats(1.4426950408889634f); const vfloat32 log2eB_inv = vec_splats(1.442695036924675f); const vfloat32 cephes_SQRTHF = vec_splats(0.707106781186547524f); const vfloat32 coscof_p0 = vec_splats(2.443315711809948E-005f); const vfloat32 coscof_p1 = vec_splats(-1.388731625493765E-003f); const vfloat32 coscof_p2 = vec_splats(4.166664568298827E-002f); const vfloat32 exp_hi = vec_splats(104.f); const vfloat32 exp_lo = vec_splats(-104.f); const vfloat32 exp_p0 = vec_splats(0.000198527617612853646278381f); const vfloat32 exp_p1 = vec_splats((0.00139304355252534151077271f)); const vfloat32 exp_p2 = vec_splats(0.00833336077630519866943359f); const vfloat32 exp_p3 = vec_splats(0.0416664853692054748535156f); const vfloat32 exp_p4 = vec_splats(0.166666671633720397949219f); const vfloat32 exp_p5 = vec_splats(0.5f); const vfloat32 log_p0 = vec_splats(7.0376836292E-2f); const vfloat32 log_p1 = vec_splats(-1.1514610310E-1f); const vfloat32 log_p2 = vec_splats(1.1676998740E-1f); const vfloat32 log_p3 = vec_splats(-1.2420140846E-1f); const vfloat32 log_p4 = vec_splats(+1.4249322787E-1f); const vfloat32 log_p5 = vec_splats(-1.6668057665E-1f); const vfloat32 log_p6 = vec_splats(+2.0000714765E-1f); const vfloat32 log_p7 = vec_splats(-2.4999993993E-1f); const vfloat32 log_p8 = 
vec_splats(+3.3333331174E-1f); const vfloat32 log_q1 = vec_splats(-2.12194440e-4f); const vfloat32 log_q2 = vec_splats(0.693359375f); const vfloat32 max_logf = vec_splats(88.02969187150841f); const vfloat32 max_numf = vec_splats(1.7014117331926442990585209174225846272e38f); const vfloat32 min_inf = (vfloat32)vec_splats(0xff800000u); const vfloat32 min_norm_pos = (vfloat32)vec_splats(0x0800000u); const vfloat32 minus_cephes_dp1 = vec_splats(-0.78515625f); const vfloat32 minus_cephes_dp2 = vec_splats(-2.4187564849853515625e-4f); const vfloat32 minus_cephes_dp3 = vec_splats(-3.77489497744594108e-8f); const vfloat32 negln2f_hi = vec_splats(-0.693145751953125f); const vfloat32 negln2f_lo = vec_splats(-1.428606765330187045e-06f); const vfloat32 p0 = vec_splats(2.03721912945E-4f); const vfloat32 p1 = vec_splats(8.33028376239E-3f); const vfloat32 p2 = vec_splats(1.66667160211E-1f); const vfloat32 sincof_p0 = vec_splats(-1.9515295891E-4f); const vfloat32 sincof_p1 = vec_splats(8.3321608736E-3f); const vfloat32 sincof_p2 = vec_splats(-1.6666654611E-1f); const vfloat32 tanh_0p625 = vec_splats(0.625f); const vfloat32 tanh_half_max = vec_splats(44.014845935754205f); const vfloat32 tanh_p0 = vec_splats(-5.70498872745E-3f); const vfloat32 tanh_p1 = vec_splats(2.06390887954E-2f); const vfloat32 tanh_p2 = vec_splats(-5.37397155531E-2f); const vfloat32 tanh_p3 = vec_splats(1.33314422036E-1f); const vfloat32 tanh_p4 = vec_splats(-3.33332819422E-1f); const vfloat32 vcheck = vec_splats((float)(1LL << 24)); const vfloat32 imag_one = vfloat32{0.f, 1.f, 0.f, 1.f}; const vfloat32 imag_half = vfloat32{0.f, 0.5f, 0.f, 0.5f}; const vfloat32 sqrt2_2 = vfloat32{0.70710676908493042f, 0.70710676908493042, 0.70710676908493042, 0.70710676908493042}; const vfloat32 pi_2 = vfloat32{M_PI / 2, 0.0, M_PI / 2, 0.0}; const vfloat32 vf_89 = vfloat32{89.f, 89.f, 89.f, 89.f}; const vfloat64 vd_one = vec_splats(1.0); const vfloat64 vd_zero = vec_splats(0.0); const vfloat64 vd_log10e_inv = vec_splats(0.43429448190325176); const vfloat64 vd_log2e_inv = vec_splats(1.4426950408889634); const vfloat64 vd_imag_one = vfloat64{0.0, 1.0}; const vfloat64 vd_imag_half = vfloat64{0.0, 0.5}; const vfloat64 vd_sqrt2_2 = vfloat64{0.70710678118654757, 0.70710678118654757}; const vfloat64 vd_pi_2 = vfloat64{M_PI / 2.0, 0.0}; } // namespace } // namespace vec } // namespace at
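The helpers above encode two different NaN conventions for min/max: vec_min_nan/vec_max_nan return the first operand when either input is NaN (consistent with std::min/std::max and the SSE operand order), while the vec_min_nan2/vec_max_nan2 macros return NaN whenever either input is NaN. A scalar model of the two min variants under those semantics (names illustrative, derived from the inline asm and the C10_VSX_VEC_NAN_PROPAG macro above):

#include <cmath>

// Like vec_min_nan: any unordered comparison is false, so `a` comes back
// whenever a or b is NaN.
inline float min_first_on_nan(float a, float b) {
  return (b < a) ? b : a;
}

// Like vec_min_nan2: the vec_sel(tmp, a, a != a) / vec_sel(tmp, b, b != b)
// pair forces NaN through if either side is NaN.
inline float min_propagate_nan(float a, float b) {
  float t = (b < a) ? b : a;
  if (std::isnan(a)) t = a;
  if (std::isnan(b)) t = b;
  return t;
}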
pytorch-main/aten/src/ATen/cpu/vec/vec512/vec512.h
#pragma once // DO NOT DEFINE STATIC DATA IN THIS HEADER! // See Note [Do not compile initializers with AVX] #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <ATen/cpu/vec/vec512/vec512_float.h> #include <ATen/cpu/vec/vec512/vec512_bfloat16.h> #include <ATen/cpu/vec/vec512/vec512_double.h> #include <ATen/cpu/vec/vec512/vec512_int.h> #include <ATen/cpu/vec/vec512/vec512_qint.h> #include <ATen/cpu/vec/vec512/vec512_complex_float.h> #include <ATen/cpu/vec/vec512/vec512_complex_double.h> #include <algorithm> #include <cstddef> #include <cstdint> #include <cstring> #include <iostream> namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) { stream << val.val_; return stream; } inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) { stream << static_cast<int>(val.val_); return stream; } inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) { stream << static_cast<unsigned int>(val.val_); return stream; } template <typename T> std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) { T buf[Vectorized<T>::size()]; vec.store(buf); stream << "vec["; for (int i = 0; i != Vectorized<T>::size(); i++) { if (i != 0) { stream << ", "; } stream << buf[i]; } stream << "]"; return stream; } #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX512) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<> inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) { return _mm512_castpd_ps(src); } template<> inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) { return _mm512_castps_pd(src); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<int64_t scale = 1> std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>> inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) { return _mm512_i64gather_pd(vindex, base_addr, scale); } template<int64_t scale = 1> std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>> inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) { return _mm512_i32gather_ps(vindex, base_addr, scale); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<int64_t scale = 1> std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>> inline mask_gather(const Vectorized<double>& src, const double* base_addr, const Vectorized<int64_t>& vindex, const Vectorized<double>& mask) { auto all_ones = _mm512_castsi512_pd(_mm512_set1_epi64(0xFFFFFFFFFFFFFFFF)); auto mask_ = _mm512_cmp_pd_mask(all_ones, mask.values, _CMP_EQ_OQ); return _mm512_mask_i64gather_pd(src, mask_, vindex, base_addr, scale); } template<int64_t scale = 1> std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>> inline mask_gather(const Vectorized<float>& src, const float* base_addr, const Vectorized<int32_t>& vindex, const Vectorized<float>& mask) { auto all_ones = _mm512_castsi512_ps(_mm512_set1_epi32(0xFFFFFFFF)); auto mask_ = _mm512_cmp_ps_mask(all_ones, mask.values, _CMP_EQ_OQ); return _mm512_mask_i32gather_ps(src, mask_, vindex, base_addr, scale); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<> Vectorized<int64_t> inline 
convert_to_int_of_same_size<double>(const Vectorized<double> &src) { return _mm512_cvtpd_epi64(src); } template<> Vectorized<int32_t> inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) { return _mm512_cvttps_epi32(src); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <> std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) { // inputs: // a = {a0, a1, a3, a3, a4, a5, a6, a7} // b = {b0, b1, b2, b3, b4, b5, b6, b7} // group cols crossing lanes: // return {a0, b0, a1, b1, a2, b2, a3, b3} // {a4, b4, a5, b5, a6, b6, a7, b7} __m512i idx1 = _mm512_set_epi64(11, 3, 10, 2, 9, 1, 8, 0); __m512i idx2 = _mm512_set_epi64(15, 7, 14, 6, 13, 5, 12, 4); return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b), _mm512_mask_permutex2var_pd(a, 0xff, idx2, b)); } template <> std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) { // inputs: // a = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} // b = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} // // return: // {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7} // {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15} __m512i idx1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4, 19, 3, 18, 2, 17, 1, 16, 0); __m512i idx2 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12, 27, 11, 26, 10, 25, 9, 24, 8); return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <> std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) { // inputs: // a = {a0, b0, a1, b1, a2, b2, a3, b3} // b = {a4, b4, a5, b5, a6, b6, a7, b7} // output: // return {a0, a1, a2, a3, a4, a5, a6, a7} // {b0, b1, b2, b3, b4, b5, b6, b7} // The members of indices have been written in binary format for better understandability __m512i idx1 = _mm512_set_epi64(14, 12, 10, 8, 6, 4, 2, 0); __m512i idx2 = _mm512_set_epi64(15, 13, 11, 9, 7, 5, 3, 1); return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b), _mm512_mask_permutex2var_pd(a, 0xff, idx2, b)); } template <> std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) { // inputs: // a = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7} // b = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15} // output: // return {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} // {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15} __m512i idx1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0); __m512i idx2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 7, 5, 3, 1); return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b), _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b)); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template<> inline Vectorized<float> flip(const Vectorized<float> & v) { const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return _mm512_permutexvar_ps(mask, v); } template<> inline Vectorized<double> flip(const 
Vectorized<double> & v) { const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); return _mm512_permutexvar_pd(mask, v); } template<> inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) { const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7); return _mm512_permutexvar_epi64(mask, v); } template<> inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) { const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return _mm512_permutexvar_epi32(mask, v); } template<> inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) { const __m512i mask = _mm512_set_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 ); return _mm512_permutexvar_epi16(mask, v); } inline __m512i flip8(const __m512i & v) { const __m512i mask1 = _mm512_set_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ); const __m512i mask2 = _mm512_set_epi64(1, 0, 3, 2, 5, 4, 7, 6); auto reversed_vec = _mm512_shuffle_epi8(v, mask1); return _mm512_permutexvar_epi64(mask2, reversed_vec); } template<> inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) { return flip8(v); } template<> inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) { return flip8(v); } #endif // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) }}}
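As a portable reference for the interleave2/deinterleave2 contract implemented above with _mm512_permutex2var_ps/pd: interleave2 zips the matching halves of a and b into (low, high) outputs and deinterleave2 inverts it, so applying one after the other round-trips. The templates below are an illustrative scalar equivalent, not the AVX512 path:

#include <array>
#include <cstddef>
#include <utility>

template <typename T, std::size_t N>
std::pair<std::array<T, N>, std::array<T, N>> interleave2_ref(
    const std::array<T, N>& a, const std::array<T, N>& b) {
  std::array<T, N> lo{}, hi{};
  for (std::size_t i = 0; i < N / 2; ++i) {
    lo[2 * i] = a[i];             // lo = {a0, b0, a1, b1, ...}
    lo[2 * i + 1] = b[i];
    hi[2 * i] = a[N / 2 + i];     // hi = {aN/2, bN/2, ...}
    hi[2 * i + 1] = b[N / 2 + i];
  }
  return {lo, hi};
}

template <typename T, std::size_t N>
std::pair<std::array<T, N>, std::array<T, N>> deinterleave2_ref(
    const std::array<T, N>& x, const std::array<T, N>& y) {
  std::array<T, N> a{}, b{};
  for (std::size_t i = 0; i < N / 2; ++i) {
    a[i] = x[2 * i];              // even lanes -> a
    b[i] = x[2 * i + 1];          // odd lanes  -> b
    a[N / 2 + i] = y[2 * i];
    b[N / 2 + i] = y[2 * i + 1];
  }
  return {a, b};
}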
pytorch-main/aten/src/ATen/cpu/vec/vec512/vec512_double.h
#pragma once // DO NOT DEFINE STATIC DATA IN THIS HEADER! // See Note [Do not compile initializers with AVX] #include <ATen/cpu/vec/intrinsics.h> #include <ATen/cpu/vec/vec_base.h> #include <c10/util/irange.h> #if (defined(CPU_CAPABILITY_AVX512)) && !defined(_MSC_VER) #include <sleef.h> #endif namespace at { namespace vec { // See Note [CPU_CAPABILITY namespace] inline namespace CPU_CAPABILITY { #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER) template <> class Vectorized<double> { private: static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0}; public: // values needs to be public for compilation with clang // as vec512.h uses it __m512d values; using value_type = double; using size_type = int; static constexpr size_type size() { return 8; } Vectorized() {} Vectorized(__m512d v) : values(v) {} Vectorized(double val) { values = _mm512_set1_pd(val); } Vectorized(double val1, double val2, double val3, double val4, double val5, double val6, double val7, double val8) { values = _mm512_setr_pd(val1, val2, val3, val4, val5, val6, val7, val8); } operator __m512d() const { return values; } template <int64_t mask> static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm512_mask_blend_pd(mask, a.values, b.values); } static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& mask) { auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF); auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask.values), all_ones, _MM_CMPINT_EQ); return _mm512_mask_blend_pd(mmask, a.values, b.values); } template<typename step_t> static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) { return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step, base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step); } static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b, int64_t count = size()) { switch (count) { case 0: return a; case 1: return blend<1>(a, b); case 2: return blend<3>(a, b); case 3: return blend<7>(a, b); case 4: return blend<15>(a, b); case 5: return blend<31>(a, b); case 6: return blend<63>(a, b); case 7: return blend<127>(a, b); } return b; } static Vectorized<double> loadu(const void* ptr, int64_t count = size()) { if (count == size()) return _mm512_loadu_pd(reinterpret_cast<const double*>(ptr)); __at_align__ double tmp_values[size()]; // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502 // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two // instructions while a loop would be compiled to one instruction. 
for (const auto i : c10::irange(size())) { tmp_values[i] = 0.0; } std::memcpy( tmp_values, reinterpret_cast<const double*>(ptr), count * sizeof(double)); return _mm512_load_pd(tmp_values); } void store(void* ptr, int count = size()) const { if (count == size()) { _mm512_storeu_pd(reinterpret_cast<double*>(ptr), values); } else if (count > 0) { double tmp_values[size()]; _mm512_storeu_pd(reinterpret_cast<double*>(tmp_values), values); std::memcpy(ptr, tmp_values, count * sizeof(double)); } } const double& operator[](int idx) const = delete; double& operator[](int idx) = delete; int zero_mask() const { // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit __mmask8 cmp = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_EQ_OQ); return static_cast<int32_t>(cmp); } Vectorized<double> isnan() const { auto cmp_mask = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_UNORD_Q); return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, 0xFFFFFFFFFFFFFFFF)); } Vectorized<double> map(double (*const f)(double)) const { __at_align__ double tmp[size()]; store(tmp); for (const auto i : c10::irange(size())) { tmp[i] = f(tmp[i]); } return loadu(tmp); } Vectorized<double> abs() const { auto mask = _mm512_set1_pd(-0.f); return _mm512_andnot_pd(mask, values); } Vectorized<double> angle() const { const auto zero_vec = _mm512_castsi512_pd(zero_vector); const auto nan_vec = _mm512_set1_pd(NAN); const auto not_nan_mask = _mm512_cmp_pd_mask(values, values, _CMP_EQ_OQ); const auto not_nan = _mm512_mask_set1_epi64(zero_vector, not_nan_mask, 0xFFFFFFFFFFFFFFFF); const auto nan_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(not_nan), zero_vec, _CMP_EQ_OQ); const auto pi = _mm512_set1_pd(c10::pi<double>); const auto neg_mask = _mm512_cmp_pd_mask(values, zero_vec, _CMP_LT_OQ); auto angle = _mm512_mask_blend_pd(neg_mask, zero_vec, pi); angle = _mm512_mask_blend_pd(nan_mask, angle, nan_vec); return angle; } Vectorized<double> real() const { return *this; } Vectorized<double> imag() const { return _mm512_set1_pd(0); } Vectorized<double> conj() const { return *this; } Vectorized<double> acos() const { return Vectorized<double>(Sleef_acosd8_u10(values)); } Vectorized<double> asin() const { return Vectorized<double>(Sleef_asind8_u10(values)); } Vectorized<double> atan() const { return Vectorized<double>(Sleef_atand8_u10(values)); } Vectorized<double> atan2(const Vectorized<double> &b) const { return Vectorized<double>(Sleef_atan2d8_u10(values, b)); } Vectorized<double> copysign(const Vectorized<double> &sign) const { return Vectorized<double>(Sleef_copysignd8(values, sign)); } Vectorized<double> erf() const { return Vectorized<double>(Sleef_erfd8_u10(values)); } Vectorized<double> erfc() const { return Vectorized<double>(Sleef_erfcd8_u15(values)); } Vectorized<double> erfinv() const { return map(calc_erfinv); } Vectorized<double> exp() const { return Vectorized<double>(Sleef_expd8_u10(values)); } Vectorized<double> exp2() const { return Vectorized<double>(Sleef_exp2d8_u10(values)); } Vectorized<double> expm1() const { return Vectorized<double>(Sleef_expm1d8_u10(values)); } Vectorized<double> fmod(const Vectorized<double>& q) const { return Vectorized<double>(Sleef_fmodd8(values, q)); } Vectorized<double> hypot(const Vectorized<double> &b) const { return Vectorized<double>(Sleef_hypotd8_u05(values, b)); } Vectorized<double> i0() const { return map(calc_i0); } Vectorized<double> i0e() const { return map(calc_i0e); } Vectorized<double> igamma(const 
Vectorized<double> &x) const { __at_align__ double tmp[size()]; __at_align__ double tmp_x[size()]; store(tmp); x.store(tmp_x); for (const auto i : c10::irange(size())) { tmp[i] = calc_igamma(tmp[i], tmp_x[i]); } return loadu(tmp); } Vectorized<double> igammac(const Vectorized<double> &x) const { __at_align__ double tmp[size()]; __at_align__ double tmp_x[size()]; store(tmp); x.store(tmp_x); for (const auto i : c10::irange(size())) { tmp[i] = calc_igammac(tmp[i], tmp_x[i]); } return loadu(tmp); } Vectorized<double> log() const { return Vectorized<double>(Sleef_logd8_u10(values)); } Vectorized<double> log2() const { return Vectorized<double>(Sleef_log2d8_u10(values)); } Vectorized<double> log10() const { return Vectorized<double>(Sleef_log10d8_u10(values)); } Vectorized<double> log1p() const { return Vectorized<double>(Sleef_log1pd8_u10(values)); } Vectorized<double> sin() const { return Vectorized<double>(Sleef_sind8_u10(values)); } Vectorized<double> sinh() const { return Vectorized<double>(Sleef_sinhd8_u10(values)); } Vectorized<double> cos() const { return Vectorized<double>(Sleef_cosd8_u10(values)); } Vectorized<double> cosh() const { return Vectorized<double>(Sleef_coshd8_u10(values)); } Vectorized<double> ceil() const { return _mm512_ceil_pd(values); } Vectorized<double> floor() const { return _mm512_floor_pd(values); } Vectorized<double> frac() const; Vectorized<double> neg() const { return _mm512_xor_pd(_mm512_set1_pd(-0.), values); } Vectorized<double> nextafter(const Vectorized<double> &b) const { return Vectorized<double>(Sleef_nextafterd8(values, b)); } Vectorized<double> round() const { return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC)); } Vectorized<double> tan() const { return Vectorized<double>(Sleef_tand8_u10(values)); } Vectorized<double> tanh() const { return Vectorized<double>(Sleef_tanhd8_u10(values)); } Vectorized<double> trunc() const { return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC)); } Vectorized<double> lgamma() const { return Vectorized<double>(Sleef_lgammad8_u10(values)); } Vectorized<double> sqrt() const { return _mm512_sqrt_pd(values); } Vectorized<double> reciprocal() const { return _mm512_div_pd(_mm512_set1_pd(1), values); } Vectorized<double> rsqrt() const { return _mm512_div_pd(_mm512_set1_pd(1), _mm512_sqrt_pd(values)); } Vectorized<double> pow(const Vectorized<double> &b) const { return Vectorized<double>(Sleef_powd8_u10(values, b)); } // Comparison using the _CMP_**_OQ predicate. 
// `O`: get false if an operand is NaN // `Q`: do not raise if an operand is NaN Vectorized<double> operator==(const Vectorized<double>& other) const { auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ); return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, 0xFFFFFFFFFFFFFFFF)); } Vectorized<double> operator!=(const Vectorized<double>& other) const { auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ); return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, 0xFFFFFFFFFFFFFFFF)); } Vectorized<double> operator<(const Vectorized<double>& other) const { auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LT_OQ); return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, 0xFFFFFFFFFFFFFFFF)); } Vectorized<double> operator<=(const Vectorized<double>& other) const { auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LE_OQ); return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, 0xFFFFFFFFFFFFFFFF)); } Vectorized<double> operator>(const Vectorized<double>& other) const { auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GT_OQ); return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, 0xFFFFFFFFFFFFFFFF)); } Vectorized<double> operator>=(const Vectorized<double>& other) const { auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GE_OQ); return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask, 0xFFFFFFFFFFFFFFFF)); } Vectorized<double> eq(const Vectorized<double>& other) const; Vectorized<double> ne(const Vectorized<double>& other) const; Vectorized<double> lt(const Vectorized<double>& other) const; Vectorized<double> le(const Vectorized<double>& other) const; Vectorized<double> gt(const Vectorized<double>& other) const; Vectorized<double> ge(const Vectorized<double>& other) const; }; template <> Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm512_add_pd(a, b); } template <> Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm512_sub_pd(a, b); } template <> Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm512_mul_pd(a, b); } template <> Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm512_div_pd(a, b); } // frac. Implement this here so we can use subtraction. inline Vectorized<double> Vectorized<double>::frac() const { return *this - this->trunc(); } // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if // either input is a NaN. template <> Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) { auto zero_vec = _mm512_set1_epi64(0); Vectorized<double> max = _mm512_max_pd(a, b); auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q); auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask, 0xFFFFFFFFFFFFFFFF)); // Exploit the fact that all-ones is a NaN. return _mm512_or_pd(max, isnan); } // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if // either input is a NaN. 
template <> Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) { auto zero_vec = _mm512_set1_epi64(0); Vectorized<double> min = _mm512_min_pd(a, b); auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q); auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask, 0xFFFFFFFFFFFFFFFF)); // Exploit the fact that all-ones is a NaN. return _mm512_or_pd(min, isnan); } template <> Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) { return _mm512_min_pd(max, _mm512_max_pd(min, a)); } template <> Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) { return _mm512_max_pd(min, a); } template <> Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) { return _mm512_min_pd(max, a); } template <> Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm512_and_pd(a, b); } template <> Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm512_or_pd(a, b); } template <> Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) { return _mm512_xor_pd(a, b); } inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const { return (*this == other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const { return (*this != other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const { return (*this > other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const { return (*this >= other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const { return (*this < other) & Vectorized<double>(1.0); } inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const { return (*this <= other) & Vectorized<double>(1.0); } template <> inline void convert(const double* src, double* dst, int64_t n) { int64_t i; #pragma unroll for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) { _mm512_storeu_pd(dst + i, _mm512_loadu_pd(src + i)); } #pragma unroll for (; i < n; i++) { dst[i] = src[i]; } } template <> Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) { return _mm512_fmadd_pd(a, b, c); } template <> Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) { return _mm512_fmsub_pd(a, b, c); } #endif }}}
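// A minimal usage sketch of the class above: a vectorized main loop plus a
// masked tail via the `count` arguments of loadu/store. `scale_and_clamp` is a
// hypothetical caller; with CPU_CAPABILITY_AVX512 each iteration covers 8
// doubles, and the generic vec_base.h fallback provides the same interface
// otherwise.
#include <ATen/cpu/vec/vec.h>

void scale_and_clamp(const double* src, double* dst, int64_t n) {
  using Vec = at::vec::Vectorized<double>;
  const Vec scale(2.0), lo(0.0), hi(1.0);
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    Vec v = Vec::loadu(src + i) * scale;
    at::vec::clamp(v, lo, hi).store(dst + i);   // min(hi, max(lo, v))
  }
  if (i < n) {  // tail: touch only the remaining n - i lanes
    Vec v = Vec::loadu(src + i, n - i) * scale;
    at::vec::clamp(v, lo, hi).store(dst + i, n - i);
  }
}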
16,618
34.81681
124
h
null
pytorch-main/aten/src/ATen/cuda/CUDAContext.h
#pragma once #include <cstdint> #include <cuda_runtime_api.h> #include <cusparse.h> #include <cublas_v2.h> #ifdef CUDART_VERSION #include <cusolverDn.h> #endif #if defined(USE_ROCM) && ROCM_VERSION >= 50300 #include <hipsolver/hipsolver.h> #endif #include <ATen/core/ATenGeneral.h> #include <ATen/Context.h> #include <c10/cuda/CUDAStream.h> #include <c10/cuda/CUDAFunctions.h> #include <c10/util/Logging.h> #include <ATen/cuda/Exceptions.h> namespace at { namespace cuda { /* A common CUDA interface for ATen. This interface is distinct from CUDAHooks, which defines an interface that links to both CPU-only and CUDA builds. That interface is intended for runtime dispatch and should be used from files that are included in both CPU-only and CUDA builds. CUDAContext, on the other hand, should be preferred by files only included in CUDA builds. It is intended to expose CUDA functionality in a consistent manner. This means there is some overlap between the CUDAContext and CUDAHooks, but the choice of which to use is simple: use CUDAContext when in a CUDA-only file, use CUDAHooks otherwise. Note that CUDAContext simply defines an interface with no associated class. It is expected that the modules whose functions compose this interface will manage their own state. There is only a single CUDA context/state. */ /** * DEPRECATED: use device_count() instead */ inline int64_t getNumGPUs() { return c10::cuda::device_count(); } /** * CUDA is available if we compiled with CUDA, and there are one or more * devices. If we compiled with CUDA but there is a driver problem, etc., * this function will report CUDA is not available (rather than raise an error.) */ inline bool is_available() { return c10::cuda::device_count() > 0; } TORCH_CUDA_CPP_API cudaDeviceProp* getCurrentDeviceProperties(); TORCH_CUDA_CPP_API int warp_size(); TORCH_CUDA_CPP_API cudaDeviceProp* getDeviceProperties(int64_t device); TORCH_CUDA_CPP_API bool canDeviceAccessPeer( int64_t device, int64_t peer_device); TORCH_CUDA_CPP_API Allocator* getCUDADeviceAllocator(); /* Handles */ TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle(); TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle(); TORCH_CUDA_CPP_API void clearCublasWorkspaces(); #if defined(CUDART_VERSION) || defined(USE_ROCM) && ROCM_VERSION >= 50300 TORCH_CUDA_CPP_API cusolverDnHandle_t getCurrentCUDASolverDnHandle(); #endif } // namespace cuda } // namespace at
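// A minimal sketch of the intended usage from a CUDA-only translation unit:
// query device properties through CUDAContext rather than through raw CUDA
// calls. The launch-bound arithmetic below is a hypothetical example.
#include <ATen/cuda/CUDAContext.h>

int pick_block_size() {
  if (!at::cuda::is_available()) return 0;
  cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
  int threads = prop->maxThreadsPerBlock < 256 ? prop->maxThreadsPerBlock : 256;
  // round down to a whole number of warps
  return (threads / at::cuda::warp_size()) * at::cuda::warp_size();
}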
2,469
27.068182
80
h
null
pytorch-main/aten/src/ATen/cuda/CUDADataType.h
#pragma once #include <c10/core/ScalarType.h> #include <cuda.h> #include <library_types.h> namespace at { namespace cuda { template <typename scalar_t> cudaDataType getCudaDataType() { TORCH_INTERNAL_ASSERT(false, "Cannot convert type ", typeid(scalar_t).name(), " to cudaDataType.") } template<> inline cudaDataType getCudaDataType<at::Half>() { return CUDA_R_16F; } template<> inline cudaDataType getCudaDataType<float>() { return CUDA_R_32F; } template<> inline cudaDataType getCudaDataType<double>() { return CUDA_R_64F; } template<> inline cudaDataType getCudaDataType<c10::complex<c10::Half>>() { return CUDA_C_16F; } template<> inline cudaDataType getCudaDataType<c10::complex<float>>() { return CUDA_C_32F; } template<> inline cudaDataType getCudaDataType<c10::complex<double>>() { return CUDA_C_64F; } // HIP doesn't define integral types #ifndef USE_ROCM template<> inline cudaDataType getCudaDataType<uint8_t>() { return CUDA_R_8U; } template<> inline cudaDataType getCudaDataType<int8_t>() { return CUDA_R_8I; } template<> inline cudaDataType getCudaDataType<int>() { return CUDA_R_32I; } #endif #if !defined(USE_ROCM) template<> inline cudaDataType getCudaDataType<int16_t>() { return CUDA_R_16I; } template<> inline cudaDataType getCudaDataType<int64_t>() { return CUDA_R_64I; } template<> inline cudaDataType getCudaDataType<at::BFloat16>() { return CUDA_R_16BF; } #endif inline cudaDataType ScalarTypeToCudaDataType(const c10::ScalarType& scalar_type) { switch (scalar_type) { // HIP doesn't define integral types #ifndef USE_ROCM case c10::ScalarType::Byte: return CUDA_R_8U; case c10::ScalarType::Char: return CUDA_R_8I; case c10::ScalarType::Int: return CUDA_R_32I; #endif case c10::ScalarType::Half: return CUDA_R_16F; case c10::ScalarType::Float: return CUDA_R_32F; case c10::ScalarType::Double: return CUDA_R_64F; case c10::ScalarType::ComplexHalf: return CUDA_C_16F; case c10::ScalarType::ComplexFloat: return CUDA_C_32F; case c10::ScalarType::ComplexDouble: return CUDA_C_64F; #if !defined(USE_ROCM) case c10::ScalarType::Short: return CUDA_R_16I; case c10::ScalarType::Long: return CUDA_R_64I; case c10::ScalarType::BFloat16: return CUDA_R_16BF; #endif default: TORCH_INTERNAL_ASSERT(false, "Cannot convert ScalarType ", scalar_type, " to cudaDataType.") } } } // namespace cuda } // namespace at
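// A minimal sketch: the runtime mapping is what callers use when the dtype is
// only known from a Tensor, while the template serves statically typed paths.
// `pick_compute_type` is a hypothetical helper name.
#include <ATen/core/Tensor.h>
#include <ATen/cuda/CUDADataType.h>

cudaDataType pick_compute_type(const at::Tensor& t) {
  return at::cuda::ScalarTypeToCudaDataType(t.scalar_type());
}

// Statically typed path: resolves at compile time to CUDA_R_32F.
inline cudaDataType float_compute_type() {
  return at::cuda::getCudaDataType<float>();
}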
2,491
24.428571
100
h
null
pytorch-main/aten/src/ATen/cuda/CUDADevice.h
#pragma once #include <ATen/cuda/Exceptions.h> #include <cuda.h> #include <cuda_runtime.h> namespace at { namespace cuda { inline Device getDeviceFromPtr(void* ptr) { cudaPointerAttributes attr{}; AT_CUDA_CHECK(cudaPointerGetAttributes(&attr, ptr)); #if !defined(USE_ROCM) TORCH_CHECK(attr.type != cudaMemoryTypeUnregistered, "The specified pointer resides on host memory and is not registered with any CUDA device."); #endif return {c10::DeviceType::CUDA, static_cast<DeviceIndex>(attr.device)}; } }} // namespace at::cuda
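// A minimal sketch, e.g. for wrapping externally allocated device memory in a
// tensor: recover which device owns a raw pointer. `ptr` is assumed to come
// from cudaMalloc (or registered host memory); unregistered host pointers are
// rejected by the check above.
#include <ATen/cuda/CUDADevice.h>

at::Device owning_device(void* ptr) {
  return at::cuda::getDeviceFromPtr(ptr);
}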
544
20.8
96
h
null
pytorch-main/aten/src/ATen/cuda/CUDAEvent.h
#pragma once #include <ATen/cuda/ATenCUDAGeneral.h> #include <ATen/cuda/CUDAContext.h> #include <c10/core/impl/GPUTrace.h> #include <c10/cuda/CUDAStream.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/Exceptions.h> #include <c10/util/Exception.h> #include <cuda_runtime_api.h> #include <cstdint> #include <utility> namespace at { namespace cuda { /* * CUDAEvents are movable but not copyable wrappers around CUDA's events. * * CUDAEvents are constructed lazily when first recorded, unless * reconstructed from a cudaIpcEventHandle_t. The event has a device, and this * device is acquired from the first recording stream. However, if reconstructed * from a handle, the device should be explicitly specified; or if ipc_handle() is * called before the event is ever recorded, it will use the current device. * Later streams that record the event must match this device. */ struct TORCH_CUDA_CPP_API CUDAEvent { // Constructors // Default value for `flags` is specified below - it's cudaEventDisableTiming CUDAEvent() noexcept = default; CUDAEvent(unsigned int flags) noexcept : flags_{flags} {} CUDAEvent( DeviceIndex device_index, const cudaIpcEventHandle_t* handle) { device_index_ = device_index; CUDAGuard guard(device_index_); AT_CUDA_CHECK(cudaIpcOpenEventHandle(&event_, *handle)); is_created_ = true; } // Note: event destruction done on creating device to avoid creating a // CUDA context on other devices. ~CUDAEvent() { try { if (is_created_) { CUDAGuard guard(device_index_); const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); if (C10_UNLIKELY(interp)) { (*interp)->trace_gpu_event_deletion(reinterpret_cast<uintptr_t>(event_)); } cudaEventDestroy(event_); } } catch (...) { /* No throw */ } } CUDAEvent(const CUDAEvent&) = delete; CUDAEvent& operator=(const CUDAEvent&) = delete; CUDAEvent(CUDAEvent&& other) noexcept { moveHelper(std::move(other)); } CUDAEvent& operator=(CUDAEvent&& other) noexcept { if (this != &other) { moveHelper(std::move(other)); } return *this; } operator cudaEvent_t() const { return event(); } // Less than operator (to allow use in sets) friend bool operator<(const CUDAEvent& left, const CUDAEvent& right) { return left.event_ < right.event_; } optional<at::Device> device() const { if (is_created_) { return at::Device(at::kCUDA, device_index_); } else { return {}; } } bool isCreated() const { return is_created_; } DeviceIndex device_index() const {return device_index_;} cudaEvent_t event() const { return event_; } // Note: cudaEventQuery can be safely called from any device bool query() const { if (!is_created_) { return true; } cudaError_t err = cudaEventQuery(event_); if (err == cudaSuccess) { return true; } else if (err != cudaErrorNotReady) { C10_CUDA_CHECK(err); } else { // ignore and clear the error if not ready (void)cudaGetLastError(); } return false; } void record() { record(getCurrentCUDAStream()); } void recordOnce(const CUDAStream& stream) { if (!was_recorded_) record(stream); } // Note: cudaEventRecord must be called on the same device as the event.
void record(const CUDAStream& stream) { if (!is_created_) { createEvent(stream.device_index()); } TORCH_CHECK(device_index_ == stream.device_index(), "Event device ", device_index_, " does not match recording stream's device ", stream.device_index(), "."); CUDAGuard guard(device_index_); AT_CUDA_CHECK(cudaEventRecord(event_, stream)); const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); if (C10_UNLIKELY(interp)) { (*interp)->trace_gpu_event_record( reinterpret_cast<uintptr_t>(event_), reinterpret_cast<uintptr_t>(stream.stream()) ); } was_recorded_ = true; } // Note: cudaStreamWaitEvent must be called on the same device as the stream. // The event has no actual GPU resources associated with it. void block(const CUDAStream& stream) { if (is_created_) { CUDAGuard guard(stream.device_index()); AT_CUDA_CHECK(cudaStreamWaitEvent(stream, event_, 0)); const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); if (C10_UNLIKELY(interp)) { (*interp)->trace_gpu_event_wait( reinterpret_cast<uintptr_t>(event_), reinterpret_cast<uintptr_t>(stream.stream()) ); } } } // Note: cudaEventElapsedTime can be safely called from any device float elapsed_time(const CUDAEvent& other) const { TORCH_CHECK(is_created_ && other.isCreated(), "Both events must be recorded before calculating elapsed time."); float time_ms = 0; // raise cudaErrorNotReady if either event is recorded but not yet completed AT_CUDA_CHECK(cudaEventElapsedTime(&time_ms, event_, other.event_)); return time_ms; } // Note: cudaEventSynchronize can be safely called from any device void synchronize() const { if (is_created_) { const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); if (C10_UNLIKELY(interp)) { (*interp)->trace_gpu_event_synchronization(reinterpret_cast<uintptr_t>(event_)); } AT_CUDA_CHECK(cudaEventSynchronize(event_)); } } // Note: cudaIpcGetEventHandle must be called on the same device as the event void ipc_handle(cudaIpcEventHandle_t * handle) { if (!is_created_) { // this CUDAEvent object was initially constructed from flags but event_ // is not created yet. createEvent(getCurrentCUDAStream().device_index()); } CUDAGuard guard(device_index_); AT_CUDA_CHECK(cudaIpcGetEventHandle(handle, event_)); } private: unsigned int flags_ = cudaEventDisableTiming; bool is_created_ = false; bool was_recorded_ = false; DeviceIndex device_index_ = -1; cudaEvent_t event_{}; void createEvent(DeviceIndex device_index) { device_index_ = device_index; CUDAGuard guard(device_index_); AT_CUDA_CHECK(cudaEventCreateWithFlags(&event_, flags_)); const c10::impl::PyInterpreter* interp = c10::impl::GPUTrace::get_trace(); if (C10_UNLIKELY(interp)) { (*interp)->trace_gpu_event_creation(reinterpret_cast<uintptr_t>(event_)); } is_created_ = true; } void moveHelper(CUDAEvent&& other) { std::swap(flags_, other.flags_); std::swap(is_created_, other.is_created_); std::swap(was_recorded_, other.was_recorded_); std::swap(device_index_, other.device_index_); std::swap(event_, other.event_); } }; } // namespace cuda } // namespace at
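// A minimal timing sketch. Note that the default flag above is
// cudaEventDisableTiming, so events meant for elapsed_time() must be built
// with cudaEventDefault. `enqueue_work` stands in for arbitrary kernel
// launches on the current stream.
#include <ATen/cuda/CUDAEvent.h>
#include <functional>

float time_on_current_stream(const std::function<void()>& enqueue_work) {
  at::cuda::CUDAEvent start(cudaEventDefault);
  at::cuda::CUDAEvent stop(cudaEventDefault);
  auto stream = at::cuda::getCurrentCUDAStream();
  start.record(stream);
  enqueue_work();                   // async kernel launches on `stream`
  stop.record(stream);
  stop.synchronize();               // block the host until `stop` completes
  return start.elapsed_time(stop);  // milliseconds between the two events
}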
6,793
31.352381
90
h
null
pytorch-main/aten/src/ATen/cuda/CUDAGeneratorImpl.h
#pragma once #include <ATen/core/Generator.h> #include <ATen/cuda/detail/PhiloxCudaStateRaw.cuh> #include <ATen/Context.h> #include <limits> #include <atomic> namespace at { /** * Note [CUDA Graph-safe RNG states] * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * Strategy: * ~~~~~~~~~ * (It helps to look at * cuda/detail/PhiloxCudaStateRaw.cuh and * cuda/detail/UnpackRaw.cuh * while you read this.) * * A CUDA graph containing multiple RNG ops behaves like a * single giant kernel from the perspective of ops external * to the graph. During graph capture, logic in CUDAGeneratorImpl * records the total of all offset increments that occur in the * graphed region, and records the final total as the offset for * the entire graph. * * When the graph reruns, the logic that reruns it * increments this device's CUDA generator's offset * by that total. * * Meanwhile, within the graph, at capture time, instead of * populating PhiloxCudaStates with the uint64_t offset pulled * directly from the global state, PhiloxCudaState uses a pointer * to a one-element stream-local int64_t device tensor * holding an initial offset value, and a uint64_t holding an * intra-graph offset. (The intra-graph offset starts from zero * when capture begins.) In each consumer kernel, * at::cuda::philox::unpack computes the offset to use for this kernel * as intra-graph offset + *initial offset. * * When the graph reruns, the logic that reruns it first * fill_s the initial offset tensor with this device's * CUDA generator's current offset. * * The control flow above ensures graphed execution is bitwise * identical to eager execution as long as RNG ops are enqueued * from a single thread, even if RNG ops and graphs containing * RNG ops are enqueued and run simultaneously on multiple streams. * * Usage: * ~~~~~~ * PhiloxCudaState in this file, and unpack() in * cuda/CUDAGraphsUtils.cuh allow non-divergent use of * CUDAGeneratorImpl whether graph capture is underway or not. * * Each PhiloxCudaState instance should be used for one and only one * consumer kernel. * * Example (see e.g. native/cuda/Dropout.cu): * * #include <ATen/cuda/CUDAGeneratorImpl.h> * #include <ATen/cuda/CUDAGraphsUtils.cuh> * * __global__ void kernel(..., PhiloxCudaState philox_args) { * auto seeds = at::cuda::philox::unpack(philox_args); * IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; * curandStatePhilox4_32_10_t state; * curand_init(std::get<0>(seeds), // seed * idx, // per-thread subsequence * std::get<1>(seeds), // offset in subsequence * &state); * ... * } * * host_caller(...) { * PhiloxCudaState rng_engine_inputs; * { * // See Note [Acquire lock when using random generators] * std::lock_guard<std::mutex> lock(gen->mutex_); * * // gen could be HostState or DevState here! No divergent code needed! 
* rng_engine_inputs = gen->philox_cuda_state(offset_increment); * } * kernel<<<...>>>(..., rng_engine_inputs); * } * */ struct TORCH_CUDA_CPP_API CUDAGeneratorImpl : public c10::GeneratorImpl { // Constructors CUDAGeneratorImpl(DeviceIndex device_index = -1); ~CUDAGeneratorImpl() override = default; // CUDAGeneratorImpl methods std::shared_ptr<CUDAGeneratorImpl> clone() const; void set_current_seed(uint64_t seed) override; void set_offset(uint64_t offset) override; uint64_t get_offset() const override; uint64_t current_seed() const override; uint64_t seed() override; void set_state(const c10::TensorImpl& new_state) override; c10::intrusive_ptr<c10::TensorImpl> get_state() const override; void set_philox_offset_per_thread(uint64_t offset); uint64_t philox_offset_per_thread() const; void capture_prologue(int64_t* seed_extragraph, int64_t* offset_extragraph); uint64_t capture_epilogue(); PhiloxCudaState philox_cuda_state(uint64_t increment); bool reset_rnn_state() { return !no_reset_rnn_state_.test_and_set(); } // Temporarily accommodates call sites that use philox_engine_inputs. // Allows incremental refactor of call sites to use philox_cuda_state. std::pair<uint64_t, uint64_t> philox_engine_inputs(uint64_t increment); static c10::DeviceType device_type(); private: CUDAGeneratorImpl* clone_impl() const override; uint64_t seed_ = default_rng_seed_val; uint64_t philox_offset_per_thread_ = 0; int64_t* seed_extragraph_{}; int64_t* offset_extragraph_{}; uint32_t offset_intragraph_ = 0; bool graph_expects_this_gen_ = false; std::atomic_flag no_reset_rnn_state_; }; namespace cuda { namespace detail { TORCH_CUDA_CPP_API const Generator& getDefaultCUDAGenerator( DeviceIndex device_index = -1); TORCH_CUDA_CPP_API Generator createCUDAGenerator(DeviceIndex device_index = -1); } // namespace detail } // namespace cuda } // namespace at
4,885
33.652482
80
h
null
pytorch-main/aten/src/ATen/cuda/CUDAGraph.h
#pragma once #include <ATen/Tensor.h> #include <c10/core/Device.h> #include <c10/cuda/CUDAGraphsC10Utils.h> #include <c10/cuda/CUDAStream.h> namespace at { struct CUDAGeneratorImpl; namespace cuda { // Standalone way to get a unique mempool id usable as a pool=... argument // to CUDAGraph::capture_begin TORCH_CUDA_CPP_API MempoolId_t graph_pool_handle(); struct TORCH_CUDA_CPP_API CUDAGraph { CUDAGraph(); ~CUDAGraph(); void capture_begin(MempoolId_t pool={0, 0}); void capture_end(); void replay(); void reset(); MempoolId_t pool(); void enable_debug_mode(); void debug_dump(const std::string& debug_path); protected: #if !defined(USE_ROCM) || ROCM_VERSION >= 50300 cudaGraph_t graph_ = NULL; cudaGraphExec_t graph_exec_ = NULL; #endif // internal states so reset() can do its best cleaning up // Set to true in capture_end if cudaStreamEndCapture succeeded // Set back to false soon after, when graph_ is consumed by cudaGraphInstantiate // to create graph_exec_, then graph_ is deleted bool has_graph_ = false; // Set to true in capture_end if cudaGraphInstantiate succeeded bool has_graph_exec_ = false; // uuid of this instance's current capture, retrieved from Cuda CaptureId_t id_; // uuid used to request a particular private mempool from CUDACachingAllocator. // By default, this will be set to {id_, 0}. // // If capture_begin is called with "pool=other_graph.pool()", this graph's mempool_id_ // will be set to the other graph's mempool_id_, and therefore share a mempool with the // other graph. // // If capture_begin is called with "pool=handle" where "handle" came from graph_pool_handle(), // it will share a mempool with any other captures that used "pool=handle". // // Sharing a mempool across graphs saves memory, and it's safe if you // know you'll replay those graphs in the same order you captured them. MempoolId_t mempool_id_; // Stream on which capture began at::cuda::CUDAStream capture_stream_; // Default generator on device where capture began at::CUDAGeneratorImpl* capture_gen_; // Device where capture occurred. Right now, for simplicity, we require all ops // in a capture to run on the same device, but this is a limitation of CUDAGraph, // not CUDA itself. We can straightforwardly modify CUDAGraph to support multi-device // captures if needed. int capture_dev_; // RNG state trackers at::Tensor seed_extragraph_; at::Tensor offset_extragraph_; uint64_t wholegraph_increment_; }; } // namespace cuda } // namespace at
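// A minimal capture/replay sketch. Capture is only legal on a non-default
// stream, so work is enqueued on a pool stream under a stream guard, which
// mirrors what the Python torch.cuda graph helpers do. `build_graph_work` is
// a hypothetical callable that launches the ops to be captured.
#include <ATen/cuda/CUDAGraph.h>
#include <c10/cuda/CUDAGuard.h>
#include <functional>

void run_graphed(const std::function<void()>& build_graph_work) {
  at::cuda::CUDAGraph graph;
  at::cuda::CUDAStream capture_stream = c10::cuda::getStreamFromPool();
  {
    c10::cuda::CUDAStreamGuard guard(capture_stream);
    graph.capture_begin();
    build_graph_work();  // kernels are recorded into the graph, not run
    graph.capture_end();
  }
  graph.replay();        // executes the captured work as one graph launch
}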
2,568
30.716049
96
h
null
pytorch-main/aten/src/ATen/cuda/CUDASparseBlas.h
#pragma once /* Provides a subset of cuSPARSE functions as templates: csrgeam2<scalar_t>(...) where scalar_t is double, float, c10::complex<double> or c10::complex<float>. The functions are available in at::cuda::sparse namespace. */ #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDASparse.h> namespace at { namespace cuda { namespace sparse { #define CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t) \ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \ const cusparseMatDescr_t descrA, int nnzA, \ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \ const int *csrSortedColIndA, const scalar_t *beta, \ const cusparseMatDescr_t descrB, int nnzB, \ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ const scalar_t *csrSortedValC, const int *csrSortedRowPtrC, \ const int *csrSortedColIndC, size_t *pBufferSizeInBytes template <typename scalar_t> inline void csrgeam2_bufferSizeExt( CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::csrgeam2_bufferSizeExt: not implemented for ", typeid(scalar_t).name()); } template <> void csrgeam2_bufferSizeExt<float>( CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(float)); template <> void csrgeam2_bufferSizeExt<double>( CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(double)); template <> void csrgeam2_bufferSizeExt<c10::complex<float>>( CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<float>)); template <> void csrgeam2_bufferSizeExt<c10::complex<double>>( CUSPARSE_CSRGEAM2_BUFFERSIZE_ARGTYPES(c10::complex<double>)); #define CUSPARSE_CSRGEAM2_NNZ_ARGTYPES() \ cusparseHandle_t handle, int m, int n, const cusparseMatDescr_t descrA, \ int nnzA, const int *csrSortedRowPtrA, const int *csrSortedColIndA, \ const cusparseMatDescr_t descrB, int nnzB, const int *csrSortedRowPtrB, \ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ int *csrSortedRowPtrC, int *nnzTotalDevHostPtr, void *workspace template <typename scalar_t> inline void csrgeam2Nnz(CUSPARSE_CSRGEAM2_NNZ_ARGTYPES()) { TORCH_CUDASPARSE_CHECK(cusparseXcsrgeam2Nnz( handle, m, n, descrA, nnzA, csrSortedRowPtrA, csrSortedColIndA, descrB, nnzB, csrSortedRowPtrB, csrSortedColIndB, descrC, csrSortedRowPtrC, nnzTotalDevHostPtr, workspace)); } #define CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t) \ cusparseHandle_t handle, int m, int n, const scalar_t *alpha, \ const cusparseMatDescr_t descrA, int nnzA, \ const scalar_t *csrSortedValA, const int *csrSortedRowPtrA, \ const int *csrSortedColIndA, const scalar_t *beta, \ const cusparseMatDescr_t descrB, int nnzB, \ const scalar_t *csrSortedValB, const int *csrSortedRowPtrB, \ const int *csrSortedColIndB, const cusparseMatDescr_t descrC, \ scalar_t *csrSortedValC, int *csrSortedRowPtrC, int *csrSortedColIndC, \ void *pBuffer template <typename scalar_t> inline void csrgeam2(CUSPARSE_CSRGEAM2_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::csrgeam2: not implemented for ", typeid(scalar_t).name()); } template <> void csrgeam2<float>(CUSPARSE_CSRGEAM2_ARGTYPES(float)); template <> void csrgeam2<double>(CUSPARSE_CSRGEAM2_ARGTYPES(double)); template <> void csrgeam2<c10::complex<float>>( CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<float>)); template <> void csrgeam2<c10::complex<double>>( CUSPARSE_CSRGEAM2_ARGTYPES(c10::complex<double>)); #define CUSPARSE_BSRMM_ARGTYPES(scalar_t) \ cusparseHandle_t handle, cusparseDirection_t dirA, \ cusparseOperation_t transA, cusparseOperation_t transB, int mb, int n, \ int kb, int 
nnzb, const scalar_t *alpha, \ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ const scalar_t *B, int ldb, const scalar_t *beta, scalar_t *C, int ldc template <typename scalar_t> inline void bsrmm(CUSPARSE_BSRMM_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::bsrmm: not implemented for ", typeid(scalar_t).name()); } template <> void bsrmm<float>(CUSPARSE_BSRMM_ARGTYPES(float)); template <> void bsrmm<double>(CUSPARSE_BSRMM_ARGTYPES(double)); template <> void bsrmm<c10::complex<float>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<float>)); template <> void bsrmm<c10::complex<double>>(CUSPARSE_BSRMM_ARGTYPES(c10::complex<double>)); #define CUSPARSE_BSRMV_ARGTYPES(scalar_t) \ cusparseHandle_t handle, cusparseDirection_t dirA, \ cusparseOperation_t transA, int mb, int nb, int nnzb, \ const scalar_t *alpha, const cusparseMatDescr_t descrA, \ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \ int blockDim, const scalar_t *x, const scalar_t *beta, scalar_t *y template <typename scalar_t> inline void bsrmv(CUSPARSE_BSRMV_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::bsrmv: not implemented for ", typeid(scalar_t).name()); } template <> void bsrmv<float>(CUSPARSE_BSRMV_ARGTYPES(float)); template <> void bsrmv<double>(CUSPARSE_BSRMV_ARGTYPES(double)); template <> void bsrmv<c10::complex<float>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<float>)); template <> void bsrmv<c10::complex<double>>(CUSPARSE_BSRMV_ARGTYPES(c10::complex<double>)); #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() #define CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t) \ cusparseHandle_t handle, cusparseDirection_t dirA, \ cusparseOperation_t transA, int mb, int nnzb, \ const cusparseMatDescr_t descrA, scalar_t *bsrValA, \ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ bsrsv2Info_t info, int *pBufferSizeInBytes template <typename scalar_t> inline void bsrsv2_bufferSize(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::bsrsv2_bufferSize: not implemented for ", typeid(scalar_t).name()); } template <> void bsrsv2_bufferSize<float>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(float)); template <> void bsrsv2_bufferSize<double>(CUSPARSE_BSRSV2_BUFFER_ARGTYPES(double)); template <> void bsrsv2_bufferSize<c10::complex<float>>( CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<float>)); template <> void bsrsv2_bufferSize<c10::complex<double>>( CUSPARSE_BSRSV2_BUFFER_ARGTYPES(c10::complex<double>)); #define CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t) \ cusparseHandle_t handle, cusparseDirection_t dirA, \ cusparseOperation_t transA, int mb, int nnzb, \ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ bsrsv2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer template <typename scalar_t> inline void bsrsv2_analysis(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::bsrsv2_analysis: not implemented for ", typeid(scalar_t).name()); } template <> void bsrsv2_analysis<float>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(float)); template <> void bsrsv2_analysis<double>(CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(double)); template <> void bsrsv2_analysis<c10::complex<float>>( CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<float>)); template <> void bsrsv2_analysis<c10::complex<double>>( CUSPARSE_BSRSV2_ANALYSIS_ARGTYPES(c10::complex<double>)); #define CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t) 
\ cusparseHandle_t handle, cusparseDirection_t dirA, \ cusparseOperation_t transA, int mb, int nnzb, const scalar_t *alpha, \ const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ bsrsv2Info_t info, const scalar_t *x, scalar_t *y, \ cusparseSolvePolicy_t policy, void *pBuffer template <typename scalar_t> inline void bsrsv2_solve(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::bsrsv2_solve: not implemented for ", typeid(scalar_t).name()); } template <> void bsrsv2_solve<float>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(float)); template <> void bsrsv2_solve<double>(CUSPARSE_BSRSV2_SOLVE_ARGTYPES(double)); template <> void bsrsv2_solve<c10::complex<float>>( CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<float>)); template <> void bsrsv2_solve<c10::complex<double>>( CUSPARSE_BSRSV2_SOLVE_ARGTYPES(c10::complex<double>)); #define CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t) \ cusparseHandle_t handle, cusparseDirection_t dirA, \ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ int nnzb, const cusparseMatDescr_t descrA, scalar_t *bsrValA, \ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ bsrsm2Info_t info, int *pBufferSizeInBytes template <typename scalar_t> inline void bsrsm2_bufferSize(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::bsrsm2_bufferSize: not implemented for ", typeid(scalar_t).name()); } template <> void bsrsm2_bufferSize<float>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(float)); template <> void bsrsm2_bufferSize<double>(CUSPARSE_BSRSM2_BUFFER_ARGTYPES(double)); template <> void bsrsm2_bufferSize<c10::complex<float>>( CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<float>)); template <> void bsrsm2_bufferSize<c10::complex<double>>( CUSPARSE_BSRSM2_BUFFER_ARGTYPES(c10::complex<double>)); #define CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t) \ cusparseHandle_t handle, cusparseDirection_t dirA, \ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ int nnzb, const cusparseMatDescr_t descrA, const scalar_t *bsrValA, \ const int *bsrRowPtrA, const int *bsrColIndA, int blockDim, \ bsrsm2Info_t info, cusparseSolvePolicy_t policy, void *pBuffer template <typename scalar_t> inline void bsrsm2_analysis(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::bsrsm2_analysis: not implemented for ", typeid(scalar_t).name()); } template <> void bsrsm2_analysis<float>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(float)); template <> void bsrsm2_analysis<double>(CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(double)); template <> void bsrsm2_analysis<c10::complex<float>>( CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<float>)); template <> void bsrsm2_analysis<c10::complex<double>>( CUSPARSE_BSRSM2_ANALYSIS_ARGTYPES(c10::complex<double>)); #define CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t) \ cusparseHandle_t handle, cusparseDirection_t dirA, \ cusparseOperation_t transA, cusparseOperation_t transX, int mb, int n, \ int nnzb, const scalar_t *alpha, const cusparseMatDescr_t descrA, \ const scalar_t *bsrValA, const int *bsrRowPtrA, const int *bsrColIndA, \ int blockDim, bsrsm2Info_t info, const scalar_t *B, int ldb, \ scalar_t *X, int ldx, cusparseSolvePolicy_t policy, void *pBuffer template <typename scalar_t> inline void bsrsm2_solve(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(scalar_t)) { TORCH_INTERNAL_ASSERT( false, "at::cuda::sparse::bsrsm2_solve: not implemented for ", typeid(scalar_t).name()); } template <> 
void bsrsm2_solve<float>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(float)); template <> void bsrsm2_solve<double>(CUSPARSE_BSRSM2_SOLVE_ARGTYPES(double)); template <> void bsrsm2_solve<c10::complex<float>>( CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<float>)); template <> void bsrsm2_solve<c10::complex<double>>( CUSPARSE_BSRSM2_SOLVE_ARGTYPES(c10::complex<double>)); #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE } // namespace sparse } // namespace cuda } // namespace at
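// A minimal sketch of driving one of these templates, here bsrmv<float> for
// y = alpha*A*x + beta*y with A in BSR format. All pointers are assumed to be
// device pointers laid out as cuSPARSE expects; the descriptor comes from
// CUDASparseDescriptors.h and defaults to a general, zero-based matrix.
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDASparseBlas.h>
#include <ATen/cuda/CUDASparseDescriptors.h>

void bsr_matvec_float(
    int mb, int nb, int nnzb, int blockDim,
    const float* bsrValA, const int* bsrRowPtrA, const int* bsrColIndA,
    const float* x, float* y) {
  cusparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
  at::cuda::sparse::CuSparseMatDescriptor descrA;
  float alpha = 1.0f, beta = 0.0f;
  at::cuda::sparse::bsrmv<float>(
      handle, CUSPARSE_DIRECTION_ROW, CUSPARSE_OPERATION_NON_TRANSPOSE,
      mb, nb, nnzb, &alpha, descrA.descriptor(),
      bsrValA, bsrRowPtrA, bsrColIndA, blockDim, x, &beta, y);
}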
12,753
38.486068
80
h
null
pytorch-main/aten/src/ATen/cuda/CUDASparseDescriptors.h
#pragma once #include <ATen/Tensor.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDASparse.h> #include <c10/core/ScalarType.h> #if defined(USE_ROCM) #include <type_traits> #endif namespace at { namespace cuda { namespace sparse { template <typename T, cusparseStatus_t (*destructor)(T*)> struct CuSparseDescriptorDeleter { void operator()(T* x) { if (x != nullptr) { TORCH_CUDASPARSE_CHECK(destructor(x)); } } }; template <typename T, cusparseStatus_t (*destructor)(T*)> class CuSparseDescriptor { public: T* descriptor() const { return descriptor_.get(); } T* descriptor() { return descriptor_.get(); } protected: std::unique_ptr<T, CuSparseDescriptorDeleter<T, destructor>> descriptor_; }; #if AT_USE_CUSPARSE_CONST_DESCRIPTORS() template <typename T, cusparseStatus_t (*destructor)(const T*)> struct ConstCuSparseDescriptorDeleter { void operator()(T* x) { if (x != nullptr) { TORCH_CUDASPARSE_CHECK(destructor(x)); } } }; template <typename T, cusparseStatus_t (*destructor)(const T*)> class ConstCuSparseDescriptor { public: T* descriptor() const { return descriptor_.get(); } T* descriptor() { return descriptor_.get(); } protected: std::unique_ptr<T, ConstCuSparseDescriptorDeleter<T, destructor>> descriptor_; }; #endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS #if defined(USE_ROCM) // hipSPARSE doesn't define this using cusparseMatDescr = std::remove_pointer<cusparseMatDescr_t>::type; using cusparseDnMatDescr = std::remove_pointer<cusparseDnMatDescr_t>::type; using cusparseDnVecDescr = std::remove_pointer<cusparseDnVecDescr_t>::type; using cusparseSpMatDescr = std::remove_pointer<cusparseSpMatDescr_t>::type; using cusparseSpGEMMDescr = std::remove_pointer<cusparseSpGEMMDescr_t>::type; #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() using bsrsv2Info = std::remove_pointer<bsrsv2Info_t>::type; using bsrsm2Info = std::remove_pointer<bsrsm2Info_t>::type; #endif #endif class TORCH_CUDA_CPP_API CuSparseMatDescriptor : public CuSparseDescriptor<cusparseMatDescr, &cusparseDestroyMatDescr> { public: CuSparseMatDescriptor() { cusparseMatDescr_t raw_descriptor; TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor)); descriptor_.reset(raw_descriptor); } CuSparseMatDescriptor(bool upper, bool unit) { cusparseFillMode_t fill_mode = upper ?
CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT; cusparseMatDescr_t raw_descriptor; TORCH_CUDASPARSE_CHECK(cusparseCreateMatDescr(&raw_descriptor)); TORCH_CUDASPARSE_CHECK(cusparseSetMatFillMode(raw_descriptor, fill_mode)); TORCH_CUDASPARSE_CHECK(cusparseSetMatDiagType(raw_descriptor, diag_type)); descriptor_.reset(raw_descriptor); } }; #if AT_USE_HIPSPARSE_TRIANGULAR_SOLVE() class TORCH_CUDA_CPP_API CuSparseBsrsv2Info : public CuSparseDescriptor<bsrsv2Info, &cusparseDestroyBsrsv2Info> { public: CuSparseBsrsv2Info() { bsrsv2Info_t raw_descriptor; TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsv2Info(&raw_descriptor)); descriptor_.reset(raw_descriptor); } }; class TORCH_CUDA_CPP_API CuSparseBsrsm2Info : public CuSparseDescriptor<bsrsm2Info, &cusparseDestroyBsrsm2Info> { public: CuSparseBsrsm2Info() { bsrsm2Info_t raw_descriptor; TORCH_CUDASPARSE_CHECK(cusparseCreateBsrsm2Info(&raw_descriptor)); descriptor_.reset(raw_descriptor); } }; #endif // AT_USE_HIPSPARSE_TRIANGULAR_SOLVE #if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API() cusparseIndexType_t getCuSparseIndexType(const c10::ScalarType& scalar_type); #if AT_USE_HIPSPARSE_GENERIC_52_API() || \ (AT_USE_CUSPARSE_GENERIC_API() && AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS()) class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor : public CuSparseDescriptor<cusparseDnMatDescr, &cusparseDestroyDnMat> { public: explicit CuSparseDnMatDescriptor(const Tensor& input, int64_t batch_offset = -1); }; class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor : public CuSparseDescriptor<cusparseDnVecDescr, &cusparseDestroyDnVec> { public: explicit CuSparseDnVecDescriptor(const Tensor& input); }; class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor : public CuSparseDescriptor<cusparseSpMatDescr, &cusparseDestroySpMat> {}; //AT_USE_HIPSPARSE_GENERIC_52_API() || (AT_USE_CUSPARSE_GENERIC_API() && AT_USE_CUSPARSE_NON_CONST_DESCRIPTORS()) #elif AT_USE_CUSPARSE_CONST_DESCRIPTORS() class TORCH_CUDA_CPP_API CuSparseDnMatDescriptor : public ConstCuSparseDescriptor< cusparseDnMatDescr, &cusparseDestroyDnMat> { public: explicit CuSparseDnMatDescriptor( const Tensor& input, int64_t batch_offset = -1); }; class TORCH_CUDA_CPP_API CuSparseDnVecDescriptor : public ConstCuSparseDescriptor< cusparseDnVecDescr, &cusparseDestroyDnVec> { public: explicit CuSparseDnVecDescriptor(const Tensor& input); }; class TORCH_CUDA_CPP_API CuSparseSpMatDescriptor : public ConstCuSparseDescriptor< cusparseSpMatDescr, &cusparseDestroySpMat> {}; #endif // AT_USE_CUSPARSE_CONST_DESCRIPTORS() class TORCH_CUDA_CPP_API CuSparseSpMatCsrDescriptor : public CuSparseSpMatDescriptor { public: explicit CuSparseSpMatCsrDescriptor(const Tensor& input, int64_t batch_offset = -1); std::tuple<int64_t, int64_t, int64_t> get_size() { int64_t rows, cols, nnz; TORCH_CUDASPARSE_CHECK(cusparseSpMatGetSize( this->descriptor(), &rows, &cols, &nnz)); return std::make_tuple(rows, cols, nnz); } void set_tensor(const Tensor& input) { auto crow_indices = input.crow_indices(); auto col_indices = input.col_indices(); auto values = input.values(); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(crow_indices.is_contiguous()); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(col_indices.is_contiguous()); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(values.is_contiguous()); TORCH_CUDASPARSE_CHECK(cusparseCsrSetPointers( this->descriptor(), crow_indices.data_ptr(), col_indices.data_ptr(), values.data_ptr())); } #if AT_USE_CUSPARSE_GENERIC_SPSV() void set_mat_fill_mode(bool upper) { cusparseFillMode_t fill_mode = upper ? 
CUSPARSE_FILL_MODE_UPPER : CUSPARSE_FILL_MODE_LOWER; TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute( this->descriptor(), CUSPARSE_SPMAT_FILL_MODE, &fill_mode, sizeof(fill_mode))); } void set_mat_diag_type(bool unit) { cusparseDiagType_t diag_type = unit ? CUSPARSE_DIAG_TYPE_UNIT : CUSPARSE_DIAG_TYPE_NON_UNIT; TORCH_CUDASPARSE_CHECK(cusparseSpMatSetAttribute( this->descriptor(), CUSPARSE_SPMAT_DIAG_TYPE, &diag_type, sizeof(diag_type))); } #endif }; #if AT_USE_CUSPARSE_GENERIC_SPSV() class TORCH_CUDA_CPP_API CuSparseSpSVDescriptor : public CuSparseDescriptor<cusparseSpSVDescr, &cusparseSpSV_destroyDescr> { public: CuSparseSpSVDescriptor() { cusparseSpSVDescr_t raw_descriptor; TORCH_CUDASPARSE_CHECK(cusparseSpSV_createDescr(&raw_descriptor)); descriptor_.reset(raw_descriptor); } }; #endif #if AT_USE_CUSPARSE_GENERIC_SPSM() class TORCH_CUDA_CPP_API CuSparseSpSMDescriptor : public CuSparseDescriptor<cusparseSpSMDescr, &cusparseSpSM_destroyDescr> { public: CuSparseSpSMDescriptor() { cusparseSpSMDescr_t raw_descriptor; TORCH_CUDASPARSE_CHECK(cusparseSpSM_createDescr(&raw_descriptor)); descriptor_.reset(raw_descriptor); } }; #endif #if (defined(USE_ROCM) && ROCM_VERSION >= 50200) || !defined(USE_ROCM) class TORCH_CUDA_CPP_API CuSparseSpGEMMDescriptor : public CuSparseDescriptor<cusparseSpGEMMDescr, &cusparseSpGEMM_destroyDescr> { public: CuSparseSpGEMMDescriptor() { cusparseSpGEMMDescr_t raw_descriptor; TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&raw_descriptor)); descriptor_.reset(raw_descriptor); } }; #endif #endif // AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API() } // namespace sparse } // namespace cuda } // namespace at
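// A minimal sketch of the generic-API descriptors: wrap a CUDA sparse CSR
// tensor and read its logical shape and nnz back out of cuSPARSE. `csr` is
// assumed to be a kSparseCsr tensor on a CUDA device; the nnz cross-check is
// illustrative only.
#include <ATen/core/Tensor.h>
#include <ATen/cuda/CUDASparseDescriptors.h>
#include <tuple>

#if AT_USE_CUSPARSE_GENERIC_API() || AT_USE_HIPSPARSE_GENERIC_API()
void check_csr_descriptor(const at::Tensor& csr) {
  at::cuda::sparse::CuSparseSpMatCsrDescriptor desc(csr);
  int64_t rows = 0, cols = 0, nnz = 0;
  std::tie(rows, cols, nnz) = desc.get_size();
  TORCH_CHECK(nnz == csr.values().numel(), "descriptor/tensor nnz mismatch");
}
#endif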
8,265
29.958801
113
h
null
pytorch-main/aten/src/ATen/cuda/CachingHostAllocator.h
#pragma once

#include <c10/core/Allocator.h>
#include <c10/cuda/CUDAStream.h>

namespace at { namespace cuda {

//
// A caching allocator for CUDA host allocations (pinned memory).
//
// This provides a drop-in replacement for THCudaHostAllocator, which re-uses
// freed pinned (page-locked) memory allocations. This avoids device
// synchronizations due to cudaFreeHost calls.
//
// To ensure correct behavior, THCCachingHostAllocator_recordEvent must be
// called anytime a pointer from this allocator is used in a cudaMemcpyAsync
// call between host and device, and passed the corresponding context from the
// allocation. This is currently invoked by at::native::copy_kernel_cuda.
//
// Note that this allocator does not split larger allocations into smaller
// blocks, unlike the caching device allocator.
//
TORCH_CUDA_CPP_API c10::Allocator* getCachingHostAllocator();

// Records an event in the specified stream. The allocation corresponding to the
// input `ptr`/`ctx` will not be re-used until the event has occurred.
TORCH_CUDA_CPP_API bool CachingHostAllocator_recordEvent(void* ptr, void* ctx, c10::cuda::CUDAStream stream);

// Releases cached pinned memory allocations via cudaFreeHost
TORCH_CUDA_CPP_API void CachingHostAllocator_emptyCache();

inline TORCH_CUDA_CPP_API at::DataPtr HostAlloc(size_t size) {
  return getCachingHostAllocator()->allocate(size);
}

} // namespace cuda
} // namespace at
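// A minimal sketch of the protocol described above: allocate pinned memory,
// launch the async H2D copy, then record an event so the block is not handed
// out again while the copy is still in flight. `fill_payload` is hypothetical.
#include <ATen/cuda/CachingHostAllocator.h>
#include <ATen/cuda/Exceptions.h>

void async_h2d(void* device_dst, size_t nbytes, c10::cuda::CUDAStream stream) {
  at::DataPtr pinned = at::cuda::HostAlloc(nbytes);
  // fill_payload(pinned.get(), nbytes);  // host-side write, omitted
  AT_CUDA_CHECK(cudaMemcpyAsync(
      device_dst, pinned.get(), nbytes, cudaMemcpyHostToDevice, stream));
  // The context pointer pairs the block with its allocator bookkeeping:
  at::cuda::CachingHostAllocator_recordEvent(
      pinned.get(), pinned.get_context(), stream);
}  // `pinned` returns to the cache here, but is only reused after the event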
1,420
34.525
85
h
null
pytorch-main/aten/src/ATen/cuda/EmptyTensor.h
#pragma once #include <ATen/core/TensorBase.h> namespace at { namespace detail { TORCH_CUDA_CPP_API TensorBase empty_cuda( IntArrayRef size, ScalarType dtype, c10::optional<Device> device_opt, c10::optional<c10::MemoryFormat> memory_format_opt); TORCH_CUDA_CPP_API TensorBase empty_cuda( IntArrayRef size, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt, c10::optional<c10::MemoryFormat> memory_format_opt); TORCH_CUDA_CPP_API TensorBase empty_cuda( IntArrayRef size, const TensorOptions &options); TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( IntArrayRef size, IntArrayRef stride, ScalarType dtype, c10::optional<Device> device_opt); TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype_opt, c10::optional<Layout> layout_opt, c10::optional<Device> device_opt, c10::optional<bool> pin_memory_opt); TORCH_CUDA_CPP_API TensorBase empty_strided_cuda( IntArrayRef size, IntArrayRef stride, const TensorOptions &options); }} // namespace at::detail
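// A minimal sketch: the dtype-explicit overload is useful on hot paths where
// the full TensorOptions machinery is unnecessary. Passing nullopt picks the
// current CUDA device and the default contiguous memory format.
#include <ATen/cuda/EmptyTensor.h>

at::TensorBase make_float_scratch(at::IntArrayRef sizes) {
  return at::detail::empty_cuda(
      sizes,
      at::kFloat,
      c10::nullopt,   // device: current CUDA device
      c10::nullopt);  // memory format: default contiguous
}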
1,218
25.5
56
h
null
pytorch-main/aten/src/ATen/cuda/Exceptions.h
#pragma once #include <cublas_v2.h> #include <cusparse.h> #include <c10/macros/Export.h> #ifdef CUDART_VERSION #include <cusolver_common.h> #endif #include <ATen/Context.h> #include <c10/util/Exception.h> #include <c10/cuda/CUDAException.h> namespace c10 { class CuDNNError : public c10::Error { using Error::Error; }; } // namespace c10 #define AT_CUDNN_CHECK_WITH_SHAPES(EXPR, ...) AT_CUDNN_CHECK(EXPR, "\n", ##__VA_ARGS__) // See Note [CHECK macro] #define AT_CUDNN_CHECK(EXPR, ...) \ do { \ cudnnStatus_t status = EXPR; \ if (status != CUDNN_STATUS_SUCCESS) { \ if (status == CUDNN_STATUS_NOT_SUPPORTED) { \ TORCH_CHECK_WITH(CuDNNError, false, \ "cuDNN error: ", \ cudnnGetErrorString(status), \ ". This error may appear if you passed in a non-contiguous input.", ##__VA_ARGS__); \ } else { \ TORCH_CHECK_WITH(CuDNNError, false, \ "cuDNN error: ", cudnnGetErrorString(status), ##__VA_ARGS__); \ } \ } \ } while (0) namespace at { namespace cuda { namespace blas { C10_EXPORT const char* _cublasGetErrorEnum(cublasStatus_t error); }}} // namespace at::cuda::blas #define TORCH_CUDABLAS_CHECK(EXPR) \ do { \ cublasStatus_t __err = EXPR; \ TORCH_CHECK(__err == CUBLAS_STATUS_SUCCESS, \ "CUDA error: ", \ at::cuda::blas::_cublasGetErrorEnum(__err), \ " when calling `" #EXPR "`"); \ } while (0) const char *cusparseGetErrorString(cusparseStatus_t status); #define TORCH_CUDASPARSE_CHECK(EXPR) \ do { \ cusparseStatus_t __err = EXPR; \ TORCH_CHECK(__err == CUSPARSE_STATUS_SUCCESS, \ "CUDA error: ", \ cusparseGetErrorString(__err), \ " when calling `" #EXPR "`"); \ } while (0) // cusolver related headers are only supported on cuda now #ifdef CUDART_VERSION namespace at { namespace cuda { namespace solver { C10_EXPORT const char* cusolverGetErrorMessage(cusolverStatus_t status); }}} // namespace at::cuda::solver // When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan. // When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue. #define TORCH_CUSOLVER_CHECK(EXPR) \ do { \ cusolverStatus_t __err = EXPR; \ if ((CUDA_VERSION < 11500 && \ __err == CUSOLVER_STATUS_EXECUTION_FAILED) || \ (CUDA_VERSION >= 11500 && \ __err == CUSOLVER_STATUS_INVALID_VALUE)) { \ TORCH_CHECK_LINALG( \ false, \ "cusolver error: ", \ at::cuda::solver::cusolverGetErrorMessage(__err), \ ", when calling `" #EXPR "`", \ ". This error may appear if the input matrix contains NaN."); \ } else { \ TORCH_CHECK( \ __err == CUSOLVER_STATUS_SUCCESS, \ "cusolver error: ", \ at::cuda::solver::cusolverGetErrorMessage(__err), \ ", when calling `" #EXPR "`"); \ } \ } while (0) #else #define TORCH_CUSOLVER_CHECK(EXPR) EXPR #endif #define AT_CUDA_CHECK(EXPR) C10_CUDA_CHECK(EXPR) // For CUDA Driver API // // This is here instead of in c10 because NVRTC is loaded dynamically via a stub // in ATen, and we need to use its nvrtcGetErrorString. // See NOTE [ USE OF NVRTC AND DRIVER API ]. 
#if !defined(USE_ROCM) #define AT_CUDA_DRIVER_CHECK(EXPR) \ do { \ CUresult __err = EXPR; \ if (__err != CUDA_SUCCESS) { \ const char* err_str; \ CUresult get_error_str_err C10_UNUSED = at::globalContext().getNVRTC().cuGetErrorString(__err, &err_str); \ if (get_error_str_err != CUDA_SUCCESS) { \ AT_ERROR("CUDA driver error: unknown error"); \ } else { \ AT_ERROR("CUDA driver error: ", err_str); \ } \ } \ } while (0) #else #define AT_CUDA_DRIVER_CHECK(EXPR) \ do { \ CUresult __err = EXPR; \ if (__err != CUDA_SUCCESS) { \ AT_ERROR("CUDA driver error: ", static_cast<int>(__err)); \ } \ } while (0) #endif // For CUDA NVRTC // // Note: As of CUDA 10, nvrtc error code 7, NVRTC_ERROR_BUILTIN_OPERATION_FAILURE, // incorrectly produces the error string "NVRTC unknown error." // The following maps it correctly. // // This is here instead of in c10 because NVRTC is loaded dynamically via a stub // in ATen, and we need to use its nvrtcGetErrorString. // See NOTE [ USE OF NVRTC AND DRIVER API ]. #define AT_CUDA_NVRTC_CHECK(EXPR) \ do { \ nvrtcResult __err = EXPR; \ if (__err != NVRTC_SUCCESS) { \ if (static_cast<int>(__err) != 7) { \ AT_ERROR("CUDA NVRTC error: ", at::globalContext().getNVRTC().nvrtcGetErrorString(__err)); \ } else { \ AT_ERROR("CUDA NVRTC error: NVRTC_ERROR_BUILTIN_OPERATION_FAILURE"); \ } \ } \ } while (0)
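// A minimal sketch of the macros in use: each wraps a single call and turns a
// failing status into a c10 exception carrying the stringified expression.
// `dev_buf` is an assumed device allocation.
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>

void zero_and_configure(float* dev_buf, size_t n) {
  AT_CUDA_CHECK(cudaMemsetAsync(
      dev_buf, 0, n * sizeof(float), at::cuda::getCurrentCUDAStream()));
  cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
  TORCH_CUDABLAS_CHECK(cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST));
}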
8,643
54.057325
114
h
null
pytorch-main/aten/src/ATen/cuda/cub.h
#pragma once

#include <cstdint>
#include <c10/core/ScalarType.h>
#include <ATen/cuda/CUDAConfig.h>

// NOTE: These templates are intentionally not defined in this header,
// which avoids re-compiling them for each translation unit. If you get
// a link error, you need to add an explicit instantiation for your
// types in cub.cu

namespace at {
namespace cuda {
namespace cub {

inline int get_num_bits(uint64_t max_key) {
  int num_bits = 1;
  while (max_key > 1) {
    max_key >>= 1;
    num_bits++;
  }
  return num_bits;
}

namespace detail {

// radix_sort_pairs doesn't interact with value_t other than to copy
// the data, so we can save template instantiations by reinterpreting
// it as an opaque type.
template <int N> struct alignas(N) OpaqueType { char data[N]; };

template<typename key_t, int value_size>
void radix_sort_pairs_impl(
    const key_t *keys_in, key_t *keys_out,
    const OpaqueType<value_size> *values_in, OpaqueType<value_size> *values_out,
    int64_t n, bool descending, int64_t begin_bit, int64_t end_bit);

}  // namespace detail

template<typename key_t, typename value_t>
void radix_sort_pairs(
    const key_t *keys_in, key_t *keys_out,
    const value_t *values_in, value_t *values_out,
    int64_t n, bool descending=false, int64_t begin_bit=0,
    int64_t end_bit=sizeof(key_t)*8) {
  static_assert(std::is_trivially_copyable<value_t>::value ||
                AT_ROCM_ENABLED(),  // ROCm incorrectly fails this check for vector types
                "radix_sort_pairs value type must be trivially copyable");
  // Make value type opaque, so all inputs of a certain size use the same template instantiation
  using opaque_t = detail::OpaqueType<sizeof(value_t)>;
  static_assert(sizeof(value_t) <= 8 && (sizeof(value_t) & (sizeof(value_t) - 1)) == 0,
                "This size of value_t is not instantiated. Please instantiate it in cub.cu"
                " and modify this check.");
  static_assert(sizeof(value_t) == alignof(value_t), "Expected value_t to be size-aligned");
  detail::radix_sort_pairs_impl(
      keys_in, keys_out,
      reinterpret_cast<const opaque_t*>(values_in),
      reinterpret_cast<opaque_t*>(values_out),
      n, descending, begin_bit, end_bit);
}

template<typename key_t>
void radix_sort_keys(
    const key_t *keys_in, key_t *keys_out,
    int64_t n, bool descending=false, int64_t begin_bit=0,
    int64_t end_bit=sizeof(key_t)*8);

// NOTE: Intermediate sums will be truncated to input_t precision
template <typename input_t, typename output_t>
void inclusive_sum_truncating(const input_t *input, output_t *output, int64_t n);

template <typename scalar_t>
void inclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) {
  return inclusive_sum_truncating(input, output, n);
}

// NOTE: Sums are done in common_type<input_t, output_t>
template <typename input_t, typename output_t>
void exclusive_sum_in_common_type(const input_t *input, output_t *output, int64_t n);

template <typename scalar_t>
void exclusive_sum(const scalar_t *input, scalar_t *output, int64_t n) {
  return exclusive_sum_in_common_type(input, output, n);
}

void mask_exclusive_sum(const uint8_t *mask, int64_t *output_idx, int64_t n);
inline void mask_exclusive_sum(const bool *mask, int64_t *output_idx, int64_t n) {
  return mask_exclusive_sum(
      reinterpret_cast<const uint8_t*>(mask), output_idx, n);
}

}}}  // namespace at::cuda::cub
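// A minimal sketch: sorting 64-bit keys with index payloads. Buffers are
// assumed to be device memory; capping end_bit with get_num_bits skips radix
// passes when an upper bound on the keys is known. int64_t payloads satisfy
// the size/alignment static_asserts, and the matching instantiation is
// assumed to exist in cub.cu.
#include <ATen/cuda/cub.h>

void sort_indices_by_key(
    const int64_t* keys_in, int64_t* keys_out,
    const int64_t* idx_in, int64_t* idx_out,
    int64_t n, uint64_t max_key) {
  at::cuda::cub::radix_sort_pairs(
      keys_in, keys_out, idx_in, idx_out, n,
      /*descending=*/false,
      /*begin_bit=*/0,
      /*end_bit=*/at::cuda::cub::get_num_bits(max_key));
}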
3,397
36.755556
96
h
null
pytorch-main/aten/src/ATen/cuda/jiterator.h
#pragma once #include <ATen/jit_macros.h> #if AT_USE_JITERATOR() #include <c10/macros/Export.h> #include <c10/util/SmallVector.h> #include <ATen/core/Tensor.h> #include <string> #include <vector> namespace at { namespace cuda { TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel( const std::string& code_string, const std::string& kernel_name, const int num_outputs, const c10::SmallVector<at::Tensor>& tensors, const c10::SmallVector<at::Scalar>& extra_args, bool return_by_ref); }} // namespace at::cuda #else namespace at { namespace cuda { TORCH_CUDA_CPP_API c10::SmallVector<at::Tensor> CompileAndLaunchKernel( const std::string& code_string, const std::string& kernel_name, const int num_outputs, const c10::SmallVector<at::Tensor>& tensors, const c10::SmallVector<at::Scalar>& extra_args, bool return_by_ref) { TORCH_CHECK(false, "Jiterator is not supported"); } }} // namespace at::cuda #endif // AT_USE_JITERATOR()
983
23
71
h
null
pytorch-main/aten/src/ATen/cuda/jiterator_impl.h
#pragma once #include <ATen/jit_macros.h> #if AT_USE_JITERATOR() #include <c10/util/variant.h> #include <ATen/native/TensorIterator.h> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/native/cuda/MemoryAccess.cuh> #include <ATen/native/cuda/JitLoops.cuh> #include <string> #include <vector> namespace at { namespace native { #define AT_FOR_8_CASES(_) \ _(1) \ _(2) \ _(3) \ _(4) \ _(5) \ _(6) \ _(7) \ _(8) #define AT_FOR_8_CASES_WITH_COMMA(_) \ _(1) , \ _(2) , \ _(3) , \ _(4) , \ _(5) , \ _(6) , \ _(7) , \ _(8) c10::SmallVector<std::string> get_extra_args_typenames(const c10::SmallVector<at::Scalar>& extra_args) { c10::SmallVector<std::string> args_typenames(extra_args.size()); for (const auto i : c10::irange(extra_args.size())) { args_typenames[i] = at::cuda::jit::typeName(extra_args[i].type()); } return args_typenames; } int can_vectorize_up_to(at::ScalarType type, char* pointer) { switch(type) { #define DEFINE_CASE(ctype, scalartype) \ case ScalarType::scalartype : return memory::can_vectorize_up_to<ctype>(pointer); AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE) #undef DEFINE_CASE default: TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type); } } // jitted version of the above // See Note [Jiterator], this relies on the assumptions enumerated there int jitted_can_vectorize_up_to(const TensorIteratorBase& iter) { const at::ScalarType common_dtype = iter.common_dtype(); const at::ScalarType result_dtype = common_dtype; // Deals with output int result = can_vectorize_up_to(result_dtype, static_cast<char*>(iter.data_ptr(0))); // Incorporates input(s) for (auto i = 1; i < iter.ntensors(); ++i) { result = std::min<int>(result, can_vectorize_up_to(common_dtype, static_cast<char*>(iter.data_ptr(i)))); } return result; } template<bool IS_INPUT, int N> static std::unique_ptr<OffsetCalculator<N>> make_unique_offset_calculator( const TensorIteratorBase& iter) { // array size can not be 0, this happens when N == 0 constexpr int array_size = std::max<int>(N, 1); TORCH_INTERNAL_ASSERT(N == (IS_INPUT ? iter.ninputs() : iter.noutputs())); std::array<const int64_t*, array_size> strides; int64_t element_sizes[array_size]; for (int i = 0; i < N; i++) { int index = IS_INPUT ? i + iter.noutputs() : i; strides[i] = iter.strides(index).data(); element_sizes[i] = iter.element_size(index); } return std::make_unique<OffsetCalculator<N>>(iter.ndim(), iter.shape().data(), strides.data(), element_sizes); } template <bool IS_INPUT> struct OffsetCalculatorVariant { #define DEFINE_CASE(index) std::unique_ptr<OffsetCalculator<index>> using OffsetCalculatorTypes = c10::variant< AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) >; #undef DEFINE_CASE OffsetCalculatorVariant(const TensorIteratorBase& iter) { int num = IS_INPUT ? 
iter.ninputs() : iter.noutputs(); switch(num) { #define DEFINE_CASE(index) \ case index : v = make_unique_offset_calculator<IS_INPUT, index>(iter); break; AT_FOR_8_CASES(DEFINE_CASE) #undef DEFINE_CASE default: TORCH_CHECK(false, "OffsetCalculatorVariant is not implemented for num_tensor = ", num); } } void* data_ptr() { return c10::visit([](auto & v){ return static_cast<void*>(v.get()); }, v); } private: OffsetCalculatorTypes v; }; struct ArrayVariant { // works for up to 8 input + 8 outputs #define DEFINE_CASE(index) at::detail::Array<char*, index>, at::detail::Array<char*, index+8> using ArrayTypes = c10::variant< AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) >; #undef DEFINE_CASE ArrayVariant(const TensorIteratorBase& iter) { int ntensors = iter.ntensors(); switch(ntensors) { #define DEFINE_CASE(index) \ case index: array = at::detail::Array<char*, index>{}; break; \ case index+8: array = at::detail::Array<char*, index+8>{}; break; AT_FOR_8_CASES(DEFINE_CASE) #undef DEFINE_CASE default: TORCH_CHECK(false, "ArrayVariant is not implemented for ntensors = ", ntensors); } c10::visit([&](auto& a) { for (auto i = 0; i < ntensors; ++i) { a[i] = (char*)iter.data_ptr(i); } }, array); } void* data_ptr() { return c10::visit([](auto & a){ return static_cast<void*>(&a); }, array); } private: ArrayTypes array; }; struct TrivialOffsetCalculatorVariant { #define DEFINE_CASE(index) TrivialOffsetCalculator<index> using TrivialOffsetCalculatorTypes = c10::variant< AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) >; #undef DEFINE_CASE TrivialOffsetCalculatorVariant(int num) { switch(num) { #define DEFINE_CASE(index) \ case index: v = TrivialOffsetCalculator<index>(); break; AT_FOR_8_CASES(DEFINE_CASE) #undef DEFINE_CASE default: TORCH_CHECK(false, "TrivialOffsetCalculatorVariant is not implemented for num_tensors = ", num); } } void* data_ptr() { return c10::visit([](auto & v){ return static_cast<void*>(&v); }, v); } private: TrivialOffsetCalculatorTypes v; }; struct LoadWithCastVariant { #define DEFINE_CASE(index) std::unique_ptr<memory::LoadWithCast<index>> using LoadWithCastPtr = c10::variant< AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) >; #undef DEFINE_CASE LoadWithCastVariant(const TensorIteratorBase& iter) { int arity = iter.ninputs(); switch(arity) { #define DEFINE_CASE(index) \ case index: v = std::make_unique<memory::LoadWithCast<index>>(iter); break; AT_FOR_8_CASES(DEFINE_CASE) #undef DEFINE_CASE default: TORCH_CHECK(false, "LoadWithCastVariant is not implemented for ninputs = ", arity); } } void* data_ptr() { return c10::visit([](auto & v){ return static_cast<void*>(v.get()); }, v); } private: LoadWithCastPtr v; }; struct StoreWithCastVariant { #define DEFINE_CASE(index) std::unique_ptr<memory::StoreWithCast<index>> using StoreWithCastPtr = c10::variant< AT_FOR_8_CASES_WITH_COMMA(DEFINE_CASE) >; #undef DEFINE_CASE StoreWithCastVariant(const TensorIteratorBase& iter) { int num = iter.noutputs(); switch(num) { #define DEFINE_CASE(index) \ case index: v = std::make_unique<memory::StoreWithCast<index>>(iter); break; AT_FOR_8_CASES(DEFINE_CASE) #undef DEFINE_CASE default: TORCH_CHECK(false, "StoreWithCastVariant is not implemented for noutputs = ", num); } } void* data_ptr() { return c10::visit([](auto & v){ return static_cast<void*>(v.get()); }, v); } private: StoreWithCastPtr v; }; }} // namespace at::native #endif // AT_USE_JITERATOR()
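// --- Illustrative usage (editor's addition, not part of the original header).
// A minimal sketch of how the type-erased variants above get combined before a
// jitted launch; `iter` is assumed to be a fully configured TensorIterator and
// the function name is hypothetical.
#if AT_USE_JITERATOR()
inline void example_collect_kernel_args(const at::TensorIteratorBase& iter) {
  at::native::OffsetCalculatorVariant</*IS_INPUT=*/true> input_offsets(iter);
  at::native::OffsetCalculatorVariant</*IS_INPUT=*/false> output_offsets(iter);
  at::native::ArrayVariant data(iter);
  // The erased void* pointers are what the generated kernel receives,
  // regardless of how many tensors participate.
  void* args[] = {
      data.data_ptr(), input_offsets.data_ptr(), output_offsets.data_ptr()};
  (void)args;
}
#endif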
7,127
27.398406
112
h
null
pytorch-main/aten/src/ATen/cuda/detail/CUDAHooks.h
#pragma once #include <ATen/detail/CUDAHooksInterface.h> #include <ATen/Generator.h> #include <c10/util/Optional.h> // TODO: No need to have this whole header, we can just put it all in // the cpp file namespace at { namespace cuda { namespace detail { // Set the callback to initialize Magma, which is set by // torch_cuda_cu. This indirection is required so magma_init is called // in the same library where Magma will be used. TORCH_CUDA_CPP_API void set_magma_init_fn(void (*magma_init_fn)()); // The real implementation of CUDAHooksInterface struct CUDAHooks : public at::CUDAHooksInterface { CUDAHooks(at::CUDAHooksArgs) {} void initCUDA() const override; Device getDeviceFromPtr(void* data) const override; bool isPinnedPtr(const void* data) const override; const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const override; bool hasCUDA() const override; bool hasMAGMA() const override; bool hasCuDNN() const override; bool hasCuSOLVER() const override; bool hasROCM() const override; const at::cuda::NVRTC& nvrtc() const override; int64_t current_device() const override; bool hasPrimaryContext(int64_t device_index) const override; Allocator* getCUDADeviceAllocator() const override; Allocator* getPinnedMemoryAllocator() const override; bool compiledWithCuDNN() const override; bool compiledWithMIOpen() const override; bool supportsDilatedConvolutionWithCuDNN() const override; bool supportsDepthwiseConvolutionWithCuDNN() const override; bool supportsBFloat16ConvolutionWithCuDNNv8() const override; bool hasCUDART() const override; long versionCUDART() const override; long versionCuDNN() const override; std::string showConfig() const override; double batchnormMinEpsilonCuDNN() const override; int64_t cuFFTGetPlanCacheMaxSize(int64_t device_index) const override; void cuFFTSetPlanCacheMaxSize(int64_t device_index, int64_t max_size) const override; int64_t cuFFTGetPlanCacheSize(int64_t device_index) const override; void cuFFTClearPlanCache(int64_t device_index) const override; int getNumGPUs() const override; void deviceSynchronize(int64_t device_index) const override; }; }}} // at::cuda::detail
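// --- Illustrative usage (editor's addition, not part of the original header).
// A minimal sketch of how the Magma hook above might be wired up from the
// library that actually links Magma; the wrapper name is hypothetical.
//
//   static void torch_magma_init() { magma_init(); }
//   // Run once when torch_cuda_cu is initialized:
//   at::cuda::detail::set_magma_init_fn(&torch_magma_init);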
2,210
39.2
89
h
null
pytorch-main/aten/src/ATen/cuda/detail/DeviceThreadHandles.h
// Some stateful GPU libraries, such as cuDNN and cuBLAS, use handles to store states. // These handles are tied to a device, and these libraries require/recommend not to // share handles across host threads. // // These libraries recommend using one handle per host thread. We may not want to do // this because threads are relatively light-weight, but creating and destroying // handles is expensive (destroying the handle causes synchronizations). DataParallel, // for example, creates new threads for each forward pass. // // This file implements a handle pool mechanism. The handle pool returns handles on // demand as threads request them. If all existing handles in the pool are in use, // it creates a new one. As threads terminate, they release handles back into the pool. // In this way, the handle pool never creates more handles than the high-water mark of // active threads, so it's efficient with DataParallel. #pragma once #include <unordered_map> #include <vector> #include <utility> #include <mutex> #include <memory> #include <c10/util/Exception.h> namespace at { namespace cuda { namespace { template <typename Handle_t, void Create(Handle_t *), void Destroy(Handle_t)> struct DeviceThreadHandlePool : public std::enable_shared_from_this<DeviceThreadHandlePool<Handle_t, Create, Destroy>> { struct Handle { Handle_t handle; Handle(bool create = false) : handle(nullptr) { if(create) Create(&handle); } // std::vector.emplace() and push_back() may route through temporaries and call // copy/move constructors along the way. If this is the case, we don't want // the destructors of temporaries to call cudnnDestroy on the handle. // We can achieve safety (for the narrow case of stashing within std::vectors) // by making Handle moveable but not copyable, and transferring handle ownership // to the latest constructed object. This is not a substitute for full-blown // reference counting, but reference counting may be overkill here. // Another alternative is to wrap the saved Handles in unique_ptrs, i.e., // unordered_map<int, vector<unique_ptr<Handle>>> created_handles; Handle(const Handle& rhs) = delete; // Following https://stackoverflow.com/questions/3279543/what-is-the-copy-and-swap-idiom Handle(Handle&& rhs) : Handle() { std::swap(handle, rhs.handle); } // operator= takes argument by value Handle& operator=(Handle rhs) { std::swap(handle, rhs.handle); return *this; } ~Handle() { if(handle) Destroy(handle); } }; std::mutex mutex; // Handles are lazily created as different threads request them, // but are never destroyed until the end of the process. // The maximum number of handles this process will create for each device is equal // to the high-water mark of the number of concurrently active threads that request // handles for that device. // When threads terminate, they release their handles back into the pool for reuse. // Otherwise, new handles would be created every time new threads were spawned, // resulting in poor performance for Python modules that repeatedly or frequently // spawned new sets of threads (like DataParallel, which creates a new set of threads // for each forward pass). // // To prevent potential deadlocks, we explicitly choose not to cap the number // of handles that are created per device. // Example of danger: If we cap the max handles at 4, and 5 threads are sharing a device, // only 4 can make forward progress at any time. Those 4 will not release their // handles until they exit, so the fifth cannot make progress until then.
This is // not a problem...UNLESS all 5 threads attempt some sort of synchronization at an // intermediate point (i.e., before any of them have exited). We have no way to anticipate // or enforce that user threads will not attempt such intermediate synchronization. // The only way to ensure safety is to avoid imposing a cap on the number of handles. std::unordered_map<int, std::vector<Handle>> created_handles; std::unordered_map<int, std::vector<Handle_t>> available_handles; // PoolWindow lazily creates and caches the handles that a particular thread is using, // so in the common case handle access doesn't incur either handle creation or a mutex lock. class PoolWindow { public: PoolWindow(std::shared_ptr<DeviceThreadHandlePool> parent): weak_parent(std::move(parent)) {} ~PoolWindow(){ release(); } Handle_t reserve(int device) { // If this thread already has a handle for this device, return it if(my_handles.find(device) != my_handles.end()) return my_handles[device]; // otherwise, either grab a handle from the pool if one is available, // or if not, create a new one. auto parent = weak_parent.lock(); TORCH_CHECK(parent, "Cannot create handle during program termination"); std::lock_guard<std::mutex> guard(parent->mutex); if(parent->available_handles[device].size() > 0) { my_handles[device] = parent->available_handles[device].back(); parent->available_handles[device].pop_back(); } else { // In local testing, I do observe that emplace_back sometimes routes through temporaries // that incur move-constructor and destructor calls. See comments in Handle above. parent->created_handles[device].emplace_back(true /*create*/); my_handles[device] = parent->created_handles[device].back().handle; } return my_handles[device]; } private: // Stores the per-device handles currently owned by this thread std::unordered_map<int, Handle_t> my_handles; std::weak_ptr<DeviceThreadHandlePool> weak_parent; // Called by the destructor. Releases this thread's handles back into the pool. void release() { if(my_handles.size() > 0) { auto parent = weak_parent.lock(); if (!parent) { // If this thread exits after atexit handlers have completed, the // cuda context itself may be invalid, so we must leak the handles. return; } std::lock_guard<std::mutex> guard(parent->mutex); for(auto d_h : my_handles) parent->available_handles[d_h.first].push_back(d_h.second); } } }; // Warning: // If you want to change this function, be aware that this function will be called // by multiple threads and there is no mutex guarding the call of this function, so // make sure your implementation is thread-safe. PoolWindow *newPoolWindow() { // The returned pointer will be owned by a thread local variable // so that different threads do not share the same PoolWindow. return new PoolWindow(this->shared_from_this()); } }; }}} // namespace at::cuda::detail::<anonymous>
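// --- Illustrative usage (editor's addition, not part of the original header).
// A minimal sketch of how a library wrapper typically instantiates this pool;
// MyHandle_t, myCreate, and myDestroy stand in for the real library calls
// (e.g., small wrappers around cublasCreate / cublasDestroy).
//
//   using MyPool = DeviceThreadHandlePool<MyHandle_t, myCreate, myDestroy>;
//   static auto pool = std::make_shared<MyPool>();
//   // One window per thread; its destructor returns the thread's handles
//   // to the pool when the thread exits.
//   thread_local std::unique_ptr<MyPool::PoolWindow> window(pool->newPoolWindow());
//   MyHandle_t handle = window->reserve(device_index);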
7,020
45.190789
120
h
null
pytorch-main/aten/src/ATen/cuda/detail/KernelUtils.h
#pragma once #include <limits> #include <c10/util/Exception.h> namespace at { namespace cuda { namespace detail { // CUDA: grid stride looping // // int64_t _i_n_d_e_x specifically prevents overflow in the loop increment. // If input.numel() < INT_MAX, _i_n_d_e_x < INT_MAX, except after the final // iteration of the loop where _i_n_d_e_x += blockDim.x * gridDim.x can be // greater than INT_MAX. But in that case _i_n_d_e_x >= n, so there are no // further iterations and the overflowed value in i=_i_n_d_e_x is not used. #define CUDA_KERNEL_LOOP_TYPE(i, n, index_type) \ int64_t _i_n_d_e_x = blockIdx.x * blockDim.x + threadIdx.x; \ for (index_type i=_i_n_d_e_x; _i_n_d_e_x < (n); _i_n_d_e_x+=blockDim.x * gridDim.x, i=_i_n_d_e_x) #define CUDA_KERNEL_LOOP(i, n) CUDA_KERNEL_LOOP_TYPE(i, n, int) // Use 1024 threads per block, which requires cuda sm_2x or above constexpr int CUDA_NUM_THREADS = 1024; // CUDA: number of blocks for threads. inline int GET_BLOCKS(const int64_t N, const int64_t max_threads_per_block=CUDA_NUM_THREADS) { TORCH_INTERNAL_ASSERT(N > 0, "CUDA kernel launch blocks must be positive, but got N=", N); constexpr int64_t max_int = std::numeric_limits<int>::max(); // Round up division for positive number that cannot cause integer overflow auto block_num = (N - 1) / max_threads_per_block + 1; TORCH_INTERNAL_ASSERT(block_num <= max_int, "Can't schedule too many blocks on CUDA device"); return static_cast<int>(block_num); } }}} // namespace at::cuda::detail
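// --- Illustrative usage (editor's addition, not part of the original header).
// A minimal sketch of the intended pattern, assuming a .cu translation unit;
// the kernel and launcher names are hypothetical.
//
//   __global__ void fill_ones(float* out, int64_t n) {
//     CUDA_KERNEL_LOOP(i, n) {
//       out[i] = 1.0f;
//     }
//   }
//
//   void launch_fill_ones(float* out, int64_t n, cudaStream_t stream) {
//     using namespace at::cuda::detail;
//     fill_ones<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, stream>>>(out, n);
//   }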
1,547
39.736842
99
h
null
pytorch-main/aten/src/ATen/cuda/nvrtc_stub/ATenNVRTC.h
#pragma once #include <ATen/cuda/ATenCUDAGeneral.h> #include <cuda.h> #include <nvrtc.h> namespace at { namespace cuda { // NOTE [ USE OF NVRTC AND DRIVER API ] // // ATen does not directly link to either libnvrtc or libcuda because they // require libcuda to be installed, yet we want our GPU build to work on CPU // machines as long as CUDA is not initialized. // // Normal CUDA code in torch uses the cuda runtime libraries which can be // installed even if the driver is not installed, but sometimes we specifically // need to use the driver API (e.g., to load JIT compiled code). // To accomplish this, we lazily link libcaffe2_nvrtc which provides a struct // at::cuda::NVRTC that contains function pointers to all of the apis we need. // // IT IS AN ERROR TO TRY TO CALL ANY nvrtc* or cu* FUNCTION DIRECTLY. // INSTEAD USE, e.g. // detail::getCUDAHooks().nvrtc().cuLoadModule(...) // or // globalContext().getNVRTC().cuLoadModule(...) // // If a function is missing, add it to the list in ATen/cuda/nvrtc_stub/ATenNVRTC.h // and edit ATen/cuda/detail/LazyNVRTC.cpp accordingly (e.g., via one of the stub // macros). #if !defined(USE_ROCM) #define AT_FORALL_NVRTC_BASE(_) \ _(nvrtcVersion) \ _(nvrtcAddNameExpression) \ _(nvrtcCreateProgram) \ _(nvrtcDestroyProgram) \ _(nvrtcGetPTXSize) \ _(nvrtcGetPTX) \ _(nvrtcCompileProgram) \ _(nvrtcGetErrorString) \ _(nvrtcGetProgramLogSize) \ _(nvrtcGetProgramLog) \ _(nvrtcGetLoweredName) \ _(cuModuleLoadData) \ _(cuModuleLoadDataEx) \ _(cuModuleGetFunction) \ _(cuOccupancyMaxActiveBlocksPerMultiprocessor) \ _(cuGetErrorString) \ _(cuLaunchKernel) \ _(cuLaunchCooperativeKernel) \ _(cuCtxGetCurrent) \ _(cuModuleUnload) \ _(cuDevicePrimaryCtxGetState) \ _(cuLinkCreate) \ _(cuLinkAddData) \ _(cuLinkComplete) \ _(cuFuncSetAttribute) \ _(cuFuncGetAttribute) #if defined(CUDA_VERSION) && CUDA_VERSION >= 11010 #define AT_FORALL_NVRTC(_) \ AT_FORALL_NVRTC_BASE(_) \ _(nvrtcGetCUBINSize) \ _(nvrtcGetCUBIN) #else #define AT_FORALL_NVRTC(_) \ AT_FORALL_NVRTC_BASE(_) #endif #else // NOTE [ ATen NVRTC Stub and HIP ] // // ATen's NVRTC stub library, caffe2_nvrtc, provides dynamic loading of both // NVRTC and driver APIs. While the former is not yet supported for HIP, the // latter is supported and needed (e.g., in CUDAHooks::getDeviceWithPrimaryContext() // used by tensor.pin_memory()). // // The macro below strips out certain unsupported operations on HIP from the full // list above. // // HIP doesn't have // cuGetErrorString (maps to non-functional hipGetErrorString___) // // HIP from ROCm 3.5 on renamed hipOccupancyMaxActiveBlocksPerMultiprocessor // to hipModuleOccupancyMaxActiveBlocksPerMultiprocessor.
#if TORCH_HIP_VERSION < 305 #define HIPOCCUPANCYMAXACTIVEBLOCKSPERMULTIPROCESSOR hipOccupancyMaxActiveBlocksPerMultiprocessor #else #define HIPOCCUPANCYMAXACTIVEBLOCKSPERMULTIPROCESSOR cuOccupancyMaxActiveBlocksPerMultiprocessor #endif #define AT_FORALL_NVRTC(_) \ _(nvrtcVersion) \ _(nvrtcCreateProgram) \ _(nvrtcAddNameExpression) \ _(nvrtcDestroyProgram) \ _(nvrtcGetPTXSize) \ _(nvrtcGetPTX) \ _(cuModuleLoadData) \ _(cuModuleGetFunction) \ _(HIPOCCUPANCYMAXACTIVEBLOCKSPERMULTIPROCESSOR) \ _(nvrtcGetErrorString) \ _(nvrtcGetProgramLogSize) \ _(nvrtcGetProgramLog) \ _(cuLaunchKernel) \ _(nvrtcCompileProgram) \ _(cuCtxGetCurrent) \ _(nvrtcGetLoweredName) \ _(cuModuleUnload) \ _(cuDevicePrimaryCtxGetState) #endif extern "C" typedef struct NVRTC { #define CREATE_MEMBER(name) decltype(&name) name; AT_FORALL_NVRTC(CREATE_MEMBER) #undef CREATE_MEMBER } NVRTC; extern "C" TORCH_CUDA_CPP_API NVRTC* load_nvrtc(); }} // at::cuda
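// --- Illustrative usage (editor's addition, not part of the original header).
// A minimal sketch of the sanctioned call pattern described in the note above;
// error handling on the returned nvrtcResult is elided.
//
//   const auto& nvrtc = at::globalContext().getNVRTC();
//   int major = 0, minor = 0;
//   nvrtc.nvrtcVersion(&major, &minor);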
4,807
37.464
97
h
null
pytorch-main/aten/src/ATen/cudnn/Descriptors.h
#pragma once #include <string> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include <ATen/cudnn/cudnn-wrapper.h> #include <ATen/cudnn/Utils.h> #include <ATen/core/Tensor.h> #include <ATen/TensorUtils.h> #include <ATen/cuda/ATenCUDAGeneral.h> #include <cuda.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #else #include <ATen/ops/empty.h> #endif namespace at { namespace native { std::string cudnnTypeToString(cudnnDataType_t dtype); // TODO: Add constructors for all of the descriptors inline int dataSize(cudnnDataType_t dataType) { switch (dataType) { #if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8200 case CUDNN_DATA_BFLOAT16: #endif case CUDNN_DATA_HALF: return 2; case CUDNN_DATA_FLOAT: return 4; default: return 8; } } // The stride for a size-1 dimension is not uniquely determined; in // fact, it can be anything you want, because the fact that the // tensor is size 1 at this dimension means that you will never actually // try advancing your pointer by this stride. // // However, CuDNN has a much more stringent requirement on strides: // if you are passing a contiguous input, it better be the case // that the stride for dim i is the product of the sizes of dims // i+1 to the end. This stride is indeed uniquely determined. This // function modifies 'stride' in place so this invariant holds. template <typename T> static inline void fixSizeOneDimStride(int dim, const T *size, T *stride, bool nhwc) { int64_t z = 1; int index = 0; std::vector<int> permutation(dim); if (nhwc) { permutation[index++] = 1; } for (int d = dim-1; d > 1; d--) { permutation[index++] = d; } if (!nhwc) { permutation[index++] = 1; } permutation[index++] = 0; for (int d : permutation) { if (size[d] == 1) { stride[d] = z; } else { z *= size[d]; } } } template <typename T, cudnnStatus_t (*dtor)(T*)> struct DescriptorDeleter { void operator()(T* x) { if (x != nullptr) { AT_CUDNN_CHECK(dtor(x)); } } }; // A generic class for wrapping cuDNN descriptor types. All you need // is to give the underlying type the Descriptor_t points to (usually, // if it's cudnnTensorDescriptor_t it points to cudnnTensorStruct), // the constructor and the destructor. Subclasses are responsible // for defining a set() function to actually set the descriptor. // // Descriptors default construct to a nullptr, and have a descriptor // initialized the first time you call set() or any other initializing // function. template <typename T, cudnnStatus_t (*ctor)(T**), cudnnStatus_t (*dtor)(T*)> class TORCH_CUDA_CPP_API Descriptor { public: // TODO: Figure out why const-correctness doesn't work here // Use desc() to access the underlying descriptor pointer in // a read-only fashion. Most client code should use this. // If the descriptor was never initialized, this will return // nullptr. T* desc() const { return desc_.get(); } T* desc() { return desc_.get(); } // Use mut_desc() to access the underlying descriptor pointer // if you intend to modify what it points to (e.g., using // cudnnSetFooDescriptor). This will ensure that the descriptor // is initialized. Code in this file will use this function.
T* mut_desc() { init(); return desc_.get(); } protected: void init() { if (desc_ == nullptr) { T* raw_desc; AT_CUDNN_CHECK(ctor(&raw_desc)); desc_.reset(raw_desc); } } private: std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_; }; class TORCH_CUDA_CPP_API TensorDescriptor : public Descriptor< cudnnTensorStruct, &cudnnCreateTensorDescriptor, &cudnnDestroyTensorDescriptor> { public: TensorDescriptor() = default; explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) { set(t, pad); } // Note [CuDNN broadcast padding] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // pad specifies the minimum dimensionality of the tensor descriptor // we produce (it doesn't have anything to do with, e.g., convolution // padding). If 't' is lower-dimensional than 'pad', the remaining // dimensions (on the right) are padded with ones. This doesn't // affect the underlying data layout. This is particularly useful for // dealing with a peculiarity of the CuDNN API, which is that broadcasting in CuDNN is // done in two steps: first, the client code is expected to pad out // (the dimensions) input tensors to be the same dimension as the // target broadcast, and then second, CuDNN takes care of actually // broadcasting size 1 dimensions. void set(const at::Tensor &t, size_t pad = 0); void set(const at::Tensor &t, at::MemoryFormat memory_format, size_t pad = 0); void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0); void print(); private: void set(cudnnDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad, bool nhwc); void set(cudnnDataType_t dataType, int dim, int* size, int* stride, bool nhwc) { fixSizeOneDimStride<int>(dim, size, stride, nhwc); AT_CUDNN_CHECK(cudnnSetTensorNdDescriptor(mut_desc(), dataType, dim, size, stride)); } }; std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d); class TORCH_CUDA_CPP_API FilterDescriptor : public Descriptor< cudnnFilterStruct, &cudnnCreateFilterDescriptor, &cudnnDestroyFilterDescriptor> { public: void set(const at::Tensor &t, int64_t pad = 0) { set(t, at::MemoryFormat::Contiguous, pad); } void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0); void print(); private: void set(cudnnDataType_t dataType, int dim, int* size, cudnnTensorFormat_t filter_format) { AT_CUDNN_CHECK(cudnnSetFilterNdDescriptor(mut_desc(), dataType, filter_format, dim, size)); } }; std::ostream& operator<<(std::ostream & out, const FilterDescriptor& d); struct TORCH_CUDA_CPP_API ConvolutionDescriptor : public Descriptor< cudnnConvolutionStruct, &cudnnCreateConvolutionDescriptor, &cudnnDestroyConvolutionDescriptor> { void set(cudnnDataType_t dataType, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool allow_tf32) { cudnnDataType_t mathType = dataType; if (dataType == CUDNN_DATA_HALF) mathType = CUDNN_DATA_FLOAT; AT_CUDNN_CHECK(cudnnSetConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale, CUDNN_CROSS_CORRELATION, mathType)); AT_CUDNN_CHECK(cudnnSetConvolutionGroupCount(mut_desc(), groups)); // See Note [behavior of cudnnFind and cudnnGet] AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_DEFAULT_MATH)); if(dataType == CUDNN_DATA_HALF) { AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_TENSOR_OP_MATH)); } else if (dataType == CUDNN_DATA_FLOAT && !allow_tf32) { #if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8000 AT_CUDNN_CHECK(cudnnSetConvolutionMathType(mut_desc(), CUDNN_FMA_MATH)); #endif } } }; struct TORCH_CUDA_CPP_API SpatialTransformerDescriptor
: public Descriptor< cudnnSpatialTransformerStruct, &cudnnCreateSpatialTransformerDescriptor, &cudnnDestroySpatialTransformerDescriptor> { void set(cudnnDataType_t dataType, int dim, int* size) { AT_CUDNN_CHECK(cudnnSetSpatialTransformerNdDescriptor(mut_desc(), CUDNN_SAMPLER_BILINEAR, dataType, dim, size)); } }; struct TORCH_CUDA_CPP_API DropoutDescriptor : public Descriptor< cudnnDropoutStruct, &cudnnCreateDropoutDescriptor, &cudnnDestroyDropoutDescriptor> { at::Tensor state; // Initialize a dropout descriptor's RNG state. // WARNING: This function is very expensive, avoid calling this function! void initialize_rng(cudnnHandle_t handle, float dropout, long long int seed, const TensorOptions& options) { TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout"); size_t state_size; AT_CUDNN_CHECK(cudnnDropoutGetStatesSize(handle, &state_size)); AT_ASSERT(options.device().type() == kCUDA); AT_ASSERT(options.dtype() == kByte); state = at::empty({static_cast<int64_t>(state_size)}, options); AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, dropout, state.data_ptr(), state_size, seed)); } // Restore a dropout descriptor given a dropout probability and existing RNG state. void set(cudnnHandle_t handle, float dropout, at::Tensor state_) { TORCH_INTERNAL_ASSERT(dropout > 0, "dropout must be nonzero; otherwise call set_no_dropout"); state = state_; void *state_ptr = state.data_ptr(); size_t state_size = state.size(0); // NB: The seed doesn't actually matter, so we give a dummy value AT_CUDNN_CHECK(cudnnRestoreDropoutDescriptor(mut_desc(), handle, dropout, state_ptr, state_size, 0 /* seed */)); } // Restore a dropout descriptor corresponding to no dropout void set_no_dropout(cudnnHandle_t handle) { // NB: seed doesn't matter when dropout = 0, because no random number // initialization actually takes place when there is no dropout. // NB: Empirically, cudnnSetDropoutDescriptor is cheap when // dropout == 0 AT_CUDNN_CHECK(cudnnSetDropoutDescriptor(mut_desc(), handle, 0 /* dropout */, nullptr, 0 /* state_size */, 0 /* seed */)); } }; struct TORCH_CUDA_CPP_API RNNDescriptor : public Descriptor< cudnnRNNStruct, &cudnnCreateRNNDescriptor, &cudnnDestroyRNNDescriptor> { DropoutDescriptor dropout_desc_; void set(cudnnHandle_t handle, int hidden_size, int proj_size, int num_layers, DropoutDescriptor&& dropout_desc, cudnnRNNInputMode_t input_mode, cudnnDirectionMode_t bidirectional, cudnnRNNMode_t mode, cudnnDataType_t datatype, cudnnDataType_t input_type, cudnnRNNAlgo_t algo, bool allow_tf32) { dropout_desc_ = std::move(dropout_desc); AT_CUDNN_CHECK(cudnnSetRNNDescriptor_v6( handle, mut_desc(), hidden_size, num_layers, dropout_desc_.desc(), input_mode, bidirectional, mode, algo, datatype)); if (proj_size != 0) { AT_CUDNN_CHECK(cudnnSetRNNProjectionLayers( handle, /*rnnDesc=*/mut_desc(), /*recProjSize=*/proj_size, /*outProjSize=*/0)); } cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties(); if (prop->major >= 7) { if (input_type == CUDNN_DATA_HALF) { cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_TENSOR_OP_MATH); } #if defined(CUDNN_VERSION) && CUDNN_VERSION >= 8000 else if (input_type == CUDNN_DATA_FLOAT && !allow_tf32) { cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_FMA_MATH); } #endif else { // Technically, as the default it's not necessary to explicitly // set this. 
cudnnSetRNNMatrixMathType(mut_desc(), CUDNN_DEFAULT_MATH); } } } }; struct TORCH_CUDA_CPP_API CTCLossDescriptor : public Descriptor< cudnnCTCLossStruct, &cudnnCreateCTCLossDescriptor, &cudnnDestroyCTCLossDescriptor> { void set(cudnnDataType_t datatype) { AT_CUDNN_CHECK(cudnnSetCTCLossDescriptor(mut_desc(), datatype)); } #if CUDNN_VERSION >= 7600 void setEx( cudnnDataType_t datatype, cudnnLossNormalizationMode_t normMode, cudnnNanPropagation_t gradMode) { AT_CUDNN_CHECK( cudnnSetCTCLossDescriptorEx(mut_desc(), datatype, normMode, gradMode)); } #endif }; struct TORCH_CUDA_CPP_API ActivationDescriptor : public Descriptor< cudnnActivationStruct, &cudnnCreateActivationDescriptor, &cudnnDestroyActivationDescriptor> { void set(cudnnActivationMode_t mode) { AT_ASSERT( mode == CUDNN_ACTIVATION_RELU, "TODO: support more cuDNN activation modes"); AT_CUDNN_CHECK(cudnnSetActivationDescriptor( mut_desc(), mode, cudnnNanPropagation_t::CUDNN_NOT_PROPAGATE_NAN, std::numeric_limits<double>::max())); } }; union Constant { float f; double d; Constant(cudnnDataType_t dataType, double value) { if (dataType == CUDNN_DATA_HALF || dataType == CUDNN_DATA_FLOAT) { f = static_cast<float>(value); } else { d = value; } } }; }} // namespace
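// --- Illustrative usage (editor's addition, not part of the original header).
// A minimal sketch of building a padded tensor descriptor, per
// Note [CuDNN broadcast padding]; the function name is hypothetical and `t`
// is assumed to be a CUDA tensor of 4 or fewer dimensions.
inline void example_describe_tensor(const at::Tensor& t) {
  // Pad the descriptor out to the 4-d shape most cuDNN entry points expect.
  at::native::TensorDescriptor desc(t, /*pad=*/4);
  cudnnTensorDescriptor_t raw = desc.desc();  // read-only handle
  (void)raw;  // would be passed to a cudnn* call alongside t.data_ptr()
}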
12,753
35.44
133
h
null
pytorch-main/aten/src/ATen/cudnn/Utils.h
#pragma once #include <ATen/core/Tensor.h> #include <ATen/cuda/Exceptions.h> #include <ATen/cudnn/cudnn-wrapper.h> #include <ATen/cudnn/Handle.h> namespace at { namespace native { // cuDNN has a buggy check for a tensor being contiguous (that is, it does // not ignore strides that are equal to 0). This function // makes tensors which have a zero stride contiguous by materializing a // contiguous copy, whose strides cuDNN accepts. inline Tensor contiguousIfZeroInStrides(const Tensor& t) { for (auto s : t.strides()) { if (s == 0) return t.contiguous(); } return t; } }}
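// --- Illustrative usage (editor's addition, not part of the original header).
// A minimal sketch, assuming a 1-d bias that gets broadcast before a cuDNN
// call; the function name and shapes are hypothetical.
inline at::Tensor example_cudnn_safe_bias(const at::Tensor& bias) {
  // expand() produces stride-0 dimensions, which trip cuDNN's buggy
  // contiguity check; materialize a real copy first.
  at::Tensor expanded = bias.reshape({1, bias.size(0), 1, 1})
                            .expand({4, bias.size(0), 32, 32});
  return at::native::contiguousIfZeroInStrides(expanded);
}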
577
25.272727
72
h
null
pytorch-main/aten/src/ATen/detail/CUDAHooksInterface.h
#pragma once #include <c10/core/Allocator.h> #include <c10/util/Exception.h> #include <c10/util/Optional.h> #include <c10/util/Registry.h> #include <cstddef> #include <functional> #include <memory> // Forward-declares at::Context, at::Generator and at::cuda::NVRTC namespace at { class Context; struct Generator; namespace cuda { struct NVRTC; } // namespace cuda } // namespace at // NB: Class must live in `at` due to limitations of Registry.h. namespace at { #ifdef _MSC_VER constexpr const char* CUDA_HELP = "PyTorch splits its backend into two shared libraries: a CPU library " "and a CUDA library; this error has occurred because you are trying " "to use some CUDA functionality, but the CUDA library has not been " "loaded by the dynamic linker for some reason. The CUDA library MUST " "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! " "One common culprit is a lack of -INCLUDE:?warp_size@cuda@at@@YAHXZ " "in your link arguments; many dynamic linkers will delete dynamic library " "dependencies if you don't depend on any of their symbols. You can check " "if this has occurred by using link on your binary to see if there is a " "dependency on *_cuda.dll library."; #else constexpr const char* CUDA_HELP = "PyTorch splits its backend into two shared libraries: a CPU library " "and a CUDA library; this error has occurred because you are trying " "to use some CUDA functionality, but the CUDA library has not been " "loaded by the dynamic linker for some reason. The CUDA library MUST " "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! " "One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many " "dynamic linkers will delete dynamic library dependencies if you don't " "depend on any of their symbols. You can check if this has occurred by " "using ldd on your binary to see if there is a dependency on *_cuda.so " "library."; #endif // The CUDAHooksInterface is an omnibus interface for any CUDA functionality // which we may want to call into from CPU code (and thus must be dynamically // dispatched, to allow for separate compilation of CUDA code). How do I // decide if a function should live in this class? There are two tests: // // 1. Does the *implementation* of this function require linking against // CUDA libraries? // // 2. Is this function *called* from non-CUDA ATen code? // // (2) should filter out many ostensible use-cases, since many times a CUDA // function provided by ATen is only really ever used by actual CUDA code. // // TODO: Consider putting the stub definitions in another class, so that one // never forgets to implement each virtual function in the real implementation // in CUDAHooks. This probably doesn't buy us much though. struct TORCH_API CUDAHooksInterface { // This should never actually be implemented, but it is used to // squelch -Werror=non-virtual-dtor virtual ~CUDAHooksInterface() = default; // Initialize THCState and, transitively, the CUDA state virtual void initCUDA() const { TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP); } virtual const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const { (void)device_index; // Suppress unused variable warning TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP); } virtual Device getDeviceFromPtr(void* /*data*/) const { TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. 
", CUDA_HELP); } virtual bool isPinnedPtr(const void* /*data*/) const { return false; } virtual bool hasCUDA() const { return false; } virtual bool hasCUDART() const { return false; } virtual bool hasMAGMA() const { return false; } virtual bool hasCuDNN() const { return false; } virtual bool hasCuSOLVER() const { return false; } virtual bool hasROCM() const { return false; } virtual const at::cuda::NVRTC& nvrtc() const { TORCH_CHECK(false, "NVRTC requires CUDA. ", CUDA_HELP); } virtual bool hasPrimaryContext(int64_t device_index) const { TORCH_CHECK(false, "Cannot call hasPrimaryContext(", device_index, ") without ATen_cuda library. ", CUDA_HELP); } virtual int64_t current_device() const { return -1; } virtual Allocator* getPinnedMemoryAllocator() const { TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP); } virtual Allocator* getCUDADeviceAllocator() const { TORCH_CHECK(false, "CUDADeviceAllocator requires CUDA. ", CUDA_HELP); } virtual bool compiledWithCuDNN() const { return false; } virtual bool compiledWithMIOpen() const { return false; } virtual bool supportsDilatedConvolutionWithCuDNN() const { return false; } virtual bool supportsDepthwiseConvolutionWithCuDNN() const { return false; } virtual bool supportsBFloat16ConvolutionWithCuDNNv8() const { return false; } virtual long versionCuDNN() const { TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP); } virtual long versionCUDART() const { TORCH_CHECK(false, "Cannot query CUDART version without ATen_cuda library. ", CUDA_HELP); } virtual std::string showConfig() const { TORCH_CHECK(false, "Cannot query detailed CUDA version without ATen_cuda library. ", CUDA_HELP); } virtual double batchnormMinEpsilonCuDNN() const { TORCH_CHECK(false, "Cannot query batchnormMinEpsilonCuDNN() without ATen_cuda library. ", CUDA_HELP); } virtual int64_t cuFFTGetPlanCacheMaxSize(int64_t /*device_index*/) const { TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); } virtual void cuFFTSetPlanCacheMaxSize(int64_t /*device_index*/, int64_t /*max_size*/) const { TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); } virtual int64_t cuFFTGetPlanCacheSize(int64_t /*device_index*/) const { TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); } virtual void cuFFTClearPlanCache(int64_t /*device_index*/) const { TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP); } virtual int getNumGPUs() const { return 0; } virtual void deviceSynchronize(int64_t /*device_index*/) const { TORCH_CHECK(false, "Cannot synchronize CUDA device without ATen_cuda library. ", CUDA_HELP); } }; // NB: dummy argument to suppress "ISO C++11 requires at least one argument // for the "..." in a variadic macro" struct TORCH_API CUDAHooksArgs {}; TORCH_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs); #define REGISTER_CUDA_HOOKS(clsname) \ C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname) namespace detail { TORCH_API const CUDAHooksInterface& getCUDAHooks(); } // namespace detail } // namespace at
7,025
32.942029
115
h
null
pytorch-main/aten/src/ATen/detail/HIPHooksInterface.h
#pragma once #include <c10/core/Allocator.h> #include <ATen/core/Generator.h> #include <c10/util/Exception.h> #include <c10/util/Registry.h> #include <cstddef> #include <functional> #include <memory> namespace at { class Context; } // NB: Class must live in `at` due to limitations of Registry.h. namespace at { // The HIPHooksInterface is an omnibus interface for any HIP functionality // which we may want to call into from CPU code (and thus must be dynamically // dispatched, to allow for separate compilation of HIP code). See // CUDAHooksInterface for more detailed motivation. struct TORCH_API HIPHooksInterface { // This should never actually be implemented, but it is used to // squelch -Werror=non-virtual-dtor virtual ~HIPHooksInterface() = default; // Initialize the HIP library state virtual void initHIP() const { AT_ERROR("Cannot initialize HIP without ATen_hip library."); } virtual std::unique_ptr<c10::GeneratorImpl> initHIPGenerator(Context*) const { AT_ERROR("Cannot initialize HIP generator without ATen_hip library."); } virtual bool hasHIP() const { return false; } virtual int64_t current_device() const { return -1; } virtual Allocator* getPinnedMemoryAllocator() const { AT_ERROR("Pinned memory requires HIP."); } virtual void registerHIPTypes(Context*) const { AT_ERROR("Cannot registerHIPTypes() without ATen_hip library."); } virtual int getNumGPUs() const { return 0; } }; // NB: dummy argument to suppress "ISO C++11 requires at least one argument // for the "..." in a variadic macro" struct TORCH_API HIPHooksArgs {}; TORCH_DECLARE_REGISTRY(HIPHooksRegistry, HIPHooksInterface, HIPHooksArgs); #define REGISTER_HIP_HOOKS(clsname) \ C10_REGISTER_CLASS(HIPHooksRegistry, clsname, clsname) namespace detail { TORCH_API const HIPHooksInterface& getHIPHooks(); } // namespace detail } // namespace at
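// --- Illustrative usage (editor's addition, not part of the original header).
// A minimal sketch of dispatching through the registry; the function name is
// hypothetical.
inline bool example_hip_available() {
  // Resolves to the registered HIP hooks when ATen_hip is loaded, and to the
  // default stubs above (which report no HIP) otherwise.
  return at::detail::getHIPHooks().hasHIP();
}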
1,912
25.569444
80
h