Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h +119 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h +18 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h +58 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h +14 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h +239 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h +13 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h +51 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h +10 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h +23 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h +14 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h +25 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h +31 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h +5 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h +66 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h +68 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h +44 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h +455 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h +45 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h +10 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h +410 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h +19 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h +36 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h +26 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h +21 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_operators.h +25 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h +61 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h +34 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h +1279 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h +45 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h +9 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h +6 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h +398 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h +146 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h +79 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h +98 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h +36 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h +286 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h +104 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h +324 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h +83 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h +493 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h +111 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h +25 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h +57 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h +129 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h +111 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h +212 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h +281 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h +22 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h +62 -0
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend.h
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/builtin_function.h>
|
| 4 |
+
#include <ATen/core/stack.h>
|
| 5 |
+
#include <torch/csrc/jit/backends/backend_interface.h>
|
| 6 |
+
#include <torch/custom_class.h>
|
| 7 |
+
|
| 8 |
+
namespace torch {
|
| 9 |
+
namespace jit {
|
| 10 |
+
namespace {
|
| 11 |
+
// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
|
| 12 |
+
inline c10::FunctionSchema getIsAvailableSchema() {
|
| 13 |
+
c10::Argument self("self", c10::AnyType::get());
|
| 14 |
+
c10::Argument available("available", c10::BoolType::get());
|
| 15 |
+
c10::FunctionSchema preprocessor_schema(
|
| 16 |
+
"is_available",
|
| 17 |
+
/*overload_name=*/"",
|
| 18 |
+
/*arguments=*/{self},
|
| 19 |
+
/*returns=*/{available});
|
| 20 |
+
return preprocessor_schema;
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
constexpr static auto kBackendsNamespace = "__backends__";
|
| 24 |
+
|
| 25 |
+
// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
|
| 26 |
+
inline c10::FunctionSchema getCompileSchema() {
|
| 27 |
+
c10::Argument self("self", c10::AnyType::get());
|
| 28 |
+
c10::Argument mod("processed", c10::AnyType::get());
|
| 29 |
+
auto any_dict_ty =
|
| 30 |
+
c10::DictType::create(c10::StringType::get(), c10::AnyType::get());
|
| 31 |
+
c10::Argument method_compile_spec("method_compile_spec", any_dict_ty);
|
| 32 |
+
c10::Argument handles("handles", any_dict_ty);
|
| 33 |
+
|
| 34 |
+
c10::FunctionSchema compile_schema(
|
| 35 |
+
"compile",
|
| 36 |
+
/*overload_name=*/"",
|
| 37 |
+
/*arguments=*/{self, mod, method_compile_spec},
|
| 38 |
+
/*returns=*/{handles});
|
| 39 |
+
return compile_schema;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
|
| 43 |
+
inline c10::FunctionSchema getExecuteSchema() {
|
| 44 |
+
auto any_list_ty = c10::ListType::create(c10::AnyType::get());
|
| 45 |
+
c10::Argument self("self", c10::AnyType::get());
|
| 46 |
+
c10::Argument handle("handle", c10::AnyType::get());
|
| 47 |
+
c10::Argument input("input", any_list_ty);
|
| 48 |
+
c10::Argument output("output", any_list_ty);
|
| 49 |
+
return c10::FunctionSchema(
|
| 50 |
+
"execute",
|
| 51 |
+
/*overload_name=*/"",
|
| 52 |
+
/*arguments=*/{self, handle, input},
|
| 53 |
+
/*returns=*/{output});
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
template <typename TBackendInterface>
|
| 57 |
+
std::function<void(Stack&)> getIsAvailableFunc() {
|
| 58 |
+
return [](Stack& stack) {
|
| 59 |
+
auto self = pop(stack).toCustomClass<TBackendInterface>();
|
| 60 |
+
auto ret = self->is_available();
|
| 61 |
+
push(stack, ret);
|
| 62 |
+
};
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
template <typename TBackendInterface>
|
| 66 |
+
std::function<void(Stack&)> getCompileFunc() {
|
| 67 |
+
return [](Stack& stack) {
|
| 68 |
+
auto method_compile_spec = pop(stack).toGenericDict();
|
| 69 |
+
auto processed = pop(stack);
|
| 70 |
+
auto self = pop(stack).toCustomClass<TBackendInterface>();
|
| 71 |
+
auto ret = self->compile(processed, method_compile_spec);
|
| 72 |
+
push(stack, ret);
|
| 73 |
+
};
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
template <typename TBackendInterface>
|
| 77 |
+
std::function<void(Stack&)> getExecuteFunc() {
|
| 78 |
+
return [](Stack& stack) {
|
| 79 |
+
auto args = pop(stack);
|
| 80 |
+
auto handle = pop(stack);
|
| 81 |
+
auto self = pop(stack);
|
| 82 |
+
auto backend = self.toCustomClass<TBackendInterface>();
|
| 83 |
+
auto res = backend->execute(handle, args.toList());
|
| 84 |
+
push(stack, res);
|
| 85 |
+
};
|
| 86 |
+
}
|
| 87 |
+
} // namespace
|
| 88 |
+
|
| 89 |
+
// Static registration API for backends.
|
| 90 |
+
template <class TBackendInterface>
|
| 91 |
+
class backend {
|
| 92 |
+
static_assert(
|
| 93 |
+
std::is_base_of<PyTorchBackendInterface, TBackendInterface>::value,
|
| 94 |
+
"torch::jit::backend<T> requires T to inherit from PyTorchBackendInterface");
|
| 95 |
+
std::string backend_name_;
|
| 96 |
+
|
| 97 |
+
public:
|
| 98 |
+
// Registers a new backend with /p name, and the given /p preprocess
|
| 99 |
+
// function.
|
| 100 |
+
backend(const std::string& name) : backend_name_(name) {
|
| 101 |
+
static auto cls = torch::class_<TBackendInterface>(kBackendsNamespace, name)
|
| 102 |
+
.def(torch::init<>())
|
| 103 |
+
._def_unboxed(
|
| 104 |
+
"is_available",
|
| 105 |
+
getIsAvailableFunc<TBackendInterface>(),
|
| 106 |
+
getIsAvailableSchema())
|
| 107 |
+
._def_unboxed(
|
| 108 |
+
"compile",
|
| 109 |
+
getCompileFunc<TBackendInterface>(),
|
| 110 |
+
getCompileSchema())
|
| 111 |
+
._def_unboxed(
|
| 112 |
+
"execute",
|
| 113 |
+
getExecuteFunc<TBackendInterface>(),
|
| 114 |
+
getExecuteSchema());
|
| 115 |
+
}
|
| 116 |
+
};
|
| 117 |
+
|
| 118 |
+
} // namespace jit
|
| 119 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/backends/backend_preprocess.h
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/backends/backend_detail.h>
|
| 4 |
+
namespace torch {
|
| 5 |
+
namespace jit {
|
| 6 |
+
class backend_preprocess_register {
|
| 7 |
+
std::string backend_name_;
|
| 8 |
+
|
| 9 |
+
public:
|
| 10 |
+
backend_preprocess_register(
|
| 11 |
+
const std::string& name,
|
| 12 |
+
const detail::BackendPreprocessFunction& preprocess)
|
| 13 |
+
: backend_name_(name) {
|
| 14 |
+
detail::registerBackendPreprocessFunction(name, preprocess);
|
| 15 |
+
}
|
| 16 |
+
};
|
| 17 |
+
} // namespace jit
|
| 18 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/codegen/cuda/interface.h
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Export.h>
|
| 4 |
+
#include <torch/csrc/jit/ir/ir.h>
|
| 5 |
+
#include <torch/csrc/jit/passes/pass_manager.h>
|
| 6 |
+
#include <torch/csrc/jit/runtime/profiling_record.h>
|
| 7 |
+
|
| 8 |
+
/*
|
| 9 |
+
* This file contains APIs for cuda fuser;
|
| 10 |
+
*
|
| 11 |
+
* We use an empty static struct to hold the function pointers, which are
|
| 12 |
+
* registered separately. This is to support cpu-only compilation.
|
| 13 |
+
* Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp
|
| 14 |
+
*/
|
| 15 |
+
|
| 16 |
+
namespace torch {
|
| 17 |
+
namespace jit {
|
| 18 |
+
namespace fuser {
|
| 19 |
+
namespace cuda {
|
| 20 |
+
|
| 21 |
+
TORCH_API std::atomic<bool>& getCudaFusionGuardMode();
|
| 22 |
+
|
| 23 |
+
TORCH_API bool getSingletonFusion();
|
| 24 |
+
TORCH_API bool setSingletonFusion(bool value);
|
| 25 |
+
TORCH_API bool getHorizontalFusion();
|
| 26 |
+
TORCH_API bool setHorizontalFusion(bool value);
|
| 27 |
+
|
| 28 |
+
// dummy struct to allow API registration
|
| 29 |
+
struct CudaFuserInterface {
|
| 30 |
+
void (*fn_compile_n)(Node*) = nullptr;
|
| 31 |
+
void (*fn_run_n_s)(const Node*, Stack&) = nullptr;
|
| 32 |
+
void (*fn_fuse_graph)(std::shared_ptr<Graph>&) = nullptr;
|
| 33 |
+
bool (*fn_can_fuse_n)(const Node*) = nullptr;
|
| 34 |
+
void (*fn_insert_profile_inodes)(ProfilingRecord* pr) = nullptr;
|
| 35 |
+
bool (*fn_profile_n)(const Node*) = nullptr;
|
| 36 |
+
bool (*fn_skip_n)(const std::string&, bool flip) = nullptr;
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
// Get interface, this is used by registration and user facing API internally
|
| 40 |
+
TORCH_API CudaFuserInterface* getFuserInterface();
|
| 41 |
+
|
| 42 |
+
TORCH_API void compileFusionGroup(Node* fusion_node);
|
| 43 |
+
TORCH_API void runFusionGroup(const Node* fusion_node, Stack& stack);
|
| 44 |
+
TORCH_API void fuseGraph(std::shared_ptr<Graph>&);
|
| 45 |
+
TORCH_API bool canFuseNode(const Node* node);
|
| 46 |
+
TORCH_API void InsertProfileNodesForCUDAFuser(ProfilingRecord* pr);
|
| 47 |
+
TORCH_API bool profileNode(const Node* node);
|
| 48 |
+
|
| 49 |
+
TORCH_API bool skipNode(const std::string& symbol_str, bool flip = true);
|
| 50 |
+
|
| 51 |
+
TORCH_API bool isEnabled();
|
| 52 |
+
TORCH_API bool setEnabled(bool is_enabled);
|
| 53 |
+
TORCH_API bool canBeEnabled();
|
| 54 |
+
|
| 55 |
+
} // namespace cuda
|
| 56 |
+
} // namespace fuser
|
| 57 |
+
} // namespace jit
|
| 58 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/canonicalize_modified_loop.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <memory>
|
| 3 |
+
|
| 4 |
+
#include <torch/csrc/Export.h>
|
| 5 |
+
|
| 6 |
+
namespace torch::jit {
|
| 7 |
+
|
| 8 |
+
struct Graph;
|
| 9 |
+
|
| 10 |
+
// Transforms loops so that they can be represented as python
|
| 11 |
+
// for or while loops
|
| 12 |
+
TORCH_API void CanonicalizeModifiedLoops(std::shared_ptr<Graph>& graph);
|
| 13 |
+
|
| 14 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/concrete_module_type.h
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <torch/csrc/jit/api/module.h>
|
| 5 |
+
#include <torch/csrc/jit/python/pybind_utils.h>
|
| 6 |
+
#include <memory>
|
| 7 |
+
#include <string>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
namespace torch::jit {
|
| 11 |
+
|
| 12 |
+
enum class IterableModuleKind { NONE, LIST, DICT, PARAMLIST, PARAMDICT };
|
| 13 |
+
class ConcreteModuleType;
|
| 14 |
+
|
| 15 |
+
// You can think of an nn.Module as a template that corresponds to a family of
|
| 16 |
+
// JIT types. The template "arguments" are things like the constant values.
|
| 17 |
+
// e.g.
|
| 18 |
+
// class M(nn.Module):
|
| 19 |
+
// __constants__ = ["const"]
|
| 20 |
+
// ...
|
| 21 |
+
//
|
| 22 |
+
// Is similar to writing the following in C++:
|
| 23 |
+
//
|
| 24 |
+
// template<TConst>
|
| 25 |
+
// class M {
|
| 26 |
+
// ...
|
| 27 |
+
// }
|
| 28 |
+
//
|
| 29 |
+
// We need to consider each different member of the type family a different JIT
|
| 30 |
+
// type because, e.g. different constant values lead to different versions of
|
| 31 |
+
// the same method.
|
| 32 |
+
//
|
| 33 |
+
// ConcreteModuleType corresponds to a single member of the type family, with
|
| 34 |
+
// all template arguments fully specified. Two Modules that share a
|
| 35 |
+
// ConcreteModuleType can share a JIT type, and vice versa.
|
| 36 |
+
//
|
| 37 |
+
// Why not just use a JIT type to represent concrete types? Because constants,
|
| 38 |
+
// function attributes, etc. are currently not representable in the type system,
|
| 39 |
+
// so this acts a non-first-class way of tracking concrete types.
|
| 40 |
+
//
|
| 41 |
+
// ConcreteModuleType is also the source of truth for servicing all
|
| 42 |
+
// ModuleValue::attr calls. This is so we can guarantee that if two Module's
|
| 43 |
+
// share a JIT type (and thus a ConcreteModuleType), then they behave the same
|
| 44 |
+
// way when you access attributes on them.
|
| 45 |
+
|
| 46 |
+
// ConcreteModuleType has two phases.
|
| 47 |
+
// 1. Creation: First we build it up, during the ScriptModule conversion
|
| 48 |
+
// process. This is represented by ConcreteModuleTypeBuilder.
|
| 49 |
+
// ...then the converter calls ConcreteModuleTypeBuilder::build(), producing
|
| 50 |
+
// a
|
| 51 |
+
// ConcreteModuleType ready for querying.
|
| 52 |
+
// 2. Querying: We use ConcreteModuleType as a source of truth for
|
| 53 |
+
// ModuleValue::attr calls during method compilation.
|
| 54 |
+
|
| 55 |
+
// Represents a concrete type during in the process for construction. We use
|
| 56 |
+
// this to decide whether we can share types between modules.
|
| 57 |
+
class VISIBILITY_HIDDEN ConcreteModuleTypeBuilder {
|
| 58 |
+
public:
|
| 59 |
+
explicit ConcreteModuleTypeBuilder(py::object pyClass) {
|
| 60 |
+
TORCH_INTERNAL_ASSERT(pyClass);
|
| 61 |
+
pyClass_ = std::move(pyClass);
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
void addConstant(std::string name, py::object value);
|
| 65 |
+
void addConstant(std::string name, IValue value);
|
| 66 |
+
void addAttribute(
|
| 67 |
+
std::string name,
|
| 68 |
+
const TypePtr& type,
|
| 69 |
+
bool isParameter,
|
| 70 |
+
bool isBuffer);
|
| 71 |
+
void addFunctionAttribute(
|
| 72 |
+
std::string name,
|
| 73 |
+
const TypePtr& type,
|
| 74 |
+
py::object pyFunction);
|
| 75 |
+
|
| 76 |
+
void addModule(std::string name, std::shared_ptr<ConcreteModuleType> meta);
|
| 77 |
+
|
| 78 |
+
void addForwardHook(py::object hook);
|
| 79 |
+
void addForwardPreHook(py::object pre_hook);
|
| 80 |
+
|
| 81 |
+
void addOverload(
|
| 82 |
+
std::string methodName,
|
| 83 |
+
std::vector<std::string> overloadedMethodNames);
|
| 84 |
+
void addBuiltinFunction(std::string name, const std::string& symbol_name);
|
| 85 |
+
void addFailedAttribute(std::string name, std::string failureReason);
|
| 86 |
+
void addIgnoredAttribute(std::string name);
|
| 87 |
+
void setIterableModuleKind(IterableModuleKind kind);
|
| 88 |
+
|
| 89 |
+
// If a ConcreteModuleType is poisoned, it will never compare equal to any
|
| 90 |
+
// other concrete type
|
| 91 |
+
void setPoisoned();
|
| 92 |
+
|
| 93 |
+
std::shared_ptr<ConcreteModuleType> build() const {
|
| 94 |
+
return std::make_shared<ConcreteModuleType>(*this);
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
// This determines whether two modules can share a type. The container structs
|
| 98 |
+
// used by ConcreteModuleType have been defined such that operator==
|
| 99 |
+
// implements a meaningful comparison in that context.
|
| 100 |
+
bool equals(const ConcreteModuleTypeBuilder& other) const;
|
| 101 |
+
|
| 102 |
+
struct FunctionAttribute {
|
| 103 |
+
FunctionTypePtr function_;
|
| 104 |
+
py::object pyFunction_;
|
| 105 |
+
|
| 106 |
+
friend bool operator==(
|
| 107 |
+
const FunctionAttribute& lhs,
|
| 108 |
+
const FunctionAttribute& rhs) {
|
| 109 |
+
// Functions are not first class, so we can't do type comparison like a
|
| 110 |
+
// regular attribute. So we do a pointer equality check on the actual
|
| 111 |
+
// Python function object.
|
| 112 |
+
return lhs.pyFunction_.is(rhs.pyFunction_);
|
| 113 |
+
}
|
| 114 |
+
};
|
| 115 |
+
|
| 116 |
+
struct Attribute {
|
| 117 |
+
Attribute(TypePtr type, bool isParam, bool isBuffer)
|
| 118 |
+
: type_(std::move(type)), isParam_(isParam), isBuffer_(isBuffer) {}
|
| 119 |
+
|
| 120 |
+
friend bool operator==(const Attribute& lhs, const Attribute& rhs) {
|
| 121 |
+
return *(lhs.type_) == *(rhs.type_) && lhs.isParam_ == rhs.isParam_;
|
| 122 |
+
}
|
| 123 |
+
TypePtr type_;
|
| 124 |
+
bool isParam_;
|
| 125 |
+
bool isBuffer_;
|
| 126 |
+
};
|
| 127 |
+
|
| 128 |
+
struct ModuleInfo {
|
| 129 |
+
ModuleInfo(std::string name, std::shared_ptr<ConcreteModuleType> meta)
|
| 130 |
+
: name_(std::move(name)), meta_(std::move(meta)) {}
|
| 131 |
+
|
| 132 |
+
friend bool operator==(const ModuleInfo& lhs, const ModuleInfo& rhs);
|
| 133 |
+
|
| 134 |
+
std::string name_;
|
| 135 |
+
std::shared_ptr<ConcreteModuleType> meta_;
|
| 136 |
+
};
|
| 137 |
+
|
| 138 |
+
private:
|
| 139 |
+
ConcreteModuleTypeBuilder() = default;
|
| 140 |
+
ClassTypePtr createTypeFromThis() const;
|
| 141 |
+
|
| 142 |
+
// If true, this type will never compare equally to anything else. This is
|
| 143 |
+
// used if we want to ensure that this type is not shared (for example, if it
|
| 144 |
+
// came from a traced module)
|
| 145 |
+
bool isPoisoned_ = false;
|
| 146 |
+
|
| 147 |
+
// The value of any constants defined by the module.
|
| 148 |
+
std::unordered_map<std::string, IValue> constants_;
|
| 149 |
+
// The types of any attributes
|
| 150 |
+
OrderedDict<std::string, Attribute> attributes_;
|
| 151 |
+
// Overloads, in the same format as `__overloads__` in Python
|
| 152 |
+
std::unordered_map<std::string, std::vector<std::string>> overloads_;
|
| 153 |
+
// Any attributes we failed to convert to TorchScript, along with a hint as to
|
| 154 |
+
// why
|
| 155 |
+
std::unordered_map<std::string, std::string> failedAttributes_;
|
| 156 |
+
// Any attributes that were marked as ignored. They cannot be used in
|
| 157 |
+
// TorchScript but can still be used in ignored function in Python.
|
| 158 |
+
std::unordered_set<std::string> ignoredAttributes_;
|
| 159 |
+
// Any function attributes. These are special right now because functions are
|
| 160 |
+
// not first-class in the type system.
|
| 161 |
+
std::unordered_map<std::string, FunctionAttribute> functionAttributes_;
|
| 162 |
+
// Function attributes that are calls to builtin functions. These get
|
| 163 |
+
// de-sugared directly into the corresponding aten:: call. The map is
|
| 164 |
+
// attribute name -> aten symbol name
|
| 165 |
+
std::unordered_map<std::string, c10::Symbol> builtinFunctions_;
|
| 166 |
+
// The concrete types of any submodules
|
| 167 |
+
std::vector<ModuleInfo> modules_;
|
| 168 |
+
// Hooks to be called before/after forward when the module
|
| 169 |
+
// is called directly. Used to ensure modules have different types
|
| 170 |
+
// when they have different python hooks
|
| 171 |
+
// Actual hooks are added to ClassType directly during compilation
|
| 172 |
+
std::vector<py::object> forwardHooks_;
|
| 173 |
+
std::vector<py::object> forwardPreHooks_;
|
| 174 |
+
|
| 175 |
+
// If something is a ModuleDict/ModuleList, it means:
|
| 176 |
+
// 1. The order of the submodules matters for comparing the type
|
| 177 |
+
// 2. The compiler is allowed to treat it like a dict/tuple
|
| 178 |
+
IterableModuleKind iterableModuleKind_ = IterableModuleKind::NONE;
|
| 179 |
+
|
| 180 |
+
// The original `nn.Module` class that we derived this ScriptModule from.
|
| 181 |
+
py::object pyClass_;
|
| 182 |
+
|
| 183 |
+
// NOTE: If you ever add any more state to this struct, you need to make sure
|
| 184 |
+
// operator== still makes sense!
|
| 185 |
+
friend ConcreteModuleType;
|
| 186 |
+
};
|
| 187 |
+
|
| 188 |
+
// Represents a finalized concrete type, used to service ModuleValue::attr calls
|
| 189 |
+
// during method compilation.
|
| 190 |
+
class VISIBILITY_HIDDEN ConcreteModuleType {
|
| 191 |
+
public:
|
| 192 |
+
explicit ConcreteModuleType(ConcreteModuleTypeBuilder data);
|
| 193 |
+
|
| 194 |
+
static std::shared_ptr<ConcreteModuleType> fromJitType(TypePtr type);
|
| 195 |
+
|
| 196 |
+
TypePtr getJitType() const;
|
| 197 |
+
std::optional<py::object> getPyClass() const;
|
| 198 |
+
IterableModuleKind getIterableModuleKind() const;
|
| 199 |
+
std::optional<std::vector<std::string>> findOverloads(
|
| 200 |
+
const std::string& name) const;
|
| 201 |
+
std::optional<Function*> findFunctionAttribute(const std::string& name) const;
|
| 202 |
+
std::optional<c10::Symbol> findBuiltinFunction(const std::string& name) const;
|
| 203 |
+
std::shared_ptr<ConcreteModuleType> findSubmoduleConcreteType(
|
| 204 |
+
const std::string& name) const;
|
| 205 |
+
std::optional<std::string> findFailedAttribute(const std::string& name) const;
|
| 206 |
+
bool isIgnoredAttribute(const std::string& name) const;
|
| 207 |
+
|
| 208 |
+
// These getters are only here to return things as types that can be
|
| 209 |
+
// automatically converted by pybind.
|
| 210 |
+
std::unordered_map<std::string, py::object> getConstantsPy() const;
|
| 211 |
+
std::unordered_map<std::string, std::pair<TypePtr, bool>> getAttributesPy()
|
| 212 |
+
const;
|
| 213 |
+
std::vector<std::pair<std::string, std::shared_ptr<ConcreteModuleType>>>
|
| 214 |
+
getModulesPy() const;
|
| 215 |
+
|
| 216 |
+
bool equals(const ConcreteModuleType& other) const {
|
| 217 |
+
if (jitType_ == other.jitType_) {
|
| 218 |
+
// If the computed types are the same, these modules can (obviously) share
|
| 219 |
+
// a type.
|
| 220 |
+
return true;
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
return data_.equals(other.data_);
|
| 224 |
+
}
|
| 225 |
+
bool equals(const ConcreteModuleTypeBuilder& other) const {
|
| 226 |
+
return data_.equals(other);
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
void dump() const;
|
| 230 |
+
|
| 231 |
+
private:
|
| 232 |
+
ConcreteModuleType() = default;
|
| 233 |
+
|
| 234 |
+
// The JIT type derived from this ConcreteModuleType.
|
| 235 |
+
ConcreteModuleTypeBuilder data_;
|
| 236 |
+
TypePtr jitType_;
|
| 237 |
+
};
|
| 238 |
+
|
| 239 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/edit_distance.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <cstddef>
|
| 5 |
+
|
| 6 |
+
namespace torch::jit {
|
| 7 |
+
|
| 8 |
+
TORCH_API size_t ComputeEditDistance(
|
| 9 |
+
const char* word1,
|
| 10 |
+
const char* word2,
|
| 11 |
+
size_t maxEditDistance);
|
| 12 |
+
|
| 13 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/error_report.h
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/frontend/tree.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::jit {
|
| 6 |
+
|
| 7 |
+
struct Call {
|
| 8 |
+
std::string fn_name;
|
| 9 |
+
SourceRange caller_range;
|
| 10 |
+
};
|
| 11 |
+
|
| 12 |
+
struct TORCH_API ErrorReport : public std::exception {
|
| 13 |
+
ErrorReport(const ErrorReport& e);
|
| 14 |
+
|
| 15 |
+
explicit ErrorReport(const SourceRange& r);
|
| 16 |
+
explicit ErrorReport(const TreeRef& tree) : ErrorReport(tree->range()) {}
|
| 17 |
+
explicit ErrorReport(const Token& tok) : ErrorReport(tok.range) {}
|
| 18 |
+
|
| 19 |
+
const char* what() const noexcept override;
|
| 20 |
+
|
| 21 |
+
struct TORCH_API CallStack {
|
| 22 |
+
// These functions are used to report why a function was being compiled
|
| 23 |
+
// (i.e. what was the call stack of user functions at compilation time that
|
| 24 |
+
// led to this error)
|
| 25 |
+
CallStack(const std::string& name, const SourceRange& range);
|
| 26 |
+
~CallStack();
|
| 27 |
+
|
| 28 |
+
// Change the range that is relevant for the current function (i.e. after
|
| 29 |
+
// each successful expression compilation, change it to the next expression)
|
| 30 |
+
static void update_pending_range(const SourceRange& range);
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
static std::string current_call_stack();
|
| 34 |
+
|
| 35 |
+
private:
|
| 36 |
+
template <typename T>
|
| 37 |
+
friend const ErrorReport& operator<<(const ErrorReport& e, const T& t);
|
| 38 |
+
|
| 39 |
+
mutable std::stringstream ss;
|
| 40 |
+
OwnedSourceRange context;
|
| 41 |
+
mutable std::string the_message;
|
| 42 |
+
std::vector<Call> error_stack;
|
| 43 |
+
};
|
| 44 |
+
|
| 45 |
+
template <typename T>
|
| 46 |
+
const ErrorReport& operator<<(const ErrorReport& e, const T& t) {
|
| 47 |
+
e.ss << t;
|
| 48 |
+
return e;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/exit_transforms.h
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/jit/ir/ir.h>
|
| 5 |
+
|
| 6 |
+
namespace torch::jit {
|
| 7 |
+
|
| 8 |
+
TORCH_API void TransformExits(std::shared_ptr<Graph>& graph);
|
| 9 |
+
|
| 10 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/function_schema.h>
|
| 4 |
+
#include <c10/macros/Macros.h>
|
| 5 |
+
#include <string>
|
| 6 |
+
#include <variant>
|
| 7 |
+
|
| 8 |
+
namespace torch::jit {
|
| 9 |
+
|
| 10 |
+
// allow_typevars: If true, we assume that lowercase types that we don't
|
| 11 |
+
// understand are type variables. This is only needed for TorchScript (and not
|
| 12 |
+
// not needed for custom ops).
|
| 13 |
+
// If false, we disallow typevars, except in certain cases for BC reason (i.e.
|
| 14 |
+
// your op is in the aten or prim namespace).
|
| 15 |
+
TORCH_API std::variant<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName(
|
| 16 |
+
const std::string& schemaOrName,
|
| 17 |
+
bool allow_typevars = true);
|
| 18 |
+
TORCH_API c10::FunctionSchema parseSchema(
|
| 19 |
+
const std::string& schema,
|
| 20 |
+
bool allow_typevars = true);
|
| 21 |
+
TORCH_API c10::OperatorName parseName(const std::string& name);
|
| 22 |
+
|
| 23 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/inline_loop_condition.h
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <functional>
|
| 3 |
+
#include <memory>
|
| 4 |
+
#include <string>
|
| 5 |
+
|
| 6 |
+
#include <torch/csrc/Export.h>
|
| 7 |
+
#include <torch/csrc/jit/ir/ir.h>
|
| 8 |
+
|
| 9 |
+
namespace torch::jit {
|
| 10 |
+
|
| 11 |
+
TORCH_API void InlineLoopCondition(std::shared_ptr<Graph>& graph);
|
| 12 |
+
TORCH_API void InlineBlockBeforeNode(Node* before_node, Block* block);
|
| 13 |
+
|
| 14 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/qualified_name.h>
|
| 4 |
+
#include <torch/csrc/Export.h>
|
| 5 |
+
|
| 6 |
+
namespace torch::jit {
|
| 7 |
+
|
| 8 |
+
/**
|
| 9 |
+
* class NameMangler
|
| 10 |
+
*
|
| 11 |
+
* Utility to mangle qualified names in order to make them unique. We use this
|
| 12 |
+
* in various places where we to de-duplicate qualified names.
|
| 13 |
+
*/
|
| 14 |
+
class TORCH_API NameMangler {
|
| 15 |
+
public:
|
| 16 |
+
// Given a qualified name, return a mangled version that is guaranteed to be
|
| 17 |
+
// unique with respect to previous/future calls of `mangled()` on this name
|
| 18 |
+
// mangler instance.
|
| 19 |
+
c10::QualifiedName mangle(const c10::QualifiedName& name);
|
| 20 |
+
|
| 21 |
+
private:
|
| 22 |
+
size_t mangleIndex_ = 0;
|
| 23 |
+
};
|
| 24 |
+
|
| 25 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/csrc/Export.h>
|
| 3 |
+
#include <torch/csrc/jit/frontend/tree.h>
|
| 4 |
+
#include <torch/csrc/jit/frontend/tree_views.h>
|
| 5 |
+
#include <memory>
|
| 6 |
+
|
| 7 |
+
namespace torch::jit {
|
| 8 |
+
|
| 9 |
+
struct Decl;
|
| 10 |
+
struct ParserImpl;
|
| 11 |
+
struct Lexer;
|
| 12 |
+
|
| 13 |
+
TORCH_API Decl mergeTypesFromTypeComment(
|
| 14 |
+
const Decl& decl,
|
| 15 |
+
const Decl& type_annotation_decl,
|
| 16 |
+
bool is_method);
|
| 17 |
+
|
| 18 |
+
struct TORCH_API Parser {
|
| 19 |
+
explicit Parser(const std::shared_ptr<Source>& src);
|
| 20 |
+
TreeRef parseFunction(bool is_method);
|
| 21 |
+
TreeRef parseClass();
|
| 22 |
+
Decl parseTypeComment();
|
| 23 |
+
Expr parseExp();
|
| 24 |
+
Lexer& lexer();
|
| 25 |
+
~Parser();
|
| 26 |
+
|
| 27 |
+
private:
|
| 28 |
+
std::unique_ptr<ParserImpl> pImpl;
|
| 29 |
+
};
|
| 30 |
+
|
| 31 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser_constants.h
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace torch::jit {
|
| 4 |
+
static const char* valid_single_char_tokens = "+-*/%@()[]:,={}><.?!&^|~";
|
| 5 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/resolver.h
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/jit_type.h>
|
| 4 |
+
#include <ATen/core/qualified_name.h>
|
| 5 |
+
#include <torch/csrc/jit/frontend/sugared_value.h>
|
| 6 |
+
|
| 7 |
+
namespace torch::jit {
|
| 8 |
+
|
| 9 |
+
struct Resolver;
|
| 10 |
+
using ResolverPtr = std::shared_ptr<Resolver>;
|
| 11 |
+
|
| 12 |
+
/**
|
| 13 |
+
* class Resolver
|
| 14 |
+
*
|
| 15 |
+
* Represents an "outer environment" in which we an look up names and return
|
| 16 |
+
* a corresponding SugaredValue. This is used during compilation to resolve
|
| 17 |
+
* references to names which are not defined internal to the graph.
|
| 18 |
+
*
|
| 19 |
+
* Example: PythonResolver looks at the enclosing Python scope for `name`.
|
| 20 |
+
*
|
| 21 |
+
* NOTE: When adding methods, keep this an abstract class (i.e. all new methods
|
| 22 |
+
* should be purely virtual). Resist the urge to provide a default
|
| 23 |
+
* implementation; you should explicitly think about how each resolver would
|
| 24 |
+
* handle the method.
|
| 25 |
+
*/
|
| 26 |
+
struct Resolver {
|
| 27 |
+
virtual ~Resolver() = default;
|
| 28 |
+
|
| 29 |
+
// Resolve a given name to a SugaredValue. This takes the method `m` that the
|
| 30 |
+
// caller is currently constructing, since we may need to insert nodes into
|
| 31 |
+
// the graph to create a value.
|
| 32 |
+
virtual std::shared_ptr<SugaredValue> resolveValue(
|
| 33 |
+
const std::string& name,
|
| 34 |
+
GraphFunction& m,
|
| 35 |
+
const SourceRange& loc) {
|
| 36 |
+
return nullptr;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
// Resolve `name` to a TypePtr.
|
| 40 |
+
virtual TypePtr resolveType(const std::string& name, const SourceRange& loc) {
|
| 41 |
+
return nullptr;
|
| 42 |
+
}
|
| 43 |
+
};
|
| 44 |
+
|
| 45 |
+
// A resolver that only understands "torch.foo()" lookups.
|
| 46 |
+
struct NativeResolver : public Resolver {
|
| 47 |
+
std::shared_ptr<SugaredValue> resolveValue(
|
| 48 |
+
const std::string& name,
|
| 49 |
+
GraphFunction& m,
|
| 50 |
+
const SourceRange& loc) override {
|
| 51 |
+
if (name == "torch") {
|
| 52 |
+
return std::make_shared<BuiltinModule>("aten");
|
| 53 |
+
}
|
| 54 |
+
return nullptr;
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
TypePtr resolveType(const std::string& name, const SourceRange& loc)
|
| 58 |
+
override {
|
| 59 |
+
return nullptr;
|
| 60 |
+
}
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
inline std::shared_ptr<NativeResolver> nativeResolver() {
|
| 64 |
+
return std::make_shared<NativeResolver>();
|
| 65 |
+
}
|
| 66 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_matching.h
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/csrc/Export.h>
|
| 3 |
+
#include <torch/csrc/jit/ir/ir.h>
|
| 4 |
+
#include <torch/csrc/jit/ir/named_value.h>
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/function_schema.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::jit {
|
| 9 |
+
|
| 10 |
+
// Try to match a list of inputs and keyword 'attributes' to this
|
| 11 |
+
// schema. Return the flat list of positional inputs to the call or
|
| 12 |
+
// `std::nullopt` on failure (`failure_messages` contains a good error
|
| 13 |
+
// report in this case)
|
| 14 |
+
|
| 15 |
+
struct MatchedSchema {
|
| 16 |
+
std::vector<Value*> inputs;
|
| 17 |
+
std::vector<TypePtr> return_types;
|
| 18 |
+
c10::OptNameList return_field_names;
|
| 19 |
+
std::string schema_name;
|
| 20 |
+
};
|
| 21 |
+
|
| 22 |
+
TORCH_API bool isBlockListedSchema(const FunctionSchema& schema);
|
| 23 |
+
|
| 24 |
+
TORCH_API MatchedSchema matchSchema(
|
| 25 |
+
const ::c10::FunctionSchema& schema,
|
| 26 |
+
const SourceRange& loc,
|
| 27 |
+
Graph& graph,
|
| 28 |
+
at::ArrayRef<NamedValue> args,
|
| 29 |
+
at::ArrayRef<NamedValue> kwargs,
|
| 30 |
+
const std::optional<NamedValue>& self = std::nullopt);
|
| 31 |
+
|
| 32 |
+
TORCH_API std::pair<size_t, MatchedSchema> matchSchemas(
|
| 33 |
+
const std::vector<const ::c10::FunctionSchema*>& schemas,
|
| 34 |
+
const SourceRange& loc,
|
| 35 |
+
Graph& graph,
|
| 36 |
+
at::ArrayRef<NamedValue> args,
|
| 37 |
+
at::ArrayRef<NamedValue> kwargs,
|
| 38 |
+
const std::optional<NamedValue>& self = std::nullopt,
|
| 39 |
+
bool render_errors = false);
|
| 40 |
+
|
| 41 |
+
TORCH_API bool convertibleToList(
|
| 42 |
+
const TypePtr& type,
|
| 43 |
+
const TypePtr& list_type_);
|
| 44 |
+
|
| 45 |
+
TORCH_API std::string getFullSchemaName(const ::c10::FunctionSchema& schema);
|
| 46 |
+
|
| 47 |
+
TORCH_API Value* emitBuiltinCall(
|
| 48 |
+
const SourceRange& loc,
|
| 49 |
+
Graph& graph,
|
| 50 |
+
Symbol name,
|
| 51 |
+
at::ArrayRef<NamedValue> args,
|
| 52 |
+
at::ArrayRef<NamedValue> kwargs,
|
| 53 |
+
const std::optional<NamedValue>& self = std::nullopt);
|
| 54 |
+
|
| 55 |
+
TORCH_API std::optional<size_t> findInputWithName(
|
| 56 |
+
const std::string& name,
|
| 57 |
+
at::ArrayRef<NamedValue> kwargs,
|
| 58 |
+
bool is_aten = false);
|
| 59 |
+
|
| 60 |
+
// applies implicit conversion from value trying to turn it into type
|
| 61 |
+
// concrete_type it succeeds if the return_value->isSubtypeOf(concrete_type)
|
| 62 |
+
TORCH_API Value* tryConvertToType(
|
| 63 |
+
const SourceRange& loc,
|
| 64 |
+
Graph& graph,
|
| 65 |
+
const TypePtr& concrete_type,
|
| 66 |
+
Value* value,
|
| 67 |
+
bool allow_conversions);
|
| 68 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/alias_info.h>
|
| 4 |
+
#include <ATen/core/jit_type.h>
|
| 5 |
+
#include <c10/macros/Macros.h>
|
| 6 |
+
#include <c10/util/FunctionRef.h>
|
| 7 |
+
#include <torch/csrc/jit/frontend/lexer.h>
|
| 8 |
+
|
| 9 |
+
namespace torch::jit {
|
| 10 |
+
|
| 11 |
+
using TypePtr = c10::TypePtr;
|
| 12 |
+
|
| 13 |
+
struct TORCH_API SchemaTypeParser {
|
| 14 |
+
TypePtr parseBaseType();
|
| 15 |
+
std::optional<c10::AliasInfo> parseAliasAnnotation();
|
| 16 |
+
std::pair<TypePtr, std::optional<c10::AliasInfo>> parseType();
|
| 17 |
+
std::tuple</*fake*/ TypePtr, /*real*/ TypePtr, std::optional<c10::AliasInfo>>
|
| 18 |
+
parseFakeAndRealType();
|
| 19 |
+
std::optional<at::ScalarType> parseTensorDType(const std::string& dtype);
|
| 20 |
+
TypePtr parseRefinedTensor();
|
| 21 |
+
|
| 22 |
+
SchemaTypeParser(
|
| 23 |
+
Lexer& L,
|
| 24 |
+
bool parse_complete_tensor_types,
|
| 25 |
+
bool allow_typevars)
|
| 26 |
+
: complete_tensor_types(parse_complete_tensor_types),
|
| 27 |
+
L(L),
|
| 28 |
+
allow_typevars_(allow_typevars) {}
|
| 29 |
+
|
| 30 |
+
private:
|
| 31 |
+
std::optional<bool> tryToParseRequiresGrad();
|
| 32 |
+
std::optional<c10::Device> tryToParseDeviceType();
|
| 33 |
+
void parseList(
|
| 34 |
+
int begin,
|
| 35 |
+
int sep,
|
| 36 |
+
int end,
|
| 37 |
+
c10::function_ref<void()> callback);
|
| 38 |
+
|
| 39 |
+
bool complete_tensor_types;
|
| 40 |
+
Lexer& L;
|
| 41 |
+
size_t next_id = 0;
|
| 42 |
+
bool allow_typevars_;
|
| 43 |
+
};
|
| 44 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_range.h
ADDED
|
@@ -0,0 +1,455 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/util/Exception.h>
|
| 3 |
+
#include <optional>
|
| 4 |
+
|
| 5 |
+
#include <algorithm>
|
| 6 |
+
#include <iterator>
|
| 7 |
+
#include <memory>
|
| 8 |
+
#include <ostream>
|
| 9 |
+
#include <sstream>
|
| 10 |
+
#include <unordered_map>
|
| 11 |
+
|
| 12 |
+
namespace torch::jit {
|
| 13 |
+
|
| 14 |
+
class SourceRangeUnpickler;
|
| 15 |
+
struct SourceRange;
|
| 16 |
+
|
| 17 |
+
// A stringlike class backed by a vector of string_view
|
| 18 |
+
// the string represented are logically the concatenation of the string_views
|
| 19 |
+
// This has advantage of not needing continues memory.
|
| 20 |
+
struct TORCH_API StringCordView {
|
| 21 |
+
StringCordView();
|
| 22 |
+
StringCordView(const StringCordView&) = default;
|
| 23 |
+
StringCordView(StringCordView&&) noexcept = default;
|
| 24 |
+
StringCordView(
|
| 25 |
+
std::vector<c10::string_view> inputs,
|
| 26 |
+
std::vector<std::shared_ptr<std::string>> ownerships);
|
| 27 |
+
|
| 28 |
+
StringCordView& operator=(const StringCordView&) = default;
|
| 29 |
+
StringCordView& operator=(StringCordView&&) noexcept = default;
|
| 30 |
+
|
| 31 |
+
size_t size() const {
|
| 32 |
+
return accumulated_sizes_.back();
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
size_t find(const std::string& tok, size_t start) const;
|
| 36 |
+
size_t find_regex(const std::string& tok, size_t start) const;
|
| 37 |
+
StringCordView substr(size_t start, size_t size) const;
|
| 38 |
+
|
| 39 |
+
char at(size_t index) const {
|
| 40 |
+
return *iter_for_pos(index);
|
| 41 |
+
}
|
| 42 |
+
char operator[](size_t index) const {
|
| 43 |
+
return at(index);
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
std::string str() const {
|
| 47 |
+
std::stringstream ss;
|
| 48 |
+
for (auto s : pieces_) {
|
| 49 |
+
ss << std::string(s);
|
| 50 |
+
}
|
| 51 |
+
return ss.str();
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
bool operator==(const std::string& rhs) const;
|
| 55 |
+
|
| 56 |
+
bool operator==(const StringCordView& rhs) const;
|
| 57 |
+
|
| 58 |
+
c10::string_view piece(size_t index) const {
|
| 59 |
+
return pieces_[index];
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
struct Iterator {
|
| 63 |
+
Iterator(
|
| 64 |
+
const StringCordView* str,
|
| 65 |
+
size_t start_line,
|
| 66 |
+
size_t start_pos,
|
| 67 |
+
size_t size)
|
| 68 |
+
: line_(start_line), pos_(start_pos), str_(str), size_(size) {}
|
| 69 |
+
explicit Iterator(const StringCordView* str)
|
| 70 |
+
: Iterator(str, 0, 0, str->size()) {}
|
| 71 |
+
|
| 72 |
+
Iterator() : Iterator(nullptr, 0, 0, 0) {}
|
| 73 |
+
|
| 74 |
+
Iterator(const Iterator&) = default;
|
| 75 |
+
Iterator(Iterator&&) = default;
|
| 76 |
+
Iterator& operator=(const Iterator&) = default;
|
| 77 |
+
Iterator& operator=(Iterator&&) = default;
|
| 78 |
+
|
| 79 |
+
Iterator operator++() {
|
| 80 |
+
if (size_ == 0) {
|
| 81 |
+
return *this;
|
| 82 |
+
}
|
| 83 |
+
if ((pos_ + 1) < str_->pieces_[line_].size()) {
|
| 84 |
+
pos_++;
|
| 85 |
+
} else {
|
| 86 |
+
line_++;
|
| 87 |
+
pos_ = 0;
|
| 88 |
+
}
|
| 89 |
+
return *this;
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
Iterator operator++(int) {
|
| 93 |
+
Iterator prev(*this);
|
| 94 |
+
++(*this);
|
| 95 |
+
return prev;
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
Iterator next_iter() const {
|
| 99 |
+
Iterator next(*this);
|
| 100 |
+
++next;
|
| 101 |
+
return next;
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
Iterator& operator+=(size_t num) {
|
| 105 |
+
if (!has_next()) {
|
| 106 |
+
return *this;
|
| 107 |
+
}
|
| 108 |
+
size_t target_pos = pos_ + num;
|
| 109 |
+
if (target_pos >= str_->accumulated_sizes_[line_] &&
|
| 110 |
+
(line_ + 1) < str_->accumulated_sizes_.size() &&
|
| 111 |
+
target_pos < str_->accumulated_sizes_[line_ + 1]) {
|
| 112 |
+
pos_ = target_pos;
|
| 113 |
+
return *this;
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
size_t target_abs_pos = pos() + num;
|
| 117 |
+
*this = str_->iter_for_pos(target_abs_pos);
|
| 118 |
+
return *this;
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
bool operator==(const Iterator& rhs) const {
|
| 122 |
+
if (!has_next() && !rhs.has_next()) {
|
| 123 |
+
return true;
|
| 124 |
+
}
|
| 125 |
+
return (str_ == rhs.str_) && (line_ == rhs.line_) && (pos_ == rhs.pos_);
|
| 126 |
+
}
|
| 127 |
+
bool operator!=(const Iterator& rhs) {
|
| 128 |
+
return !((*this) == rhs);
|
| 129 |
+
}
|
| 130 |
+
bool has_next() const {
|
| 131 |
+
return size_ > 0 && (line_ < str_->pieces_.size());
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
char operator*() const {
|
| 135 |
+
TORCH_INTERNAL_ASSERT(line_ < str_->pieces_.size());
|
| 136 |
+
TORCH_INTERNAL_ASSERT(pos_ < str_->pieces_[line_].size());
|
| 137 |
+
return str_->pieces_[line_].at(pos_);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
// returns rest of the line of the current iterator
|
| 141 |
+
c10::string_view rest_line() const {
|
| 142 |
+
if (line_ >= str_->pieces_.size()) {
|
| 143 |
+
return "";
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
c10::string_view cur_line = str_->pieces_[line_];
|
| 147 |
+
return cur_line.substr(pos_, std::string::npos);
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
size_t pos() const {
|
| 151 |
+
if (size_ == 0) {
|
| 152 |
+
return 0;
|
| 153 |
+
}
|
| 154 |
+
return str_->accumulated_sizes_[line_] + pos_;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
private:
|
| 158 |
+
size_t line_;
|
| 159 |
+
size_t pos_;
|
| 160 |
+
const StringCordView* str_;
|
| 161 |
+
size_t size_;
|
| 162 |
+
friend struct StringCordView;
|
| 163 |
+
};
|
| 164 |
+
|
| 165 |
+
Iterator begin() const {
|
| 166 |
+
return Iterator(this, 0, 0, size());
|
| 167 |
+
}
|
| 168 |
+
Iterator end() const {
|
| 169 |
+
return Iterator(this, pieces_.size(), 0, 0);
|
| 170 |
+
}
|
| 171 |
+
Iterator iter_for_pos(size_t pos) const;
|
| 172 |
+
|
| 173 |
+
private:
|
| 174 |
+
std::vector<c10::string_view> pieces_;
|
| 175 |
+
std::vector<size_t> accumulated_sizes_;
|
| 176 |
+
std::vector<std::shared_ptr<std::string>> owned_strings_;
|
| 177 |
+
};
|
| 178 |
+
|
| 179 |
+
// Source represents a code segment. It keeps track of:
|
| 180 |
+
// - text_view : the view into text of the code segment
|
| 181 |
+
// - filename (optional) : if present, represents the name of the file from
|
| 182 |
+
// which the code segment originated.
|
| 183 |
+
// - starting_line_no : represents the line in the original file where the
|
| 184 |
+
// code segment started.
|
| 185 |
+
struct TORCH_API Source {
|
| 186 |
+
// Whether or not Source should copy the string passed in the constructor.
|
| 187 |
+
enum CopiesString { COPIES_STRING, DONT_COPY };
|
| 188 |
+
|
| 189 |
+
explicit Source(
|
| 190 |
+
c10::string_view text_view,
|
| 191 |
+
std::optional<std::string> filename = std::nullopt,
|
| 192 |
+
size_t starting_line_no = 0,
|
| 193 |
+
std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr,
|
| 194 |
+
CopiesString copies_str = COPIES_STRING)
|
| 195 |
+
: filename_(std::move(filename)),
|
| 196 |
+
starting_line_no_(starting_line_no),
|
| 197 |
+
gen_ranges_(std::move(gen_ranges)) {
|
| 198 |
+
if (copies_str == COPIES_STRING) {
|
| 199 |
+
std::shared_ptr<std::string> allocated_str =
|
| 200 |
+
std::make_shared<std::string>(text_view.data(), text_view.size());
|
| 201 |
+
text_view_ = StringCordView({*allocated_str}, {allocated_str});
|
| 202 |
+
} else {
|
| 203 |
+
text_view_ = StringCordView({text_view}, {});
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
calc_line_start_offsets();
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
explicit Source(
|
| 210 |
+
StringCordView str,
|
| 211 |
+
std::optional<std::string> filename = std::nullopt,
|
| 212 |
+
size_t starting_line_no = 0,
|
| 213 |
+
std::shared_ptr<SourceRangeUnpickler> gen_ranges = nullptr)
|
| 214 |
+
: text_view_(std::move(str)),
|
| 215 |
+
filename_(std::move(filename)),
|
| 216 |
+
starting_line_no_(starting_line_no),
|
| 217 |
+
gen_ranges_(std::move(gen_ranges)) {
|
| 218 |
+
calc_line_start_offsets();
|
| 219 |
+
}
|
| 220 |
+
// Given a line number (within source_), return the byte offset of the
|
| 221 |
+
// beginning of that line.
|
| 222 |
+
size_t offset_for_line(size_t line) const {
|
| 223 |
+
return line_starting_offsets_.at(line);
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
// Returns number of lines present.
|
| 227 |
+
size_t num_lines() const {
|
| 228 |
+
return line_starting_offsets_.size();
|
| 229 |
+
}
|
| 230 |
+
|
| 231 |
+
// Calculate the line (within the code segment) on which `offset` resides.
|
| 232 |
+
size_t lineno_for_offset(size_t offset) const {
|
| 233 |
+
auto iter = std::upper_bound(
|
| 234 |
+
line_starting_offsets_.begin(), line_starting_offsets_.end(), offset);
|
| 235 |
+
return iter - line_starting_offsets_.begin() - 1;
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
// Calculate the line (within the original source file, if present) on which
|
| 239 |
+
// `lineno` resides.
|
| 240 |
+
size_t lineno_to_source_lineno(size_t lineno) const {
|
| 241 |
+
if (filename_) {
|
| 242 |
+
return lineno + starting_line_no_;
|
| 243 |
+
} else {
|
| 244 |
+
return lineno;
|
| 245 |
+
}
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
StringCordView get_line(size_t lineno) const {
|
| 249 |
+
auto start = offset_for_line(lineno);
|
| 250 |
+
auto size = (lineno + 1) < num_lines() ? offset_for_line(lineno + 1) - start
|
| 251 |
+
: text_view_.size() - start;
|
| 252 |
+
return text_view_.substr(start, size);
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
const StringCordView& text_str() const {
|
| 256 |
+
return text_view_;
|
| 257 |
+
}
|
| 258 |
+
|
| 259 |
+
char char_at(size_t index) const {
|
| 260 |
+
return text_view_.at(index);
|
| 261 |
+
}
|
| 262 |
+
|
| 263 |
+
size_t size() const {
|
| 264 |
+
return text_view_.size();
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
std::optional<std::string>& filename() {
|
| 268 |
+
return filename_;
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
size_t starting_line_no() const {
|
| 272 |
+
return starting_line_no_;
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
std::optional<SourceRange> findSourceRangeThatGenerated(
|
| 276 |
+
const SourceRange& range);
|
| 277 |
+
|
| 278 |
+
~Source() = default;
|
| 279 |
+
|
| 280 |
+
private:
|
| 281 |
+
void calc_line_start_offsets() {
|
| 282 |
+
line_starting_offsets_.clear();
|
| 283 |
+
line_starting_offsets_.push_back(0);
|
| 284 |
+
size_t pos = 0;
|
| 285 |
+
while ((pos = text_view_.find("\n", pos)) != std::string::npos) {
|
| 286 |
+
line_starting_offsets_.push_back(++pos);
|
| 287 |
+
}
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
StringCordView text_view_;
|
| 291 |
+
|
| 292 |
+
std::optional<std::string> filename_;
|
| 293 |
+
// If filename_ is not present, starting_line_no_ is don't care
|
| 294 |
+
size_t starting_line_no_;
|
| 295 |
+
// Starting offsets for lines into the source. e.g. line 0 starts at
|
| 296 |
+
// line_starting_offsets_[0], etc.
|
| 297 |
+
std::vector<size_t> line_starting_offsets_;
|
| 298 |
+
|
| 299 |
+
std::shared_ptr<SourceRangeUnpickler> gen_ranges_;
|
| 300 |
+
};
|
| 301 |
+
|
| 302 |
+
// A SourceRange is a reference to subset of a Source, specified by `start` and
|
| 303 |
+
// `end` byte offsets into the source text.
|
| 304 |
+
struct TORCH_API SourceRange {
|
| 305 |
+
SourceRange(std::shared_ptr<Source> source_view, size_t start_, size_t end_)
|
| 306 |
+
: source_view_(std::move(source_view)), start_(start_), end_(end_) {
|
| 307 |
+
if (source_view_) {
|
| 308 |
+
start_iter_ = source_view_->text_str().iter_for_pos(start_);
|
| 309 |
+
}
|
| 310 |
+
}
|
| 311 |
+
|
| 312 |
+
SourceRange() : source_view_(nullptr), start_(0), end_(0) {}
|
| 313 |
+
|
| 314 |
+
SourceRange(
|
| 315 |
+
std::shared_ptr<Source> source_view_,
|
| 316 |
+
StringCordView::Iterator start_iter,
|
| 317 |
+
size_t end_)
|
| 318 |
+
: source_view_(std::move(source_view_)),
|
| 319 |
+
start_(start_iter.pos()),
|
| 320 |
+
end_(end_),
|
| 321 |
+
start_iter_(start_iter) {}
|
| 322 |
+
|
| 323 |
+
const c10::string_view token_text() const {
|
| 324 |
+
size_t size = end() - start();
|
| 325 |
+
return start_iter_.rest_line().substr(0, size);
|
| 326 |
+
}
|
| 327 |
+
|
| 328 |
+
const StringCordView text() const {
|
| 329 |
+
return source_view_->text_str().substr(start(), end() - start());
|
| 330 |
+
}
|
| 331 |
+
size_t size() const {
|
| 332 |
+
return end() - start();
|
| 333 |
+
}
|
| 334 |
+
static const size_t CONTEXT = 3;
|
| 335 |
+
void highlight(std::ostream& out) const;
|
| 336 |
+
|
| 337 |
+
// Customizable version of 'highlight' method.
|
| 338 |
+
void print_with_context(
|
| 339 |
+
std::ostream& out,
|
| 340 |
+
size_t context,
|
| 341 |
+
bool highlight,
|
| 342 |
+
const std::string& funcname) const;
|
| 343 |
+
|
| 344 |
+
const std::shared_ptr<Source>& source() const {
|
| 345 |
+
return source_view_;
|
| 346 |
+
}
|
| 347 |
+
size_t start() const {
|
| 348 |
+
return start_;
|
| 349 |
+
}
|
| 350 |
+
size_t end() const {
|
| 351 |
+
return end_;
|
| 352 |
+
}
|
| 353 |
+
std::string str() const {
|
| 354 |
+
std::stringstream ss;
|
| 355 |
+
highlight(ss);
|
| 356 |
+
return ss.str();
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
std::optional<std::tuple<std::string, size_t, size_t>> file_line_col() const {
|
| 360 |
+
if (!source_view_ || !source()->filename()) {
|
| 361 |
+
return std::nullopt;
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
auto lineno = source_view_->lineno_for_offset(start_);
|
| 365 |
+
auto col_offset = (int)start_ - (int)source_view_->offset_for_line(lineno);
|
| 366 |
+
// TODO: std::optional<>::value returns an rvalue ref so can't use it here??
|
| 367 |
+
return std::make_tuple<std::string, size_t, size_t>(
|
| 368 |
+
source_view_->filename().value_or(""),
|
| 369 |
+
source_view_->lineno_to_source_lineno(lineno),
|
| 370 |
+
(size_t)col_offset);
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
bool operator==(const SourceRange& rhs) const {
|
| 374 |
+
return start() == rhs.start() && end() == rhs.end() &&
|
| 375 |
+
source() == rhs.source();
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
bool operator!=(const SourceRange& rhs) const {
|
| 379 |
+
return !(*this == rhs);
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
std::optional<SourceRange> findSourceRangeThatGenerated() const {
|
| 383 |
+
if (!source_view_) {
|
| 384 |
+
return std::nullopt;
|
| 385 |
+
}
|
| 386 |
+
return source_view_->findSourceRangeThatGenerated(*this);
|
| 387 |
+
}
|
| 388 |
+
|
| 389 |
+
protected:
|
| 390 |
+
std::shared_ptr<Source> source_view_;
|
| 391 |
+
|
| 392 |
+
private:
|
| 393 |
+
size_t start_;
|
| 394 |
+
size_t end_;
|
| 395 |
+
StringCordView::Iterator start_iter_;
|
| 396 |
+
};
|
| 397 |
+
|
| 398 |
+
// OwnedSourceRange is just like a SourceRange except that it owns a `Source`
|
| 399 |
+
// instead of `Source`. Thus OwnedSourceRange owns a copy of source text.
|
| 400 |
+
struct OwnedSourceRange : public SourceRange {
|
| 401 |
+
explicit OwnedSourceRange(const SourceRange& source_range)
|
| 402 |
+
: SourceRange(source_range) {
|
| 403 |
+
const auto& source = source_range.source();
|
| 404 |
+
if (source) {
|
| 405 |
+
source_view_ = std::make_shared<Source>(
|
| 406 |
+
source->text_str().str(),
|
| 407 |
+
source->filename(),
|
| 408 |
+
source->starting_line_no());
|
| 409 |
+
}
|
| 410 |
+
}
|
| 411 |
+
};
|
| 412 |
+
|
| 413 |
+
struct TORCH_API SourceRangeHasher {
|
| 414 |
+
public:
|
| 415 |
+
size_t operator()(const torch::jit::SourceRange& key) const;
|
| 416 |
+
};
|
| 417 |
+
|
| 418 |
+
struct StackEntry {
|
| 419 |
+
std::string filename;
|
| 420 |
+
SourceRange range;
|
| 421 |
+
};
|
| 422 |
+
|
| 423 |
+
TORCH_API void format_stack_trace(
|
| 424 |
+
std::ostream& out,
|
| 425 |
+
const std::vector<StackEntry>& entries);
|
| 426 |
+
|
| 427 |
+
inline std::ostream& operator<<(std::ostream& out, const SourceRange& range) {
|
| 428 |
+
range.highlight(out);
|
| 429 |
+
return out;
|
| 430 |
+
}
|
| 431 |
+
|
| 432 |
+
// A pair of (byte offset, SourceRange) describing a specific segment
|
| 433 |
+
// of the output stream
|
| 434 |
+
struct TaggedRange {
|
| 435 |
+
TaggedRange(size_t bytes, SourceRange range)
|
| 436 |
+
: bytes(bytes), range(std::move(range)) {}
|
| 437 |
+
size_t bytes;
|
| 438 |
+
SourceRange range;
|
| 439 |
+
};
|
| 440 |
+
using SourceRangeRecords = std::vector<TaggedRange>;
|
| 441 |
+
using SourceRangeTagMap =
|
| 442 |
+
std::unordered_map<SourceRange, int64_t, SourceRangeHasher>;
|
| 443 |
+
|
| 444 |
+
} // namespace torch::jit
|
| 445 |
+
|
| 446 |
+
namespace std {
|
| 447 |
+
template <>
|
| 448 |
+
struct iterator_traits<torch::jit::StringCordView::Iterator> {
|
| 449 |
+
using value_type = char;
|
| 450 |
+
using difference_type = ptrdiff_t;
|
| 451 |
+
using pointer = char*;
|
| 452 |
+
using reference = char&;
|
| 453 |
+
using iterator_category = std::forward_iterator_tag;
|
| 454 |
+
};
|
| 455 |
+
} // namespace std
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/source_ref.h
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <functional>
|
| 4 |
+
#include <memory>
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/ivalue.h>
|
| 7 |
+
#include <c10/macros/Export.h>
|
| 8 |
+
#include <torch/csrc/jit/frontend/source_range.h>
|
| 9 |
+
|
| 10 |
+
namespace torch::jit {
|
| 11 |
+
|
| 12 |
+
/**
|
| 13 |
+
* SourceRef does two things:
|
| 14 |
+
* 1. Owns a Source object.
|
| 15 |
+
* 2. Serves as lookup key to the owned Source in associative containers, for
|
| 16 |
+
* runtime data aggregation.
|
| 17 |
+
* We don't want to use std::shared_ptr<Source> directly because we want to
|
| 18 |
+
 * support heterogeneous lookup, and also shared_ptr is an implementation detail
|
| 19 |
+
* which should be encapsulated.
|
| 20 |
+
*/
|
| 21 |
+
class TORCH_API SourceRef : public CustomClassHolder {
 public:
  explicit SourceRef(std::shared_ptr<Source> source_view)
      : source_view_(std::move(source_view)) {}
  // Two refs are equal iff they share the same underlying Source object
  // (pointer identity, not content comparison).
  bool operator==(const SourceRef& other) const {
    return source_view_ == other.source_view_;
  }
  // Heterogeneous ordering against a raw Source lets ordered containers be
  // searched without constructing a SourceRef; ordering is by address.
  bool operator<(const Source& other) const {
    return source_view_.get() < &other;
  }
  friend bool operator<(const Source& other, const SourceRef& self) {
    return &other < self.source_view_.get();
  }
  bool operator<(const SourceRef& other) const {
    return *this < *other.source_view_;
  }
  // Non-owning access to the underlying Source.
  const Source* operator->() const {
    return source_view_.get();
  }

 private:
  std::shared_ptr<Source> source_view_;
};
|
| 44 |
+
|
| 45 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/strtod.h
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::jit {

// Locale-independent ("C"-locale) counterparts of strtod/strtof so number
// parsing behaves identically regardless of the process locale.
TORCH_API double strtod_c(const char* nptr, char** endptr);
TORCH_API float strtof_c(const char* nptr, char** endptr);

} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tracer.h
ADDED
|
@@ -0,0 +1,410 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Dimname.h>
|
| 4 |
+
#include <ATen/core/class_type.h>
|
| 5 |
+
#include <ATen/core/jit_type.h>
|
| 6 |
+
#include <ATen/core/stack.h>
|
| 7 |
+
#include <ATen/core/symbol.h>
|
| 8 |
+
#include <c10/util/Exception.h>
|
| 9 |
+
#include <torch/csrc/Export.h>
|
| 10 |
+
|
| 11 |
+
#include <torch/csrc/jit/frontend/source_range.h>
|
| 12 |
+
#include <torch/csrc/utils/variadic.h>
|
| 13 |
+
|
| 14 |
+
#include <cstdint>
|
| 15 |
+
#include <memory>
|
| 16 |
+
#include <unordered_map>
|
| 17 |
+
#include <vector>
|
| 18 |
+
|
| 19 |
+
namespace torch::jit {
|
| 20 |
+
struct Node;
|
| 21 |
+
struct Value;
|
| 22 |
+
struct Graph;
|
| 23 |
+
struct Module;
|
| 24 |
+
|
| 25 |
+
namespace tracer {
|
| 26 |
+
|
| 27 |
+
using ::c10::ivalue::Shared;
|
| 28 |
+
|
| 29 |
+
using ::c10::IValue;
|
| 30 |
+
using ::c10::ivalue::Future;
|
| 31 |
+
|
| 32 |
+
using ::c10::ArrayRef;
|
| 33 |
+
using ::c10::TupleType;
|
| 34 |
+
using ::c10::TupleTypePtr;
|
| 35 |
+
using ::c10::ivalue::ConstantString;
|
| 36 |
+
|
| 37 |
+
using torch::autograd::Variable;
|
| 38 |
+
using variable_list = std::vector<Variable>;
|
| 39 |
+
|
| 40 |
+
TORCH_API std::atomic<bool>& getTracerStateWarnMode();
|
| 41 |
+
|
| 42 |
+
// Per-trace state: the graph being built plus bookkeeping that maps runtime
// IValues back to Values in that graph.
struct TORCH_API TracingState
    : public std::enable_shared_from_this<TracingState> {
  TracingState();
  ~TracingState();

  // Graph under construction for the current trace.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::shared_ptr<Graph> graph;
  // Whether tracer warnings are emitted; initialized from the global mode.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool warn = getTracerStateWarnMode();
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool strict = true;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  bool force_outplace = false;
  // Callback used to derive a display name for a traced variable; the
  // default produces an empty name.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::function<std::string(const Variable& var)> lookup_var_name_fn =
      [](const Variable& var) { return ""; };

  // Push a fresh IValue -> Value mapping scope.
  void enterFrame() {
    env_stack.emplace_back();
  }

  // Pop the innermost mapping scope.
  void leaveFrame() {
    env_stack.pop_back();
  }

  // Associate / dissociate / look up the graph Value recorded for an IValue.
  void setValue(const IValue& v, Value* value);
  void delValue(const IValue& var);
  Value* getValue(const IValue& var);
  Value* getOutput(const IValue& var, size_t i);
  bool hasValue(const IValue& var) const;

  Node* createNode(c10::Symbol op_name, size_t num_outputs);
  void insertNode(Node* node);

 private:
  using WeakIValue = at::WeakIValue;

  // Hash/equality adapters so WeakIValue can key an unordered_map; equality
  // is object identity (isSameIdentity), not structural equality.
  struct WeakIValueHasher {
    size_t operator()(const WeakIValue& t) const {
      return t.hash();
    }
  };

  struct WeakIValueEq {
    bool operator()(const WeakIValue& t1, const WeakIValue& t2) const {
      return t1.isSameIdentity(t2);
    }
  };

  using Frame =
      std::unordered_map<WeakIValue, Value*, WeakIValueHasher, WeakIValueEq>;
  // Stack of mapping scopes managed by enterFrame()/leaveFrame().
  std::vector<Frame> env_stack;
};
|
| 95 |
+
|
| 96 |
+
// This is meant to be used as a thread local place, where we can store extra
// info that gets lost when we call into ATen from Python bindings. One example
// for when this happens is when we get an IntArrayRef argument with e.g. sizes
// for view. When tracing, those might be tensors, which let us encode extra
// data dependencies, but once they get to the ATen call where we actually have
// the tracing logic, they get converted into a raw IntArrayRef, and we lose
// all information. To prevent this, we temporarily stash it in here.
struct ArgumentStash {
  // Per-element trace of an IntArrayRef argument; slots default to nullptr
  // and are filled only for elements that were traced Values.
  struct IntArrayRefTrace : std::vector<Value*> {
    IntArrayRefTrace(size_t size) : std::vector<Value*>(size, nullptr) {}
  };

  // NOTE(review): consults only `intlists`, not `values` — confirm callers
  // depend on exactly this semantics before changing it.
  static bool empty() {
    return stash.intlists.empty();
  }

  TORCH_API static void stashIntArrayRefElem(
      const std::string& arg_name,
      size_t size,
      size_t idx,
      const Variable& var);

  static bool hasIntArrayRef(const std::string& arg_name) {
    return stash.intlists.count(arg_name) > 0;
  }

  // Removes and returns the stashed trace for `arg_name`; throws (via .at)
  // if nothing was stashed under that name.
  static IntArrayRefTrace popIntArrayRef(const std::string& arg_name) {
    auto info = std::move(stash.intlists.at(arg_name));
    stash.intlists.erase(arg_name);
    return info;
  }

  // Value stashing: Use these methods to stash arguments which correspond
  // to regular Value*'s in the graph. i.e. they don't require special
  // handling like in the case of IntArrayRefs
  TORCH_API static void stashValue(
      const std::string& arg_name,
      size_t idx,
      const Variable& var,
      const c10::TypePtr& type = nullptr);

  static bool hasValue(const std::string& arg_name) {
    return stash.values.count(arg_name) > 0;
  }

  // Removes and returns the stashed Value; throws (via .at) if absent.
  static Value* popValue(const std::string& arg_name) {
    auto info = stash.values.at(arg_name);
    stash.values.erase(arg_name);
    return info;
  }

 private:
  // One stash per thread, matching the "thread local place" contract above.
  static thread_local ArgumentStash stash;
  std::unordered_map<std::string, IntArrayRefTrace> intlists;
  std::unordered_map<std::string, Value*> values;
};
|
| 152 |
+
|
| 153 |
+
// Retrieve or set the current tracing state. Returns a nullptr if tracing is
// disabled.
TORCH_API const std::shared_ptr<TracingState>& getTracingState();
TORCH_API void setTracingState(std::shared_ptr<TracingState> state);

// True iff a tracing state is currently installed.
inline bool isTracing() {
  return static_cast<bool>(getTracingState());
}
|
| 161 |
+
|
| 162 |
+
using warn_fn_type = void (*)(const std::string& msg);
// Canonical reason strings passed as `_kind` / `_reason` to warn().
TORCH_API extern const char* WARN_PYTHON_DATAFLOW;
TORCH_API extern const char* WARN_CONSTRUCTOR;
TORCH_API extern const char* WARN_RESIZE;
TORCH_API extern const char* STRICT_TRACER_MSG;
TORCH_API void _do_warn(const char* _reason, const char* _kind);
// Emit a tracer warning — but only while tracing is active and warnings are
// enabled on the current TracingState.
inline void warn(const char* _reason, const char* _kind = nullptr) {
  if (const auto& state = getTracingState()) {
    if (!state->warn)
      return;
    _do_warn(_reason, _kind);
  }
}
// Install a custom sink for tracer warnings.
TORCH_API void setWarn(warn_fn_type fn);
|
| 176 |
+
|
| 177 |
+
// RAII guard that suppresses tracer warnings for its lifetime and restores
// the previous setting on destruction. No-op when tracing is disabled.
struct TORCH_API NoWarn {
  NoWarn() : state(getTracingState()) {
    if (state) {
      prev = state->warn;
      state->warn = false;
    }
  }
  ~NoWarn() {
    if (state) {
      state->warn = prev;
    }
  }
  // Holds a strong reference so the state outlives the guard.
  std::shared_ptr<TracingState> state;
  bool prev{false};
};
|
| 192 |
+
|
| 193 |
+
// RAII guard that opens a nested tracing frame (value-mapping scope) on
// construction and closes it on destruction.
// NOTE(review): dereferences getTracingState() unconditionally — only safe
// to use while tracing is active; confirm at call sites.
struct WithNestedTracingFrame {
  WithNestedTracingFrame() {
    getTracingState()->enterFrame();
  }

  ~WithNestedTracingFrame() {
    getTracingState()->leaveFrame();
  }
};
|
| 202 |
+
// Record (or override how to record) the source location for a node.
TORCH_API void recordSourceLocation(Node* n);
TORCH_API void setRecordSourceLocation(void (*v)(Node*));

// Obtain (or override how to obtain) the current Python call stack.
TORCH_API std::vector<StackEntry> pythonCallstack();
TORCH_API void setPythonCallstack(std::vector<StackEntry> (*v)());

// Having finished adding a new 'node' to the graph IR 'setValueTrace'
// associates this node with an output variable, so that further operations
// involving this variable know which node in the IR to reference.
TORCH_API void setValueTrace(const IValue& v, Value* value);

TORCH_API void delValueTrace(const IValue& var);

// Pauses tracing; the returned callable presumably resumes it — confirm in
// tracer.cpp before relying on re-entrancy.
TORCH_API std::function<void()> pauseTracing();

TORCH_API Value* getValueTrace(const IValue& var);

// Runs `traced_fn` on `inputs` while recording a graph; returns the tracing
// state (which holds the graph) together with the resulting stack.
TORCH_API std::pair<std::shared_ptr<TracingState>, Stack> trace(
    Stack inputs,
    const std::function<Stack(Stack)>& traced_fn,
    std::function<std::string(const Variable&)> var_name_lookup_fn,
    bool strict = true,
    bool force_outplace = false,
    Module* self = nullptr,
    const std::vector<std::string>& argument_names = {});

// Abandons the in-progress trace.
TORCH_API void abandon();
|
| 229 |
+
|
| 230 |
+
// NB: those serve both as an intermediate steps in addInputs below,
// as well as the overloads that terminate template recursion
//
// Each overload records one named argument of the corresponding C++ type as
// an input of node `n` in the traced graph.
TORCH_API void addInputs(Node* n, const char* name, int64_t value);
TORCH_API void addInputs(Node* n, const char* name, const c10::SymInt& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    std::optional<int64_t> value);
TORCH_API void addInputs(Node* n, const char* name, bool value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<bool>& value);
TORCH_API void addInputs(Node* n, const char* name, double value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<double>& value);
TORCH_API void addInputs(Node* n, const char* name, const at::Scalar& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<at::Scalar>& value);
TORCH_API void addInputs(Node* n, const char* name, const at::Tensor& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<at::Tensor>& value);
TORCH_API void addInputs(Node* n, const char* name, ArrayRef<int64_t> value);
TORCH_API void addInputs(Node* n, const char* name, c10::SymIntArrayRef value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    std::optional<c10::SymInt> value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<ArrayRef<int64_t>>& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const at::OptionalIntArrayRef& opt_value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const at::OptionalSymIntArrayRef& opt_value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    ArrayRef<at::Tensor> value,
    bool allow_undefined = false);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::vector<at::Tensor>& value,
    bool allow_undefined = false);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    at::ITensorListRef value,
    bool allow_undefined = false);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const List<std::optional<at::Tensor>>& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    ArrayRef<c10::intrusive_ptr<c10::ivalue::Object>> value,
    const c10::ClassTypePtr& class_type);
TORCH_API void addInputs(Node* n, const char* name, ArrayRef<double> value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<ArrayRef<double>>& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const c10::string_view value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<c10::string_view>& value);
TORCH_API void addInputs(Node* n, const char* name, at::Device value);
TORCH_API void addInputs(Node* n, const char* name, c10::Stream stream);
TORCH_API void addInputs(Node* n, const char* name, at::Layout value);
TORCH_API void addInputs(Node* n, const char* name, at::ScalarType value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<at::ScalarType>& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<at::Device>& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<at::Layout>& value);
TORCH_API void addInputs(Node* n, const char* name, at::MemoryFormat value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    std::optional<at::DimnameList> value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<at::MemoryFormat>& value);
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const std::optional<at::Generator>& value);
|
| 342 |
+
|
| 343 |
+
// Unsupported-type overloads: fail loudly at runtime when the tracer is
// handed an argument type it cannot record.
inline void addInputs(
    Node* n,
    const char* name,
    const std::vector<bool>& value) {
  AT_ERROR("Tracing a list of bool type is currently not supported!");
}

// Terminates overload resolution for arbitrary element types not covered by
// the concrete ArrayRef overloads above.
template <typename T>
void addInputs(Node* n, const char* name, ArrayRef<T> value) {
  AT_ERROR("Tracing a list of arbitrary type is currently not supported!");
}
template <typename K, typename V>
void addInputs(
    Node* n,
    const char* name,
    const std::unordered_map<K, V>& value) {
  AT_ERROR("Tracing a dict of arbitrary types is currently not supported!");
}

template <size_t N>
void addInputs(Node* n, const char* name, std::array<bool, N> value) {
  throw std::runtime_error(
      "Found an unsupported argument type in the JIT tracer. File a bug report.");
}
|
| 367 |
+
|
| 368 |
+
// Overload for custom-class (TorchBind) object instances.
TORCH_API void addInputs(
    Node* n,
    const char* name,
    const c10::intrusive_ptr<c10::ivalue::Object>& obj);

// NOTE(review): name suggests these guard tensor uniqueness when tracing
// with force_outplace — confirm exact semantics in tracer.cpp.
TORCH_API void ensureUniqueIfOutOfPlaced(
    const char* name,
    const at::Tensor& tensor);
TORCH_API void ensureUniqueIfOutOfPlaced(
    const char* name,
    const std::optional<at::Tensor>& tensor);
|
| 379 |
+
|
| 380 |
+
// Catch-all for unsupported output types. The enable_if excludes every type
// that has a real TORCH_API overload below, so those are preferred by
// overload resolution and this template only fires for genuinely
// unsupported types.
template <
    typename T,
    typename = std::enable_if_t<
        (!std::is_convertible_v<std::decay_t<T>, at::TensorList> &&
         !std::is_convertible_v<std::decay_t<T>, c10::List<at::Tensor>> &&
         !std::is_convertible_v<std::decay_t<T>, at::Tensor> &&
         !std::is_convertible_v<
             std::decay_t<T>,
             c10::intrusive_ptr<c10::ivalue::Object>>)>>
void addOutput(Node* node, T&&) {
  AT_ERROR(
      "Found an unsupported argument type ",
      c10::demangle_type<T>(),
      " in the JIT tracer. File a bug report.");
}
// Record outputs of `node` for the supported result types.
TORCH_API void addOutput(Node* node, const at::Tensor& tensor);
TORCH_API void setOutput(Value* value, const at::Tensor& output);
TORCH_API void addOutput(Node* node, const std::vector<at::Tensor>& list);
TORCH_API void addOutput(Node* node, const c10::List<at::Tensor>& list);
TORCH_API void addOutput(
    Node* node,
    const c10::intrusive_ptr<c10::ivalue::Object>& output);

// Size/numel of `var` as traced variables — presumably so the dependency on
// the shape stays visible in the graph; confirm in tracer.cpp.
TORCH_API autograd::Variable getSizeOf(
    const autograd::Variable& var,
    int64_t dim);

TORCH_API autograd::Variable getNumelOf(const autograd::Variable& var);
|
| 408 |
+
|
| 409 |
+
} // namespace tracer
|
| 410 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/versioned_symbols.h
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <caffe2/serialize/versions.h>
|
| 4 |
+
#include <torch/csrc/Export.h>
|
| 5 |
+
#include <torch/csrc/jit/api/module.h>
|
| 6 |
+
|
| 7 |
+
#include <cstdint>
|
| 8 |
+
|
| 9 |
+
namespace torch::jit {
// Maps the given symbol into an implementation of its behavior at the
// given version.
// See note [Versioned Symbols]
TORCH_API Symbol
get_symbol_for_version(const Symbol name, const uint64_t version);

// Maps the given kind to the minimum version that supports it.
// See note [Dynamic Versions and torch.jit.save vs. torch.save]
TORCH_API uint64_t get_min_version_for_kind(const NodeKind& kind);
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_data.h
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/TensorBase.h>
|
| 4 |
+
#include <c10/core/Device.h>
|
| 5 |
+
#include <torch/csrc/jit/mobile/module.h>
|
| 6 |
+
#include <optional>
|
| 7 |
+
|
| 8 |
+
#include <istream>
|
| 9 |
+
#include <map>
|
| 10 |
+
#include <string>
|
| 11 |
+
|
| 12 |
+
namespace torch::jit {

/**
 * Loads named parameters from the serialized data in @p in.
 *
 * @param in stream positioned at the start of the serialized data.
 * @param device optional target device for the loaded tensors.
 * Calls #TORCH_CHECK() if the data format is not recognized.
 */
TORCH_API std::map<std::string, at::Tensor> _load_parameters(
    std::istream& in,
    std::optional<at::Device> device = std::nullopt);

/**
 * Loads named parameters from the serialized data in @p filename.
 *
 * Calls #TORCH_CHECK() if the data format is not recognized.
 */
TORCH_API std::map<std::string, at::Tensor> _load_parameters(
    const std::string& filename,
    std::optional<at::Device> device = std::nullopt);

// NOTE: Please prefer using _load_parameters over using the function below.
// Extracts a name -> tensor map from an already-loaded mobile Module.
TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
    const mobile::Module& module);

} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/interpreter.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <vector>
|
| 4 |
+
|
| 5 |
+
#include <torch/csrc/jit/mobile/code.h>
|
| 6 |
+
#include <torch/csrc/jit/mobile/frame.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::jit::mobile {

// Executes mobile bytecode (Code) against a value stack.
struct InterpreterState {
  TORCH_API explicit InterpreterState(const Code& code);
  // Runs the loaded code; `stack` carries inputs in and results out.
  // NOTE(review): the meaning of the bool return is not visible here —
  // confirm against interpreter.cpp.
  TORCH_API bool run(Stack& stack);

 private:
  void enterFrame(const Code&);
  void leaveFrame();
  void saveExceptionDebugHandles();
  void callFunction(torch::jit::Function& f, Stack& stack);

  // Access the register slot with the given index.
  c10::IValue& reg(size_t reg);
  std::vector<c10::IValue> registers_;
  std::vector<Frame> frames_; // call-frame stack
};

// Debug handles saved when the interpreter encountered an exception
// (see saveExceptionDebugHandles).
const std::vector<DebugHandle>& getInterpretersExceptionDebugHandles();
} // namespace torch::jit::mobile
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_bytecode.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/csrc/jit/mobile/function.h>
|
| 3 |
+
|
| 4 |
+
namespace torch::jit::mobile {
using c10::IValue;
// Parsers that populate a mobile::Function from the components of a
// serialized bytecode table (instructions, constants, types, register size).
TORCH_API void parseInstructions(
    const std::string& function_name,
    c10::ivalue::TupleElements&& ins_list,
    c10::ivalue::TupleElements& debug_handles_m_tuple,
    mobile::Function* function);
TORCH_API void parseConstants(
    const c10::ivalue::TupleElements& consts_list,
    mobile::Function* function);
TORCH_API void parseTypes(
    const c10::ivalue::TupleElements& types_list,
    mobile::Function* function);
TORCH_API void parseRegisterSize(size_t rsize, mobile::Function* function);
// Applies operator upgraders to `function` based on the operator version it
// was serialized with.
TORCH_API void applyUpgrader(
    mobile::Function* function,
    uint64_t operator_version);
} // namespace torch::jit::mobile
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/parse_operators.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/csrc/jit/mobile/function.h>
|
| 3 |
+
|
| 4 |
+
namespace torch::jit {
using c10::IValue;

// Bit flags controlling how a mobile module is loaded.
enum MobileModuleLoadOptions {
  // Check operators while loading (presumably that each referenced op
  // resolves — confirm in parse_operators.cpp).
  OPERATOR_CHECK = 1,
  // PARSE_ALL_EXTRA_FILE_MAPS is used to gate for ExtraFileMaps to pull all
  // files automatically without explicit entries mapping. Refer to PR for a
  // detail: https://github.com/pytorch/pytorch/pull/99747
  PARSE_ALL_EXTRA_FILE_MAPS = 2,
};

const uint64_t kDefaultMobileLoadOptions =
    MobileModuleLoadOptions::OPERATOR_CHECK;

namespace mobile {

// Resolves the serialized operator list into operators on `function`,
// honoring the MobileModuleLoadOptions bits in `module_load_options`.
TORCH_API void parseOperators(
    c10::ivalue::TupleElements&& ops_list,
    const uint64_t& module_load_options,
    mobile::Function* function);
} // namespace mobile
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/promoted_prim_ops.h
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <torch/csrc/jit/mobile/prim_ops_registery.h>
|
| 3 |
+
#include <torch/csrc/jit/mobile/register_ops_common_utils.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::jit {

// Stack-convention implementations of prim ops promoted into the mobile
// runtime: each operates in place on `stack` (arguments in, results out).

void tupleIndex(Stack& stack);

void raiseException(Stack& stack);

void is(Stack& stack);

void unInitialized(Stack& stack);

void isNot(Stack& stack);

void aten_format(Stack& stack);

void size(Stack& stack);

void sym_size(Stack& stack);

void sym_size_int(Stack& stack);

void sym_stride_int(Stack& stack);

void sym_numel(Stack& stack);

void sym_storage_offset(Stack& stack);

void sym_stride(Stack& stack);

void device(Stack& stack);

void device_with_index(Stack& stack);

void dtype(Stack& stack);

void layout(Stack& stack);

void toPrimDType(Stack& stack);

void dim(Stack& stack);

void _not(Stack& stack);

void boolTensor(Stack& stack);

void toList(Stack& stack);

void numToTensorScalar(Stack& stack);

void isCuda(Stack& stack);

void numToTensorBool(Stack& stack);

void dictIndex(Stack& stack);

void raiseExceptionWithMessage(Stack& stack);

} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/quantization.h
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Export.h>
|
| 4 |
+
#include <string>
|
| 5 |
+
|
| 6 |
+
namespace torch::jit::mobile {
class Module;
namespace quantization {
/*
 * Device side PTQ API.
 * Once the model has been prepared for quantization on server side, such model
 * is sent to device. On device side the model is further trained. At the end of
 * the training, before the model is readied for inference, we need to quantize
 * the model.
 * Usage of this API is as follows.
 * PTQQuanizationHelper ptq_helper;
 * ptq_helper.quantize_dynamic(m, "forward");
 * Args:
 * m: Captured by reference, an instance of mobile::Module. This module will be
 * mutated in place to replace its <method_name> method with quantized
 * equivalent. method:name: Name of the method to be quantized. AOT preparation
 * for quantization must also have been done for this method. Returns: In place
 * mutated `m` whose size should be smaller due to weight quantization and whose
 * <method_name> method should use quantized ops
 */
// NOTE: the class name ("Quanization") is a long-standing typo preserved for
// ABI/API compatibility — do not rename.
class TORCH_API PTQQuanizationHelper {
 public:
  PTQQuanizationHelper() = default;
  // Mutates `m` in place, replacing `method_name` with a dynamically
  // quantized equivalent (see the usage notes above).
  void quantize_dynamic(
      torch::jit::mobile::Module& m,
      const std::string& method_name);
};
} // namespace quantization
} // namespace torch::jit::mobile
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind_utils.h
ADDED
|
@@ -0,0 +1,1279 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <ATen/core/jit_type.h>
|
| 5 |
+
#include <ATen/core/qualified_name.h>
|
| 6 |
+
#include <ATen/core/stack.h>
|
| 7 |
+
#include <pybind11/complex.h>
|
| 8 |
+
#include <pybind11/pybind11.h>
|
| 9 |
+
#include <pybind11/pytypes.h>
|
| 10 |
+
#include <torch/csrc/Device.h>
|
| 11 |
+
#include <torch/csrc/Dtype.h>
|
| 12 |
+
#include <torch/csrc/Export.h>
|
| 13 |
+
#include <torch/csrc/Layout.h>
|
| 14 |
+
#include <torch/csrc/QScheme.h>
|
| 15 |
+
#include <torch/csrc/Stream.h>
|
| 16 |
+
#include <torch/csrc/jit/api/module.h>
|
| 17 |
+
#include <torch/csrc/jit/frontend/schema_matching.h>
|
| 18 |
+
#include <torch/csrc/jit/frontend/tracer.h>
|
| 19 |
+
#include <torch/csrc/jit/python/module_python.h>
|
| 20 |
+
#include <torch/csrc/jit/python/python_custom_class.h>
|
| 21 |
+
#include <torch/csrc/jit/python/python_tracer.h>
|
| 22 |
+
#include <torch/csrc/jit/resource_guard.h>
|
| 23 |
+
#include <torch/csrc/jit/runtime/operator.h>
|
| 24 |
+
#include <torch/csrc/utils/pybind.h>
|
| 25 |
+
#include <torch/csrc/utils/python_arg_parser.h>
|
| 26 |
+
#include <torch/csrc/utils/six.h>
|
| 27 |
+
#ifdef USE_DISTRIBUTED
|
| 28 |
+
#include <torch/csrc/distributed/rpc/py_rref.h>
|
| 29 |
+
#include <torch/csrc/distributed/rpc/rref_impl.h>
|
| 30 |
+
#endif
|
| 31 |
+
|
| 32 |
+
#include <ATen/core/function_schema.h>
|
| 33 |
+
#include <c10/core/Stream.h>
|
| 34 |
+
#include <c10/util/Exception.h>
|
| 35 |
+
#include <c10/util/irange.h>
|
| 36 |
+
#include <optional>
|
| 37 |
+
|
| 38 |
+
#include <algorithm>
|
| 39 |
+
#include <cstddef>
|
| 40 |
+
#include <string>
|
| 41 |
+
#include <utility>
|
| 42 |
+
#include <vector>
|
| 43 |
+
|
| 44 |
+
// The visibility attribute is to avoid a warning about storing a field in the
|
| 45 |
+
// struct that has a different visibility (from pybind) than the struct.
|
| 46 |
+
#ifdef _WIN32
|
| 47 |
+
#define VISIBILITY_HIDDEN
|
| 48 |
+
#else
|
| 49 |
+
#define VISIBILITY_HIDDEN __attribute__((visibility("hidden")))
|
| 50 |
+
#endif
|
| 51 |
+
|
| 52 |
+
namespace torch::jit {
|
| 53 |
+
|
| 54 |
+
using ResolutionCallback = std::function<py::object(std::string)>;
|
| 55 |
+
|
| 56 |
+
void clear_registered_instances(void* ptr);
|
| 57 |
+
|
| 58 |
+
TORCH_PYTHON_API IValue toIValue(
|
| 59 |
+
py::handle obj,
|
| 60 |
+
const TypePtr& type,
|
| 61 |
+
std::optional<int32_t> N = std::nullopt);
|
| 62 |
+
|
| 63 |
+
TORCH_PYTHON_API py::object toPyObject(IValue ivalue);
|
| 64 |
+
|
| 65 |
+
// Hack to overload the behavior of toIValue to accept Python
|
| 66 |
+
// numbers in places where a Tensor is expected
|
| 67 |
+
// See also torch::should_allow_numbers_as_tensors
|
| 68 |
+
class ToIValueAllowNumbersAsTensors {
|
| 69 |
+
bool old_;
|
| 70 |
+
|
| 71 |
+
public:
|
| 72 |
+
ToIValueAllowNumbersAsTensors(bool enable);
|
| 73 |
+
~ToIValueAllowNumbersAsTensors();
|
| 74 |
+
};
|
| 75 |
+
|
| 76 |
+
// Wrap Python function to guard deref
|
| 77 |
+
// NB: Need VISIBILITY_HIDDEN for silencing compiler error,
|
| 78 |
+
// 'torch::jit::PythonFunctionGuard' declared with greater visibility than the
|
| 79 |
+
// type of its field 'torch::jit::PythonFunctionGuard::func_'
|
| 80 |
+
struct VISIBILITY_HIDDEN PythonFunctionGuard {
|
| 81 |
+
explicit PythonFunctionGuard(py::function func) : func_(std::move(func)) {}
|
| 82 |
+
|
| 83 |
+
~PythonFunctionGuard() {
|
| 84 |
+
pybind11::gil_scoped_acquire ag;
|
| 85 |
+
func_.dec_ref();
|
| 86 |
+
// explicitly setting PyObject* to nullptr to prevent py::object's dtor to
|
| 87 |
+
// decref on the PyObject again.
|
| 88 |
+
// See Note [Destructing py::object] in python_ivalue.h
|
| 89 |
+
func_.ptr() = nullptr;
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
py::function func_;
|
| 93 |
+
};
|
| 94 |
+
|
| 95 |
+
// The PythonFutureWrapper for ivalue::Future
|
| 96 |
+
//
|
| 97 |
+
// NB: VISIBILITY_HIDDEN is for silencing compiling error,
|
| 98 |
+
// "error: 'torch::jit::PythonFutureWrapper' declared with greater visibility
|
| 99 |
+
// than the type of its field 'torch::jit::PythonFutureWrapper::unwrap_func'
|
| 100 |
+
// [-Werror=attributes]"
|
| 101 |
+
//
|
| 102 |
+
// NB: inherit from enable_shared_from_this because then(py::function) needs to
|
| 103 |
+
// get a shared_ptr from this pointer.
|
| 104 |
+
struct VISIBILITY_HIDDEN PythonFutureWrapper
|
| 105 |
+
: std::enable_shared_from_this<PythonFutureWrapper> {
|
| 106 |
+
using UnwrapFunc = std::function<void(py::object)>;
|
| 107 |
+
|
| 108 |
+
explicit PythonFutureWrapper(
|
| 109 |
+
c10::intrusive_ptr<c10::ivalue::Future> fut,
|
| 110 |
+
std::optional<UnwrapFunc> unwrap_func = std::nullopt)
|
| 111 |
+
: fut(std::move(fut)), unwrap_func(std::move(unwrap_func)) {}
|
| 112 |
+
|
| 113 |
+
explicit PythonFutureWrapper(const PythonFutureWrapper&) = delete;
|
| 114 |
+
PythonFutureWrapper& operator=(const PythonFutureWrapper&) = delete;
|
| 115 |
+
|
| 116 |
+
bool done() {
|
| 117 |
+
return fut->completed();
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
py::object value() {
|
| 121 |
+
// acquiring GIL as toPyObject creates new py::object
|
| 122 |
+
// without grabbing the GIL.
|
| 123 |
+
py::gil_scoped_acquire acquire;
|
| 124 |
+
py::object py_obj = toPyObject(fut->value());
|
| 125 |
+
// unwrap_func is a general compositional function that takes in a
|
| 126 |
+
// py::object and executes some python function. It is currently mostly used
|
| 127 |
+
// to throw python exceptions.
|
| 128 |
+
if (unwrap_func) {
|
| 129 |
+
(*unwrap_func)(py_obj);
|
| 130 |
+
}
|
| 131 |
+
return py_obj;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
py::object wait() {
|
| 135 |
+
fut->wait();
|
| 136 |
+
if (jit::tracer::isTracing()) {
|
| 137 |
+
auto graph = jit::tracer::getTracingState()->graph;
|
| 138 |
+
|
| 139 |
+
Value* fut_val = jit::tracer::getValueTrace(fut);
|
| 140 |
+
auto output = graph->insert(aten::wait, {fut_val});
|
| 141 |
+
jit::tracer::setValueTrace(fut->value(), output);
|
| 142 |
+
}
|
| 143 |
+
return value();
|
| 144 |
+
}
|
| 145 |
+
|
| 146 |
+
// The py::function cb arg must take a std::shared_ptr<PythonFutureWrapper>
|
| 147 |
+
// (i.e., torch._C.Future) as the only argument. If the type mismatches, an
|
| 148 |
+
// error will be thrown when waiting for the value of this returned Future.
|
| 149 |
+
std::shared_ptr<PythonFutureWrapper> then(py::function cb) {
|
| 150 |
+
// We need this an additional layer of wrapper here to guard the
|
| 151 |
+
// destruction of the py::function object. Because, the
|
| 152 |
+
// Future owns a reference to the py::function in its callback
|
| 153 |
+
// vector, but Future does not acquire GIL on destruction.
|
| 154 |
+
auto pf = std::make_shared<PythonFunctionGuard>(std::move(cb));
|
| 155 |
+
|
| 156 |
+
return std::make_shared<jit::PythonFutureWrapper>(fut->then(
|
| 157 |
+
// Capture a copy of the ivalue::Future instead of the `this` pointer
|
| 158 |
+
// because the PythonFutureWrapper object could have been deleted
|
| 159 |
+
// when the callbacks are fired. For example, RPC only captures the
|
| 160 |
+
// ivalue::Future instead of PythonFutureWrapper in JitFuture's
|
| 161 |
+
// callback functions. Hence, if user code does not hold a reference to
|
| 162 |
+
// this PythonFutureWrapper object, there is no guarantee that the
|
| 163 |
+
// PythonFutureWrapper is still valid when running the callback.
|
| 164 |
+
[pyFut(this->getPtr()),
|
| 165 |
+
pf(std::move(pf))](c10::ivalue::Future& /* unused */) -> IValue {
|
| 166 |
+
try {
|
| 167 |
+
pybind11::gil_scoped_acquire ag;
|
| 168 |
+
return toIValue(pf->func_(pyFut), PyObjectType::get());
|
| 169 |
+
} catch (py::error_already_set& e) {
|
| 170 |
+
auto err = std::runtime_error(c10::str(
|
| 171 |
+
"Got the following error when running the callback: ",
|
| 172 |
+
e.what()));
|
| 173 |
+
{
|
| 174 |
+
pybind11::gil_scoped_acquire ag;
|
| 175 |
+
// Release ownership on py::objects and also restore Python
|
| 176 |
+
// Error Indicator.
|
| 177 |
+
e.restore();
|
| 178 |
+
// Clear the Python Error Indicator as we has recorded the
|
| 179 |
+
// exception in the response message.
|
| 180 |
+
PyErr_Clear();
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
throw std::runtime_error(err);
|
| 184 |
+
}
|
| 185 |
+
},
|
| 186 |
+
PyObjectType::get()));
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
void add_done_callback(py::function cb) {
|
| 190 |
+
auto pf = std::make_shared<PythonFunctionGuard>(std::move(cb));
|
| 191 |
+
// NOLINTNEXTLINE(modernize-avoid-bind)
|
| 192 |
+
fut->addCallback(std::bind(
|
| 193 |
+
[pyFut(this->getPtr())](
|
| 194 |
+
const std::shared_ptr<PythonFunctionGuard>& pf) {
|
| 195 |
+
try {
|
| 196 |
+
pybind11::gil_scoped_acquire ag;
|
| 197 |
+
pf->func_(pyFut);
|
| 198 |
+
} catch (py::error_already_set& e) {
|
| 199 |
+
{
|
| 200 |
+
pybind11::gil_scoped_acquire ag;
|
| 201 |
+
// Release ownership on py::objects and also restore Python
|
| 202 |
+
// Error Indicator.
|
| 203 |
+
e.restore();
|
| 204 |
+
// Clear the Python Error Indicator as we has recorded the
|
| 205 |
+
// exception in the response message.
|
| 206 |
+
PyErr_Clear();
|
| 207 |
+
}
|
| 208 |
+
// Log and ignore exceptions raised through the callback
|
| 209 |
+
LOG(ERROR) << "Got the following error when running the callback: "
|
| 210 |
+
<< e.what();
|
| 211 |
+
|
| 212 |
+
} catch (const std::exception& e) {
|
| 213 |
+
// Log and ignore exceptions raised through the callback
|
| 214 |
+
LOG(ERROR) << "Got the following error when running the callback: "
|
| 215 |
+
<< e.what();
|
| 216 |
+
}
|
| 217 |
+
},
|
| 218 |
+
std::move(pf)));
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
void markCompleted(const py::object& pyValue) {
|
| 222 |
+
DCHECK(PyGILState_Check());
|
| 223 |
+
IValue value = toIValue(pyValue, PyObjectType::get());
|
| 224 |
+
|
| 225 |
+
py::gil_scoped_release release;
|
| 226 |
+
fut->markCompleted(std::move(value));
|
| 227 |
+
}
|
| 228 |
+
|
| 229 |
+
c10::intrusive_ptr<c10::ivalue::Future> fut;
|
| 230 |
+
// unwrap_func works like a callback for the value returned by
|
| 231 |
+
// PythonFutureWrapper::wait().
|
| 232 |
+
std::optional<UnwrapFunc> unwrap_func;
|
| 233 |
+
|
| 234 |
+
private:
|
| 235 |
+
std::shared_ptr<PythonFutureWrapper> getPtr() {
|
| 236 |
+
return shared_from_this();
|
| 237 |
+
}
|
| 238 |
+
};
|
| 239 |
+
|
| 240 |
+
// The PythonAwaitWrapper for ivalue::Await
|
| 241 |
+
//
|
| 242 |
+
// Expresses delayed function execution with Lazy semantic.
|
| 243 |
+
// i.e. Await[W] in eager mode can be used as W.
|
| 244 |
+
// When the attribute of W type is requested, Await[W] will return the
|
| 245 |
+
// attribute of W, transparently calling wait() beforehand.
|
| 246 |
+
// No Lazy semantic for script, explicit wait(Await[W]) -> W must be called to
|
| 247 |
+
// convert to type W.
|
| 248 |
+
//
|
| 249 |
+
// The Await object takes shared ownership of specified function and the
|
| 250 |
+
// arguments. After first call for wait() it owns the result. Deliberately no
|
| 251 |
+
// type inference for eager mode.
|
| 252 |
+
struct VISIBILITY_HIDDEN PythonAwaitWrapper
|
| 253 |
+
: std::enable_shared_from_this<PythonAwaitWrapper> {
|
| 254 |
+
explicit PythonAwaitWrapper(c10::intrusive_ptr<c10::ivalue::Await> aw)
|
| 255 |
+
: aw_(std::move(aw)) {}
|
| 256 |
+
explicit PythonAwaitWrapper(py::handle input) {
|
| 257 |
+
args_ = py::tuple(1u);
|
| 258 |
+
args_[0] = input;
|
| 259 |
+
auto type = PyObjectType::get();
|
| 260 |
+
aw_ = c10::make_intrusive<c10::ivalue::Await>(type);
|
| 261 |
+
aw_->markCompleted(toIValue(input, type));
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
explicit PythonAwaitWrapper(py::function pf, py::tuple args)
|
| 265 |
+
: args_(std::move(args)) {
|
| 266 |
+
pyfg_ = std::make_shared<torch::jit::PythonFunctionGuard>(std::move(pf));
|
| 267 |
+
|
| 268 |
+
std::function<IValue()> f = [fg(pyfg_), &args(args_)]() {
|
| 269 |
+
pybind11::gil_scoped_acquire ag;
|
| 270 |
+
return toIValue(fg->func_(*args), PyObjectType::get());
|
| 271 |
+
};
|
| 272 |
+
aw_ = c10::make_intrusive<c10::ivalue::Await>(
|
| 273 |
+
PyObjectType::get(), std::move(f));
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
explicit PythonAwaitWrapper(const PythonAwaitWrapper&) = delete;
|
| 277 |
+
PythonAwaitWrapper& operator=(const PythonAwaitWrapper&) = delete;
|
| 278 |
+
|
| 279 |
+
py::object wait() {
|
| 280 |
+
py::gil_scoped_acquire acquire;
|
| 281 |
+
return toPyObject(aw_->wait());
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
// Nowait semantic means trivial case when Await is constructed from the
|
| 285 |
+
// result
|
| 286 |
+
bool is_nowait() {
|
| 287 |
+
return pyfg_ == nullptr;
|
| 288 |
+
}
|
| 289 |
+
|
| 290 |
+
const py::function fn() {
|
| 291 |
+
TORCH_CHECK(
|
| 292 |
+
pyfg_, "Await constructed as awaitable_nowait does not have fn");
|
| 293 |
+
return pyfg_->func_;
|
| 294 |
+
}
|
| 295 |
+
|
| 296 |
+
const py::tuple args() {
|
| 297 |
+
return args_;
|
| 298 |
+
}
|
| 299 |
+
|
| 300 |
+
TypePtr type() {
|
| 301 |
+
return aw_->type();
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
c10::intrusive_ptr<c10::ivalue::Await> aw_;
|
| 305 |
+
std::shared_ptr<torch::jit::PythonFunctionGuard> pyfg_;
|
| 306 |
+
py::tuple args_;
|
| 307 |
+
|
| 308 |
+
private:
|
| 309 |
+
std::shared_ptr<PythonAwaitWrapper> getPtr() {
|
| 310 |
+
return shared_from_this();
|
| 311 |
+
}
|
| 312 |
+
};
|
| 313 |
+
|
| 314 |
+
// error reporting: when reporting user-caused errors, these functions should
|
| 315 |
+
// not use AT_ERROR macros, since these macros add stack trace information
|
| 316 |
+
// that is confusing to display to the end user since it always reports
|
| 317 |
+
// locations in libtorch code rather than user code.
|
| 318 |
+
|
| 319 |
+
inline std::shared_ptr<CompilationUnit> get_python_cu() {
|
| 320 |
+
return py::module::import("torch.jit._state")
|
| 321 |
+
.attr("_python_cu")
|
| 322 |
+
.cast<std::shared_ptr<CompilationUnit>>();
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
struct TypedIValue : public std::pair<IValue, TypePtr> {
|
| 326 |
+
using pair::pair;
|
| 327 |
+
|
| 328 |
+
IValue& ivalue() {
|
| 329 |
+
return this->first;
|
| 330 |
+
}
|
| 331 |
+
TypePtr& type() {
|
| 332 |
+
return this->second;
|
| 333 |
+
}
|
| 334 |
+
};
|
| 335 |
+
|
| 336 |
+
inline TypedIValue toDictKeyIValue(py::handle key) {
|
| 337 |
+
if (py::isinstance<py::str>(key)) {
|
| 338 |
+
return TypedIValue(
|
| 339 |
+
ConstantString::create(py::cast<std::string>(key)), StringType::get());
|
| 340 |
+
} else if (py::isinstance<py::int_>(key)) {
|
| 341 |
+
return TypedIValue(py::cast<int64_t>(key), IntType::get());
|
| 342 |
+
} else if (py::isinstance<py::float_>(key)) {
|
| 343 |
+
return TypedIValue(py::cast<double>(key), FloatType::get());
|
| 344 |
+
} else {
|
| 345 |
+
AT_ERROR("Dictionary inputs may only have string, int, or float keys");
|
| 346 |
+
}
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
inline std::optional<TypePtr> unifyOrInitializeType(
|
| 350 |
+
const TypePtr& accum,
|
| 351 |
+
const TypePtr& unify) {
|
| 352 |
+
if (!accum) {
|
| 353 |
+
return unify;
|
| 354 |
+
}
|
| 355 |
+
return unifyTypes(accum, unify);
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
using InferredType = c10::InferredType;
|
| 359 |
+
|
| 360 |
+
InferredType tryToInferContainerType(py::handle input, bool primitiveTypeOnly);
|
| 361 |
+
|
| 362 |
+
// Try to infer the type of a Python object
|
| 363 |
+
// The type cannot be inferred if:
|
| 364 |
+
// input is an empty container (list, dict)
|
| 365 |
+
// input is an list with element types that cannot be unified
|
| 366 |
+
// input is an dict with key or value types that cannot be unified
|
| 367 |
+
inline InferredType tryToInferType(py::handle input) {
|
| 368 |
+
// Try tensor types
|
| 369 |
+
if (THPVariable_Check(input.ptr())) {
|
| 370 |
+
return InferredType(TensorType::get());
|
| 371 |
+
}
|
| 372 |
+
|
| 373 |
+
if (input.is_none()) {
|
| 374 |
+
return InferredType(NoneType::get());
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
if (py::isinstance<StrongFunctionPtr>(input)) {
|
| 378 |
+
auto fn = py::cast<StrongFunctionPtr>(input).function_;
|
| 379 |
+
return InferredType(FunctionType::create(fn));
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
// Try basic types first
|
| 383 |
+
if (py::isinstance<py::bool_>(input)) {
|
| 384 |
+
return InferredType(BoolType::get());
|
| 385 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 386 |
+
} else if (py::isinstance<py::int_>(input)) {
|
| 387 |
+
return InferredType(IntType::get());
|
| 388 |
+
} else if (py::isinstance<py::float_>(input)) {
|
| 389 |
+
return InferredType(FloatType::get());
|
| 390 |
+
} else if (PyComplex_CheckExact(input.ptr())) {
|
| 391 |
+
return InferredType(ComplexType::get());
|
| 392 |
+
} else if (py::isinstance<py::str>(input)) {
|
| 393 |
+
return InferredType(StringType::get());
|
| 394 |
+
} else if (THPLayout_Check(input.ptr())) {
|
| 395 |
+
return InferredType(IntType::get());
|
| 396 |
+
} else if (THPDevice_Check(input.ptr())) {
|
| 397 |
+
return InferredType(DeviceObjType::get());
|
| 398 |
+
} else if (THPGenerator_Check(input.ptr())) {
|
| 399 |
+
return InferredType(GeneratorType::get());
|
| 400 |
+
} else if (THPStream_Check(input.ptr())) {
|
| 401 |
+
return InferredType(StreamObjType::get());
|
| 402 |
+
} else if (THPDtype_Check(input.ptr())) {
|
| 403 |
+
return InferredType(IntType::get());
|
| 404 |
+
} else if (THPQScheme_Check(input.ptr())) {
|
| 405 |
+
return InferredType(IntType::get());
|
| 406 |
+
} else if (THPLayout_Check(input.ptr())) {
|
| 407 |
+
return InferredType(IntType::get());
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
auto enum_type = py::module::import("enum").attr("Enum");
|
| 411 |
+
py::bool_ isEnumValue = py::isinstance(input, enum_type);
|
| 412 |
+
if (py::cast<bool>(isEnumValue)) {
|
| 413 |
+
auto enum_class = input.attr("__class__");
|
| 414 |
+
auto enum_type = py::cast<TypePtr>(
|
| 415 |
+
py::module::import("torch.jit.annotations")
|
| 416 |
+
.attr("try_ann_to_type")(enum_class, SourceRange()));
|
| 417 |
+
return InferredType(std::move(enum_type));
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
py::bool_ isClass =
|
| 421 |
+
py::module::import("inspect").attr("isclass")(input.get_type());
|
| 422 |
+
if (py::cast<bool>(isClass)) {
|
| 423 |
+
// Assume that the class is compiled already or will compile. Invalidate
|
| 424 |
+
// this later if needed.
|
| 425 |
+
bool class_compiled = true;
|
| 426 |
+
|
| 427 |
+
// Check if the type is already compiled.
|
| 428 |
+
py::object existing_ty = py::module::import("torch.jit._state")
|
| 429 |
+
.attr("_get_script_class")(input.get_type());
|
| 430 |
+
|
| 431 |
+
if (existing_ty.is_none()) {
|
| 432 |
+
// If not, try to compile it.
|
| 433 |
+
py::bool_ can_compile = py::module::import("torch._jit_internal")
|
| 434 |
+
.attr("can_compile_class")(input.get_type());
|
| 435 |
+
|
| 436 |
+
if (py::cast<bool>(can_compile)) {
|
| 437 |
+
// Try to compile the class. This is wrapped in a try-catch because
|
| 438 |
+
// compilation of class types can raise an Exception and in that case,
|
| 439 |
+
// we want to defer to other attempts at type inference below rather
|
| 440 |
+
// than fail compilation altogether.
|
| 441 |
+
try {
|
| 442 |
+
py::module::import("torch.jit._script")
|
| 443 |
+
.attr("_recursive_compile_class")(
|
| 444 |
+
input.get_type(), SourceRange());
|
| 445 |
+
} catch (...) {
|
| 446 |
+
// Invalidate the assumption that the class compiled so that we don't
|
| 447 |
+
// look up and return its JIT type as the type for the input.
|
| 448 |
+
class_compiled = false;
|
| 449 |
+
}
|
| 450 |
+
}
|
| 451 |
+
}
|
| 452 |
+
|
| 453 |
+
// If the class compiled successfully, look up the existing JIT type by
|
| 454 |
+
// qualified name and return it.
|
| 455 |
+
if (class_compiled) {
|
| 456 |
+
auto script_class = py::module::import("torch.jit._state")
|
| 457 |
+
.attr("_get_script_class")(input.get_type());
|
| 458 |
+
|
| 459 |
+
if (!script_class.is_none()) {
|
| 460 |
+
auto class_type = py::cast<ClassTypePtr>(script_class);
|
| 461 |
+
|
| 462 |
+
if (class_type && !class_type->is_module()) {
|
| 463 |
+
return InferredType(std::move(class_type));
|
| 464 |
+
}
|
| 465 |
+
}
|
| 466 |
+
}
|
| 467 |
+
}
|
| 468 |
+
|
| 469 |
+
if (py::isinstance<Object>(input)) {
|
| 470 |
+
auto object = py::cast<Object>(input);
|
| 471 |
+
return InferredType(object.type());
|
| 472 |
+
#ifdef USE_RPC
|
| 473 |
+
} else if (py::isinstance<torch::distributed::rpc::PyRRef>(input)) {
|
| 474 |
+
auto rref_ivalue = input.cast<torch::distributed::rpc::PyRRef>().toIValue();
|
| 475 |
+
return InferredType(rref_ivalue.type());
|
| 476 |
+
#endif
|
| 477 |
+
}
|
| 478 |
+
|
| 479 |
+
auto await_type = py::module::import("torch._awaits").attr("_Await");
|
| 480 |
+
py::bool_ is_await = py::isinstance(input, await_type);
|
| 481 |
+
if (py::cast<bool>(is_await)) {
|
| 482 |
+
auto awptr = input.cast<std::shared_ptr<PythonAwaitWrapper>>();
|
| 483 |
+
return InferredType(AwaitType::create(awptr->aw_->elementType()));
|
| 484 |
+
}
|
| 485 |
+
|
| 486 |
+
if (as_module(py::cast<py::object>(input))) {
|
| 487 |
+
return InferredType("Cannot infer type of ScriptModule");
|
| 488 |
+
}
|
| 489 |
+
|
| 490 |
+
auto module_type = py::module::import("torch.nn").attr("Module");
|
| 491 |
+
py::bool_ is_module = py::isinstance(input, module_type);
|
| 492 |
+
if (py::cast<bool>(is_module)) {
|
| 493 |
+
return InferredType("Cannot infer concrete type of torch.nn.Module");
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
// Try container types
|
| 497 |
+
return tryToInferContainerType(input, false);
|
| 498 |
+
}
|
| 499 |
+
|
| 500 |
+
// This function is similar to tryToInferType, but it only tries to infer
|
| 501 |
+
// primitive types (int, float, bool, complex) or nested container of primitive
|
| 502 |
+
// types.
|
| 503 |
+
inline InferredType tryToInferPrimitiveType(py::handle input) {
|
| 504 |
+
if (input.is_none()) {
|
| 505 |
+
return InferredType(NoneType::get());
|
| 506 |
+
}
|
| 507 |
+
|
| 508 |
+
// Only primitive data type
|
| 509 |
+
if (py::isinstance<py::bool_>(input)) {
|
| 510 |
+
return InferredType(BoolType::get());
|
| 511 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 512 |
+
} else if (py::isinstance<py::int_>(input)) {
|
| 513 |
+
return InferredType(IntType::get());
|
| 514 |
+
} else if (py::isinstance<py::float_>(input)) {
|
| 515 |
+
return InferredType(FloatType::get());
|
| 516 |
+
} else if (PyComplex_CheckExact(input.ptr())) {
|
| 517 |
+
return InferredType(ComplexType::get());
|
| 518 |
+
}
|
| 519 |
+
|
| 520 |
+
// Try container types
|
| 521 |
+
return tryToInferContainerType(input, true);
|
| 522 |
+
}
|
| 523 |
+
|
| 524 |
+
// Try to infer a TorchScript type for a python container (tuple, list, or
// dict), recursing into elements.
//
// When `primitiveTypeOnly` is true, leaf elements must infer to primitive
// types (bool/float/int/complex) via tryToInferPrimitiveType; otherwise the
// general tryToInferType is used. List and dict element types are unified
// across all entries; inconsistent element types are reported as a failure.
// Returns an InferredType carrying either the inferred type or a reason for
// failure.
inline InferredType tryToInferContainerType(
    py::handle input,
    bool primitiveTypeOnly = false) {
  if (six::isTuple(input)) {
    py::tuple tuple = py::cast<py::tuple>(input);
    std::vector<TypePtr> element_types;
    element_types.reserve(tuple.size());

    for (py::handle elem : tuple) {
      auto type_match = primitiveTypeOnly ? tryToInferPrimitiveType(elem)
                                          : tryToInferType(elem);
      if (type_match.success()) {
        element_types.push_back(type_match.type());
      } else {
        // Forward error message along
        return type_match.reason();
      }
    }
    return InferredType(TupleType::create(std::move(element_types)));
  } else if (PyDict_Check(input.ptr())) {
    // Check to make sure we can generate useful input/output types
    auto dict = py::cast<py::dict>(input);
    size_t len = py::len(dict);
    if (!len) {
      return InferredType("Dictionary inputs must have entries");
    }

    TypePtr key_type = nullptr;
    TypePtr value_type = nullptr;

    for (auto entry : dict) {
      // Try to infer the key type and unify it with the existing one
      auto entry_key_type_match = primitiveTypeOnly
          ? tryToInferPrimitiveType(entry.first)
          : tryToInferType(entry.first);
      if (!entry_key_type_match.success()) {
        return entry_key_type_match.reason();
      }
      auto unified_key =
          unifyOrInitializeType(key_type, entry_key_type_match.type());
      if (!unified_key) {
        return InferredType(c10::str(
            "Dictionary inputs to traced functions must have consistent type. Found ",
            key_type->repr_str(),
            " and ",
            (entry_key_type_match.type())->repr_str()));
      }

      // Try to infer the value type and unify it with the existing one
      auto entry_value_type_match = primitiveTypeOnly
          ? tryToInferPrimitiveType(entry.second)
          : tryToInferType(entry.second);
      if (!entry_value_type_match.success()) {
        return entry_value_type_match.reason();
      }
      auto unified_value =
          unifyOrInitializeType(value_type, entry_value_type_match.type());
      if (!unified_value) {
        return InferredType(c10::str(
            "Dictionary inputs to traced functions must have consistent type. Found ",
            value_type->repr_str(),
            " and ",
            (entry_value_type_match.type())->repr_str()));
      }

      key_type = *unified_key;
      value_type = *unified_value;
    }
    return InferredType(
        DictType::create(std::move(key_type), std::move(value_type)));
  } else if (PyList_Check(input.ptr())) {
    auto list = py::cast<py::list>(input);
    size_t len = py::len(list);
    if (!len) {
      return InferredType("List trace inputs must have elements");
    }

    TypePtr element_type = nullptr;
    for (auto elem : list) {
      auto element_type_match = primitiveTypeOnly
          ? tryToInferPrimitiveType(elem)
          : tryToInferType(elem);
      if (!element_type_match.success()) {
        return InferredType(c10::str(
            "Could not infer type of list element: ",
            element_type_match.reason()));
      }
      auto unified_type =
          unifyOrInitializeType(element_type, element_type_match.type());
      if (!unified_type) {
        return InferredType(c10::str(
            "List inputs to traced functions must have consistent element type. Found ",
            element_type->repr_str(),
            " and ",
            (element_type_match.type())->repr_str()));
      }
      element_type = *unified_type;
    }
    return InferredType(ListType::create(element_type));
  } else {
    if (primitiveTypeOnly) {
      // Bug fix: the message fragments previously concatenated without a
      // separating space ("...complex)are supported..."); add the space.
      return InferredType(c10::str(
          "Only tuple, list, or dict (possibly nested) of primitive types (bool, float, int, complex) ",
          "are supported ",
          "as inputs or outputs of traced functions",
          ", but instead got value of type ",
          py::str(input.get_type().attr("__name__")),
          "."));
    } else {
      // TODO: this message is not correct anymore, since this InferredType is
      // used from a bunch of circumstances unrelated to tracing. We can re-use
      // this instead of the attribute_failure stuff in concreteType
      // Bug fix: same missing-space join as above ("...dictsare supported...").
      return InferredType(c10::str(
          "Only tensors and (possibly nested) tuples of tensors, lists, or dicts ",
          "are supported ",
          "as inputs or outputs of traced functions",
          ", but instead got value of type ",
          py::str(input.get_type().attr("__name__")),
          "."));
    }
  }
}
|
| 646 |
+
|
| 647 |
+
inline bool isTraceableType(const TypePtr& type) {
|
| 648 |
+
if (type->isSubtypeOf(*TensorType::get())) {
|
| 649 |
+
return true;
|
| 650 |
+
}
|
| 651 |
+
|
| 652 |
+
if (auto list_type = type->cast<ListType>()) {
|
| 653 |
+
return isTraceableType(list_type->getElementType());
|
| 654 |
+
}
|
| 655 |
+
|
| 656 |
+
if (auto tuple_type = type->cast<TupleType>()) {
|
| 657 |
+
return std::all_of(
|
| 658 |
+
tuple_type->elements().begin(),
|
| 659 |
+
tuple_type->elements().end(),
|
| 660 |
+
[](const TypePtr& element_type) {
|
| 661 |
+
return isTraceableType(element_type);
|
| 662 |
+
});
|
| 663 |
+
}
|
| 664 |
+
|
| 665 |
+
if (auto dict_type = type->cast<DictType>()) {
|
| 666 |
+
return isTraceableType(dict_type->getValueType());
|
| 667 |
+
}
|
| 668 |
+
|
| 669 |
+
return false;
|
| 670 |
+
}
|
| 671 |
+
|
| 672 |
+
// Convert a python value into an IValue by first inferring its TorchScript
// type. If inference fails, falls back to recognizing ScriptModules and
// ScriptObjects directly; otherwise raises an error with the inference
// failure reason.
inline IValue toTypeInferredIValue(py::handle input) {
  auto match = tryToInferType(input);
  if (!match.success()) {
    auto object = py::cast<py::object>(input);
    if (auto mod = as_module(object)) {
      // if obj is already a ScriptModule, just return its ivalue
      auto ptr = mod.value()._ivalue();
      // explicit copy semantics for strong ownership of the resource:
      // reclaim_copy takes a new reference rather than stealing the released
      // pointer outright.
      return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
          ptr.release());
    }

    // Check if the obj is a ScriptObject.
    if (auto script_obj = as_object(object)) {
      auto ptr = script_obj.value()._ivalue();
      return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
          ptr.release());
    }
    AT_ERROR(
        "Tracer cannot infer type of ", py::str(input), "\n:", match.reason());
  }
  return toIValue(input, match.type());
}
|
| 695 |
+
|
| 696 |
+
// Convert a python tuple of trace inputs into a Stack of IValues.
// Throws if the inferred type is not traceable (only Tensors and possibly
// nested Lists/Dicts/Tuples of Tensors are accepted).
inline Stack toTraceableStack(const py::tuple& inputs) {
  auto info = toTypeInferredIValue(inputs);
  TORCH_CHECK(
      isTraceableType(info.type()),
      "Type '",
      info.type()->repr_str(),
      "' cannot be traced. Only Tensors and (possibly nested) Lists, Dicts, and"
      " Tuples of Tensors can be traced");
  return info.toTupleRef().elements().vec();
}
|
| 706 |
+
|
| 707 |
+
// Serialize the python dictionary into a traceable stack.
|
| 708 |
+
inline Stack toTraceableStack(const py::dict& inputs) {
|
| 709 |
+
Stack res;
|
| 710 |
+
for (auto it = inputs.begin(); it != inputs.end(); it++) {
|
| 711 |
+
if (THPVariable_Check(it->second.ptr())) {
|
| 712 |
+
res.push_back(toIValue(it->second, tryToInferType(it->second).type()));
|
| 713 |
+
}
|
| 714 |
+
}
|
| 715 |
+
return res;
|
| 716 |
+
}
|
| 717 |
+
|
| 718 |
+
// Build a type-tagged GenericList IValue by converting each python element
// of `obj` to `elem_type`.
inline IValue createGenericList(py::handle obj, const TypePtr& elem_type) {
  c10::impl::GenericList converted(elem_type);
  for (auto item : obj) {
    converted.push_back(toIValue(item, elem_type));
  }
  return IValue(converted);
}
|
| 725 |
+
|
| 726 |
+
// Build a type-tagged GenericDict IValue from a python dict, converting each
// key/value pair to the given key and value types.
inline IValue createGenericDict(
    const py::dict& obj,
    const TypePtr& key_type,
    const TypePtr& value_type) {
  c10::impl::GenericDict converted(key_type, value_type);
  // Reserve up front so insertion does not rehash.
  converted.reserve(py::len(obj));
  for (auto& item : obj) {
    converted.insert(
        toIValue(item.first, key_type), toIValue(item.second, value_type));
  }
  return IValue(converted);
}
|
| 738 |
+
|
| 739 |
+
// Raise if `var` carries tensor dimension names; named tensors are not yet
// supported in TorchScript.
template <class T>
inline void guardAgainstNamedTensor(const T& var) {
  TORCH_CHECK(
      !var.has_names(),
      "NYI: Named tensors are currently unsupported in TorchScript. As a "
      "workaround please drop names via `tensor = tensor.rename(None)`.");
}
|
| 746 |
+
|
| 747 |
+
// Extract custom class registered with torchbind.
// T must derive from CustomClassHolder (checked at compile time); the python
// object is converted via the registered custom-class type and the resulting
// IValue unwrapped.
template <typename T>
c10::intrusive_ptr<T> toCustomClass(py::handle obj) {
  static_assert(
      std::is_base_of_v<CustomClassHolder, T>, "T is not a CustomClass");
  const auto& type = c10::getCustomClassType<c10::intrusive_ptr<T>>();
  c10::IValue ivalue = toIValue(obj, type);
  return std::move(ivalue).toCustomClass<T>();
}
|
| 756 |
+
|
| 757 |
+
// Small wrapper around getting the type name string from Python to make
|
| 758 |
+
// types easier to interpret, e.g. give the structural type for a NamedTuple
|
| 759 |
+
inline std::string friendlyTypeName(py::handle obj) {
|
| 760 |
+
if (py::isinstance<py::tuple>(obj) && py::hasattr(obj, "_fields")) {
|
| 761 |
+
auto field_names =
|
| 762 |
+
py::cast<std::vector<std::string>>(py::getattr(obj, "_fields"));
|
| 763 |
+
std::stringstream ss;
|
| 764 |
+
ss << py::str(obj.get_type().attr("__name__"));
|
| 765 |
+
ss << " (aka NamedTuple(";
|
| 766 |
+
bool first = true;
|
| 767 |
+
for (auto& field_name : field_names) {
|
| 768 |
+
if (!first) {
|
| 769 |
+
ss << ", ";
|
| 770 |
+
}
|
| 771 |
+
ss << field_name;
|
| 772 |
+
first = false;
|
| 773 |
+
}
|
| 774 |
+
ss << "))";
|
| 775 |
+
return ss.str();
|
| 776 |
+
} else {
|
| 777 |
+
return py::str(obj.get_type().attr("__name__"));
|
| 778 |
+
}
|
| 779 |
+
}
|
| 780 |
+
|
| 781 |
+
// Thrown when trying to create a schema for a list of python
// arguments that cannot be converted.
// Can be caught by the caller to attempt to use other schema
// when there is an overloaded operator.
struct schema_match_error : public std::runtime_error {
  // Inherit the runtime_error constructors (message-only construction).
  using std::runtime_error::runtime_error;
};
|
| 788 |
+
|
| 789 |
+
// Convert the python value `object` into the IValue expected for argument
// `argumentPosition` of `schema`. Any pybind cast failure or python error is
// rethrown as schema_match_error so that overload resolution can catch it
// and try the next candidate schema.
inline IValue argumentToIValue(
    const FunctionSchema& schema,
    size_t argumentPosition,
    py::handle object) {
  const auto& argument = schema.arguments().at(argumentPosition);
  try {
    // argument.N() carries the fixed list size, if the schema declares one.
    return toIValue(object, argument.real_type(), argument.N());
  } catch (const py::cast_error& error) {
    throw schema_match_error(c10::str(
        schema.formatTypeMismatchMsg(
            argument,
            friendlyTypeName(object),
            argumentPosition,
            py::repr(object)),
        "\nCast error details: ",
        error.what()));
  } catch (const py::error_already_set& error) {
    throw schema_match_error(c10::str(
        schema.formatTypeMismatchMsg(
            argument,
            friendlyTypeName(object),
            argumentPosition,
            py::repr(object)),
        "\n Python error details: ",
        error.what()));
  }
}
|
| 816 |
+
|
| 817 |
+
// Convert a python return value `object` to an IValue of the declared return
// `type`. A pybind cast failure is rethrown as std::runtime_error with a
// detailed type/value diagnostic.
inline IValue returnToIValue(const TypePtr& type, py::handle object) {
  try {
    return toIValue(object, type);
  } catch (const py::cast_error& error) {
    throw std::runtime_error(c10::str(
        " expected value of type ",
        type->str(),
        " for return value but instead got value of type ",
        py::str(object.get_type().attr("__name__")),
        ".",
        "\nValue: ",
        py::repr(object),
        "\nCast error details: ",
        error.what()));
  }
}
|
| 833 |
+
|
| 834 |
+
// Look up the python class object previously registered for a scripted class
// type; throws std::runtime_error if the class was never registered.
inline py::object getScriptedClassOrError(const c10::NamedTypePtr& classType) {
  const std::string qualified_name = classType->name()->qualifiedName();
  auto py_class = py::module::import("torch.jit._state")
                      .attr("_get_python_class")(qualified_name);
  if (py_class.is_none()) {
    throw std::runtime_error(
        "Unknown reference to ScriptClass " + qualified_name +
        ". (Did you forget to import it?)");
  }
  return py_class;
}
|
| 847 |
+
|
| 848 |
+
// A view of a contiguous sub-range [b, e) of a py::tuple, supporting
// iteration, indexing, and size queries without copying elements. Holds the
// tuple by value, so the underlying python object stays alive for the
// lifetime of the slice.
struct VISIBILITY_HIDDEN tuple_slice {
  // Slice covering the whole tuple.
  /*implicit*/ tuple_slice(py::tuple tup_)
      : tup(std::move(tup_)), b(0), e(tup.size()) {}
  // Slice from index b_ to the end.
  tuple_slice(py::tuple tup_, int64_t b_)
      : tup(std::move(tup_)), b(b_), e(tup.size()) {}
  // Slice of the half-open range [b_, e_).
  tuple_slice(py::tuple tup_, int64_t b_, int64_t e_)
      : tup(std::move(tup_)), b(b_), e(e_) {}
  py::detail::tuple_iterator begin() const {
    return {tup, static_cast<pybind11::ssize_t>(b)};
  }
  py::detail::tuple_iterator end() const {
    return {tup, static_cast<pybind11::ssize_t>(e)};
  }
  size_t size() const {
    return e - b;
  }
  // Index is relative to the start of the slice, not the tuple.
  py::detail::tuple_accessor operator[](size_t index) const {
    return {tup, static_cast<size_t>(b + index)};
  }

 private:
  py::tuple tup;
  int64_t b;
  int64_t e;
};
|
| 873 |
+
|
| 874 |
+
// Validate that `object` (a FakeScriptObject wrapper) wraps an instance of
// the fake class registered for the custom-class type declared at
// `argumentPosition` of `schema`. Throws schema_match_error on mismatch;
// returns true otherwise.
inline bool validateFakeScriptObjectSchema(
    const c10::FunctionSchema& schema,
    size_t argumentPosition,
    py::handle object) {
  auto argument = schema.arguments().at(argumentPosition);
  auto class_type = argument.real_type()->expect<c10::ClassType>();
  auto fake_class_registry =
      py::module::import("torch._library.fake_class_registry");
  // Look up the fake class registered for this custom class's qualified name.
  auto fake_class = fake_class_registry.attr("find_fake_class")(
      class_type->name().value().qualifiedName());
  if (!py::isinstance(object.attr("wrapped_obj"), fake_class)) {
    throw schema_match_error(c10::str(
        schema.formatTypeMismatchMsg(
            argument,
            friendlyTypeName(object),
            argumentPosition,
            py::repr(object.attr("wrapped_obj"))),
        "\nCast error details: ",
        argument.name(),
        " is expected to be a FakeScriptObject of ",
        class_type->name().value().qualifiedName()));
  }
  return true;
}
|
| 898 |
+
|
| 899 |
+
// Check that (args, kwargs) can be matched against `schema`, additionally
// accepting FakeScriptObject stand-ins wherever the schema expects a custom
// ClassType. Conversions are performed only for validation; their results are
// discarded. Throws schema_match_error on any mismatch, returns true on
// success.
inline bool matchSchemaAllowFakeScriptObject(
    const FunctionSchema& schema,
    const tuple_slice& args,
    const py::kwargs& kwargs) {
  size_t all_arguments = args.size() + kwargs.size();
  if (all_arguments > schema.arguments().size()) {
    throw schema_match_error(c10::str(
        schema.name(),
        "() expected at most ",
        schema.arguments().size(),
        " argument(s) but received ",
        all_arguments,
        " argument(s). Declaration: ",
        schema));
  }

  int64_t arg_idx = 0;
  auto fake_class_registry =
      py::module::import("torch._library.fake_class_registry");

  // First push all positional args.
  for (const auto& arg : args) {
    // ...but refuse to do it if the schema says that this was supposed
    // to be keyword only
    if (schema.arguments()[arg_idx].kwarg_only()) {
      throw schema_match_error(c10::str(
          schema.name(),
          "() takes ",
          arg_idx,
          " positional argument(s) but ",
          args.size(),
          " was/were given. Declaration: ",
          schema));
    }
    // Use the type information from the schema to convert the PyObject.
    const auto& argument = schema.arguments().at(arg_idx);
    if (argument.real_type()->kind() == TypeKind::ClassType &&
        py::isinstance(arg, fake_class_registry.attr("FakeScriptObject"))) {
      validateFakeScriptObjectSchema(schema, arg_idx, arg);
    } else {
      argumentToIValue(schema, arg_idx, arg);
    }

    arg_idx++;
  }

  // Now for every remaining non-positional argument in the schema, look for it
  // in the kwargs dict and push it if found, or use its default value if it
  // has one.
  size_t consumed_kwargs = 0;
  for (size_t i = arg_idx; i < schema.arguments().size(); ++i) {
    const auto& arg = schema.arguments()[i];
    if (kwargs.contains(arg.name().c_str())) {
      auto cur_kwarg = kwargs[arg.name().c_str()];
      if (arg.real_type()->kind() == TypeKind::ClassType &&
          py::isinstance(
              cur_kwarg, fake_class_registry.attr("FakeScriptObject"))) {
        validateFakeScriptObjectSchema(schema, i, cur_kwarg);
      } else {
        argumentToIValue(schema, i, cur_kwarg);
      }
      consumed_kwargs += 1;
    } else if (arg.default_value()) {
      // Defaulted arguments need no validation.
      continue;
    } else {
      throw schema_match_error(c10::str(
          schema.name(),
          "() is missing value for argument '",
          arg.name(),
          "'. Declaration: ",
          schema));
    }
  }

  // Any kwargs not consumed above are unknown or duplicate positional names.
  if (consumed_kwargs != kwargs.size()) {
    std::vector<std::string> names;
    for (const auto& kwarg : kwargs) {
      names.emplace_back(py::cast<std::string>(kwarg.first));
    }
    throw schema_match_error(schema.findErrorInKwargs(names));
  }

  return true;
}
|
| 983 |
+
|
| 984 |
+
// Build an interpreter Stack of IValues for a call to `schema` from python
// (args, kwargs), optionally prepending `self`. Positional args are converted
// in order, remaining schema arguments are filled from kwargs or their
// defaults. Throws schema_match_error on arity mismatch, keyword-only
// violations, missing arguments, or unknown kwargs.
inline Stack createStackForSchema(
    const FunctionSchema& schema,
    const tuple_slice& args,
    const py::kwargs& kwargs,
    std::optional<IValue> self) {
  size_t all_arguments = (self ? 1 : 0) + args.size() + kwargs.size();
  if (all_arguments > schema.arguments().size()) {
    throw schema_match_error(c10::str(
        schema.name(),
        "() expected at most ",
        schema.arguments().size(),
        " argument(s) but received ",
        all_arguments,
        " argument(s). Declaration: ",
        schema));
  }
  Stack stack;
  stack.reserve(schema.arguments().size());

  int64_t arg_idx = 0;
  if (self) {
    // `self` occupies the first schema argument slot.
    push(stack, std::move(*self));
    arg_idx++;
  }
  // First push all positional args.
  for (const auto& arg : args) {
    // ...but refuse to do it if the schema says that this was supposed
    // to be keyword only
    if (schema.arguments()[arg_idx].kwarg_only()) {
      throw schema_match_error(c10::str(
          schema.name(),
          "() takes ",
          arg_idx,
          " positional argument(s) but ",
          self ? 1 + args.size() : args.size(),
          " was/were given. Declaration: ",
          schema));
    }
    // Use the type information from the schema to convert the PyObject.
    push(stack, argumentToIValue(schema, stack.size(), arg));
    arg_idx++;
  }

  // Now for every remaining non-positional argument in the schema, look for it
  // in the kwargs dict and push it if found, or use its default value if it
  // has one.
  size_t consumed_kwargs = 0;
  for (size_t i = stack.size(); i < schema.arguments().size(); ++i) {
    const auto& arg = schema.arguments()[i];
    if (kwargs.contains(arg.name().c_str())) {
      push(stack, argumentToIValue(schema, i, kwargs[arg.name().c_str()]));
      consumed_kwargs += 1;
    } else if (arg.default_value()) {
      push(stack, *arg.default_value());
    } else {
      throw schema_match_error(c10::str(
          schema.name(),
          "() is missing value for argument '",
          arg.name(),
          "'. Declaration: ",
          schema));
    }
  }

  // Any kwargs not consumed above are unknown or duplicate positional names.
  if (consumed_kwargs != kwargs.size()) {
    std::vector<std::string> names;
    for (const auto& kwarg : kwargs) {
      names.emplace_back(py::cast<std::string>(kwarg.first));
    }
    throw schema_match_error(schema.findErrorInKwargs(names));
  }

  return stack;
}
|
| 1058 |
+
|
| 1059 |
+
// Convert an interpreter Stack into a python return value: None for an empty
// stack, a bare value for a single entry, otherwise a tuple of values.
inline py::object createPyObjectForStack(Stack&& stack) {
  const size_t n = stack.size();
  if (n == 0) {
    return py::none();
  }
  // Return a simple value and not a single-element tuple if there is only one
  // return value.
  if (n == 1) {
    return toPyObject(std::move(stack[0]));
  }

  // If there is more than one return value, pop them into a py::tuple.
  py::tuple outputs(n);
  for (size_t i = 0; i < n; ++i) {
    outputs[i] = toPyObject(std::move(stack[i]));
  }
  return std::move(outputs);
}
|
| 1078 |
+
|
| 1079 |
+
// TODO: Remove once we clean up the GraphExecutor usage.
// Deprecated: convert a python tuple into a Stack by matching each element
// against the type of the corresponding graph input Value. Errors if the
// tuple length differs from the number of inputs. `reserve_extra_space`
// pre-allocates additional stack capacity for the caller.
inline Stack evilDeprecatedBadCreateStackDoNotUse(
    const py::tuple& tuple,
    at::ArrayRef<Value*> inputs,
    size_t reserve_extra_space = 0) {
  if (tuple.size() != inputs.size()) {
    AT_ERROR(
        "expected " + std::to_string(inputs.size()) + " inputs, but got " +
        std::to_string(tuple.size()));
  }
  Stack result;
  result.reserve(tuple.size() + reserve_extra_space);
  for (const auto i : c10::irange(inputs.size())) {
    result.push_back(toIValue(std::move(tuple[i]), inputs[i]->type()));
  }
  return result;
}
|
| 1096 |
+
|
| 1097 |
+
// Run `callee`, potentially inserting a CallFunction/CallMethod node into the
// tracing graph.
//
// If tracing is not active, the callee is simply executed (with the GIL
// released). If tracing is active, a call node is inserted via `callInserter`
// and the callee is executed with tracing paused so its internal ops are not
// recorded twice. Returns the (single) result converted to a python object.
inline py::object runAndInsertCall(
    Function& callee,
    const tuple_slice& args,
    const py::kwargs& kwargs,
    std::optional<IValue> self,
    // Lambda that tells this function how to insert `callee` into the graph if
    // we're tracing.
    const std::function<Value*(Graph&, const MatchedSchema& match)>&
        callInserter) {
  auto stack =
      createStackForSchema(callee.getSchema(), args, kwargs, std::move(self));
  const auto& tracing_state = tracer::getTracingState();
  if (!tracing_state) {
    pybind11::gil_scoped_release no_gil_guard;
    // If we're not tracing, just run the callee as normal.
    callee.run(stack);
  } else {
    // If we are tracing, insert the appropriate CallFunction or CallMethod node
    // and then run the callee with tracing disabled.

    // Get the graph `Value`s that represent the input IValues
    auto inputs = last(stack, callee.num_inputs());
    auto input_values =
        fmap(inputs, [](const IValue& v) { return tracer::getValueTrace(v); });
    // Only single-return callees are supported on the tracing path.
    TORCH_INTERNAL_ASSERT(callee.getSchema().returns().size() == 1)
    auto return_type = callee.getSchema().returns().at(0).type();
    auto graph = tracing_state->graph;
    std::vector<NamedValue> named_values;
    named_values.reserve(input_values.size());
    for (Value* v : input_values) {
      named_values.emplace_back(v);
    }

    // Add a call node.
    MatchedSchema match = matchSchema(
        callee.getSchema(),
        tracer::getPythonInterpreterSourceRange(),
        *graph,
        named_values,
        {});
    auto output_value = callInserter(*graph, match);

    // Actually run the callee. Pause the tracer so that we don't double-add the
    // callee nodes.
    {
      pybind11::gil_scoped_release no_gil_guard;
      ResourceGuard guard(tracer::pauseTracing());
      callee.run(stack);
    }

    // Associate the output IValues with the output `Value`s in the graph
    tracer::setValueTrace(stack.back(), output_value);
  }

  TORCH_CHECK(
      !stack.empty(),
      "Expected values in the stack after execution but found none");
  return toPyObject(std::move(stack.back()));
}
|
| 1158 |
+
|
| 1159 |
+
inline std::optional<py::object> maybeTorchFunctionDispatch(
|
| 1160 |
+
const py::object& callee,
|
| 1161 |
+
const tuple_slice& args_no_self,
|
| 1162 |
+
const py::kwargs& kwargs,
|
| 1163 |
+
const c10::QualifiedName& qualname) {
|
| 1164 |
+
std::vector<py::handle> args_vec;
|
| 1165 |
+
for (const auto& arg : args_no_self) {
|
| 1166 |
+
args_vec.push_back(arg);
|
| 1167 |
+
}
|
| 1168 |
+
py::tuple args = py::cast(args_vec);
|
| 1169 |
+
|
| 1170 |
+
// Handle __torch_function__ dispatch
|
| 1171 |
+
std::vector<PyObject*> overloaded_args;
|
| 1172 |
+
size_t total_arg_num = args.size() + kwargs.size();
|
| 1173 |
+
for (const auto& arg : args) {
|
| 1174 |
+
is_tensor_and_append_overloaded(arg.ptr(), &overloaded_args);
|
| 1175 |
+
is_tensor_list_and_append_overloaded(
|
| 1176 |
+
arg.ptr(),
|
| 1177 |
+
&overloaded_args,
|
| 1178 |
+
static_cast<int>(total_arg_num),
|
| 1179 |
+
false /* throw_error */);
|
| 1180 |
+
}
|
| 1181 |
+
// NB: for kwargs, we cannot guarantee the order of appending
|
| 1182 |
+
// is the same as the argument order in operator's schema.
|
| 1183 |
+
// This is suboptimal, but should be fine. Later when we have
|
| 1184 |
+
// better schema matching and argument parsing, we could
|
| 1185 |
+
// match the operator in `operations` first, then the order will
|
| 1186 |
+
// be guaranteed.
|
| 1187 |
+
for (auto item : kwargs) {
|
| 1188 |
+
is_tensor_and_append_overloaded(item.second.ptr(), &overloaded_args);
|
| 1189 |
+
is_tensor_list_and_append_overloaded(
|
| 1190 |
+
item.second.ptr(),
|
| 1191 |
+
&overloaded_args,
|
| 1192 |
+
total_arg_num,
|
| 1193 |
+
false /* throw_error */);
|
| 1194 |
+
}
|
| 1195 |
+
if (!overloaded_args.empty()) {
|
| 1196 |
+
return pybind11::reinterpret_steal<py::object>(
|
| 1197 |
+
handle_torch_function_no_python_arg_parser(
|
| 1198 |
+
/*overloaded_args=*/overloaded_args,
|
| 1199 |
+
/*args=*/args.ptr(),
|
| 1200 |
+
/*kwargs=*/kwargs.ptr(),
|
| 1201 |
+
/*func_name=*/qualname.name().c_str(),
|
| 1202 |
+
/*torch_api_function=*/callee.ptr(),
|
| 1203 |
+
/*module_name=*/qualname.prefix().c_str()));
|
| 1204 |
+
}
|
| 1205 |
+
|
| 1206 |
+
return std::nullopt;
|
| 1207 |
+
}
|
| 1208 |
+
|
| 1209 |
+
// Invoke a scripted free function from python, inserting a CallFunction node
// into the graph when tracing is active.
inline py::object invokeScriptFunctionFromPython(
    Function& callee,
    const tuple_slice& args,
    const py::kwargs& kwargs) {
  // TODO: we could add __torch_function__ dispatch here but I don't know
  // the implications of doing so

  return runAndInsertCall(
      callee,
      args,
      kwargs,
      /*self=*/std::nullopt,
      [&](Graph& graph, const MatchedSchema& match) {
        return graph.insertFunctionCall(&callee, match);
      });
}
|
| 1225 |
+
|
| 1226 |
+
// Invoke a scripted method from python. First gives __torch_function__
// overrides a chance to handle the call; otherwise runs the method with the
// owning module as `self`, inserting a CallMethod node when tracing.
inline py::object invokeScriptMethodFromPython(
    Method& callee,
    const tuple_slice& args,
    const py::kwargs& kwargs) {
  auto self = callee.owner()._ivalue();

  if (auto torch_fn_result = maybeTorchFunctionDispatch(
          py::cast(callee), args, kwargs, callee.name())) {
    return *torch_fn_result;
  }

  return runAndInsertCall(
      callee.function(),
      args,
      kwargs,
      self,
      [&](Graph& graph, const MatchedSchema& match) {
        return graph.insertMethodCall(callee.name(), match);
      });
}
|
| 1246 |
+
|
| 1247 |
+
TORCH_PYTHON_API std::pair<std::shared_ptr<Operator>, Stack> getOpWithStack(
|
| 1248 |
+
const std::vector<std::shared_ptr<Operator>>& operations,
|
| 1249 |
+
const py::args& args,
|
| 1250 |
+
const py::kwargs& kwargs);
|
| 1251 |
+
|
| 1252 |
+
TORCH_PYTHON_API py::object invokeOperatorFromPython(
|
| 1253 |
+
const std::vector<std::shared_ptr<Operator>>& operations,
|
| 1254 |
+
const py::args& args,
|
| 1255 |
+
const py::kwargs& kwargs,
|
| 1256 |
+
std::optional<c10::DispatchKey> dk = std::nullopt);
|
| 1257 |
+
|
| 1258 |
+
TORCH_PYTHON_API std::optional<py::object> _maybe_handle_torch_function(
|
| 1259 |
+
const std::string& ns,
|
| 1260 |
+
const std::string& method_name,
|
| 1261 |
+
const std::string& overload_name,
|
| 1262 |
+
bool is_overload,
|
| 1263 |
+
const py::args& args,
|
| 1264 |
+
const py::kwargs& kwargs);
|
| 1265 |
+
|
| 1266 |
+
TORCH_PYTHON_API bool checkSchemaAllowFakeScriptObject(
|
| 1267 |
+
const FunctionSchema& schema,
|
| 1268 |
+
const py::args& args,
|
| 1269 |
+
const py::kwargs& kwargs);
|
| 1270 |
+
|
| 1271 |
+
TORCH_PYTHON_API py::object _get_operation_for_overload_or_packet(
|
| 1272 |
+
const std::vector<std::shared_ptr<Operator>>& operations,
|
| 1273 |
+
Symbol symbol,
|
| 1274 |
+
const py::args& args,
|
| 1275 |
+
const py::kwargs& kwargs,
|
| 1276 |
+
bool is_overload,
|
| 1277 |
+
std::optional<c10::DispatchKey> dk = std::nullopt);
|
| 1278 |
+
|
| 1279 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tracer.h
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/frontend/source_range.h>
|
| 4 |
+
#include <torch/csrc/jit/frontend/tracer.h>
|
| 5 |
+
#include <torch/csrc/python_headers.h>
|
| 6 |
+
#include <torch/csrc/utils/pybind.h>
|
| 7 |
+
|
| 8 |
+
#include <memory>
|
| 9 |
+
#include <string>
|
| 10 |
+
|
| 11 |
+
namespace torch::jit {
|
| 12 |
+
|
| 13 |
+
struct Module;
|
| 14 |
+
|
| 15 |
+
namespace tracer {
|
| 16 |
+
void initPythonTracerBindings(PyObject* module);
|
| 17 |
+
|
| 18 |
+
SourceRange getPythonInterpreterSourceRange();
|
| 19 |
+
|
| 20 |
+
Node* preRecordPythonTrace(
|
| 21 |
+
THPObjectPtr pyobj,
|
| 22 |
+
const std::string& arg_types,
|
| 23 |
+
at::ArrayRef<autograd::Variable> inputs,
|
| 24 |
+
std::vector<THPObjectPtr> scalar_args);
|
| 25 |
+
|
| 26 |
+
std::pair<std::shared_ptr<Graph>, Stack> createGraphByTracingWithDict(
|
| 27 |
+
const py::function& func,
|
| 28 |
+
const py::dict& inputs_dict,
|
| 29 |
+
const Stack& inputs,
|
| 30 |
+
const py::function& var_name_lookup_fn,
|
| 31 |
+
bool strict,
|
| 32 |
+
bool force_outplace,
|
| 33 |
+
Module* self = nullptr,
|
| 34 |
+
const std::vector<std::string>& argument_names = {});
|
| 35 |
+
|
| 36 |
+
std::pair<std::shared_ptr<Graph>, Stack> createGraphByTracing(
|
| 37 |
+
const py::function& func,
|
| 38 |
+
Stack inputs,
|
| 39 |
+
const py::function& var_name_lookup_fn,
|
| 40 |
+
bool strict,
|
| 41 |
+
bool force_outplace,
|
| 42 |
+
Module* self = nullptr,
|
| 43 |
+
const std::vector<std::string>& argument_names = {});
|
| 44 |
+
} // namespace tracer
|
| 45 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_tree_views.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::jit {
|
| 6 |
+
|
| 7 |
+
void initTreeViewBindings(PyObject* module);
|
| 8 |
+
|
| 9 |
+
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/update_graph_executor_opt.h
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
#include <torch/csrc/Export.h>
namespace torch::jit {
// Globally enables/disables graph-executor optimization passes for
// subsequently executed graphs.
TORCH_API void setGraphExecutorOptimize(bool o);
// Returns the current global graph-executor optimization setting.
TORCH_API bool getGraphExecutorOptimize();
} // namespace torch::jit
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h
ADDED
|
@@ -0,0 +1,398 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
|
| 5 |
+
#include <torch/csrc/jit/tensorexpr/stmt.h>
|
| 6 |
+
#include <torch/csrc/jit/tensorexpr/tensor.h>
|
| 7 |
+
|
| 8 |
+
#include <utility>
|
| 9 |
+
|
| 10 |
+
namespace torch::jit::tensorexpr {
|
| 11 |
+
class HasRand : public IRVisitor {
|
| 12 |
+
public:
|
| 13 |
+
HasRand(StmtPtr stmt) : stmt_(std::move(stmt)) {
|
| 14 |
+
stmt_->accept(this);
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
bool has_rand() const {
|
| 18 |
+
return has_rand_;
|
| 19 |
+
}
|
| 20 |
+
|
| 21 |
+
private:
|
| 22 |
+
void visit(const IntrinsicsPtr& v) override {
|
| 23 |
+
if (v->op_type() == IntrinsicsOp::kRand) {
|
| 24 |
+
has_rand_ = true;
|
| 25 |
+
} else {
|
| 26 |
+
IRVisitor::visit(v);
|
| 27 |
+
}
|
| 28 |
+
}
|
| 29 |
+
StmtPtr stmt_;
|
| 30 |
+
bool has_rand_ = false;
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
template <typename Op>
|
| 34 |
+
class NodeFinder : public IRVisitor {
|
| 35 |
+
public:
|
| 36 |
+
void visit(const NodePtr<Op>& v) override {
|
| 37 |
+
nodes.push_back((NodePtr<Op>)v);
|
| 38 |
+
IRVisitor::visit(v);
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
static std::vector<NodePtr<Op>> find(const StmtPtr& s) {
|
| 42 |
+
NodeFinder<Op> nf;
|
| 43 |
+
s->accept(&nf);
|
| 44 |
+
return nf.nodes;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
static std::vector<NodePtr<Op>> find(const ExprPtr& e) {
|
| 48 |
+
NodeFinder<Op> nf;
|
| 49 |
+
e->accept(&nf);
|
| 50 |
+
return nf.nodes;
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
std::vector<NodePtr<Op>> nodes;
|
| 54 |
+
};
|
| 55 |
+
|
| 56 |
+
class VarFinder : public IRVisitor {
|
| 57 |
+
public:
|
| 58 |
+
void visit(const VarPtr& v) override {
|
| 59 |
+
vars_.insert(v);
|
| 60 |
+
IRVisitor::visit(v);
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
static std::unordered_set<VarPtr> find(const StmtPtr& s) {
|
| 64 |
+
VarFinder nf;
|
| 65 |
+
s->accept(&nf);
|
| 66 |
+
return nf.vars();
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
static std::unordered_set<VarPtr> find(const ExprPtr& e) {
|
| 70 |
+
VarFinder nf;
|
| 71 |
+
e->accept(&nf);
|
| 72 |
+
return nf.vars();
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
const std::unordered_set<VarPtr>& vars() {
|
| 76 |
+
return vars_;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
private:
|
| 80 |
+
std::unordered_set<VarPtr> vars_;
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
class BufFinder : public IRVisitor {
|
| 84 |
+
public:
|
| 85 |
+
void visit(const BufPtr& v) override {
|
| 86 |
+
bufs_.insert(v);
|
| 87 |
+
IRVisitor::visit(v);
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
static std::unordered_set<BufPtr> find(const StmtPtr& s) {
|
| 91 |
+
BufFinder nf;
|
| 92 |
+
s->accept(&nf);
|
| 93 |
+
return nf.bufs();
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
static std::unordered_set<BufPtr> find(const ExprPtr& e) {
|
| 97 |
+
BufFinder nf;
|
| 98 |
+
e->accept(&nf);
|
| 99 |
+
return nf.bufs();
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
const std::unordered_set<BufPtr>& bufs() {
|
| 103 |
+
return bufs_;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
private:
|
| 107 |
+
std::unordered_set<BufPtr> bufs_;
|
| 108 |
+
};
|
| 109 |
+
|
| 110 |
+
// Finds all kinds of write operations to the provided Buf.
|
| 111 |
+
class WritesToBuf : public IRVisitor {
|
| 112 |
+
public:
|
| 113 |
+
WritesToBuf(BufPtr target) : target_(std::move(target)) {}
|
| 114 |
+
|
| 115 |
+
std::vector<StmtPtr> writes() {
|
| 116 |
+
return writes_;
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
static std::vector<StmtPtr> find(const StmtPtr& s, BufPtr b) {
|
| 120 |
+
WritesToBuf finder(std::move(b));
|
| 121 |
+
s->accept(&finder);
|
| 122 |
+
return finder.writes();
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
private:
|
| 126 |
+
void visit(const StorePtr& v) override {
|
| 127 |
+
if (v->buf() == target_) {
|
| 128 |
+
writes_.push_back(v);
|
| 129 |
+
}
|
| 130 |
+
}
|
| 131 |
+
|
| 132 |
+
void visit(const AtomicAddPtr& v) override {
|
| 133 |
+
if (v->buf() == target_) {
|
| 134 |
+
writes_.push_back(v);
|
| 135 |
+
}
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
BufPtr target_;
|
| 139 |
+
std::vector<StmtPtr> writes_;
|
| 140 |
+
};
|
| 141 |
+
|
| 142 |
+
class StmtsReadingBuf : public IRVisitor {
|
| 143 |
+
public:
|
| 144 |
+
StmtsReadingBuf(BufPtr target) : target_(std::move(target)) {}
|
| 145 |
+
|
| 146 |
+
std::vector<StmtPtr> reads() {
|
| 147 |
+
return reads_;
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
static std::vector<StmtPtr> find(const StmtPtr& s, BufPtr b) {
|
| 151 |
+
StmtsReadingBuf finder(std::move(b));
|
| 152 |
+
s->accept(&finder);
|
| 153 |
+
return finder.reads();
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
private:
|
| 157 |
+
bool readsBuffer(const StmtPtr& s) {
|
| 158 |
+
auto loads = NodeFinder<Load>::find(s);
|
| 159 |
+
for (const auto& l : loads) {
|
| 160 |
+
if (l->buf() == target_) {
|
| 161 |
+
return true;
|
| 162 |
+
}
|
| 163 |
+
}
|
| 164 |
+
return false;
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
void visit(const StorePtr& v) override {
|
| 168 |
+
if (readsBuffer(v)) {
|
| 169 |
+
reads_.push_back(v);
|
| 170 |
+
}
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
void visit(const LetPtr& v) override {
|
| 174 |
+
if (readsBuffer(v)) {
|
| 175 |
+
reads_.push_back(v);
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
void visit(const CondPtr& v) override {
|
| 180 |
+
if (readsBuffer(v)) {
|
| 181 |
+
reads_.push_back(v);
|
| 182 |
+
}
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
void visit(const AtomicAddPtr& v) override {
|
| 186 |
+
if (readsBuffer(v)) {
|
| 187 |
+
reads_.push_back(v);
|
| 188 |
+
}
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
BufPtr target_;
|
| 192 |
+
std::vector<StmtPtr> reads_;
|
| 193 |
+
};
|
| 194 |
+
|
| 195 |
+
class ExternalAllocBufFinder : public IRVisitor {
|
| 196 |
+
public:
|
| 197 |
+
void visit(const ExternalCallWithAllocPtr& v) override {
|
| 198 |
+
const auto& bufs_out = v->buf_out_args();
|
| 199 |
+
bufs_.insert(bufs_out.begin(), bufs_out.end());
|
| 200 |
+
IRVisitor::visit(v);
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
static std::unordered_set<BufPtr> find(const StmtPtr& s) {
|
| 204 |
+
ExternalAllocBufFinder f;
|
| 205 |
+
s->accept(&f);
|
| 206 |
+
return f.bufs();
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
static std::unordered_set<BufPtr> find(const ExprPtr& e) {
|
| 210 |
+
ExternalAllocBufFinder f;
|
| 211 |
+
e->accept(&f);
|
| 212 |
+
return f.bufs();
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
const std::unordered_set<BufPtr>& bufs() {
|
| 216 |
+
return bufs_;
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
private:
|
| 220 |
+
std::unordered_set<BufPtr> bufs_;
|
| 221 |
+
};
|
| 222 |
+
|
| 223 |
+
// Traverses the IR to determine if a particular Var is modified within it.
|
| 224 |
+
class ModifiesVarChecker : public IRVisitor {
|
| 225 |
+
public:
|
| 226 |
+
ModifiesVarChecker(VarPtr v) : var_(std::move(v)) {}
|
| 227 |
+
|
| 228 |
+
static bool check(const StmtPtr& s, VarPtr v) {
|
| 229 |
+
ModifiesVarChecker checker(std::move(v));
|
| 230 |
+
s->accept(&checker);
|
| 231 |
+
return checker.found();
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
bool found() {
|
| 235 |
+
return found_;
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
private:
|
| 239 |
+
void visit(const StorePtr& v) override {
|
| 240 |
+
if (v->buf()->base_handle() == var_) {
|
| 241 |
+
found_ = true;
|
| 242 |
+
return;
|
| 243 |
+
}
|
| 244 |
+
IRVisitor::visit(v);
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
void visit(const AtomicAddPtr& v) override {
|
| 248 |
+
if (v->buf()->base_handle() == var_) {
|
| 249 |
+
found_ = true;
|
| 250 |
+
return;
|
| 251 |
+
}
|
| 252 |
+
IRVisitor::visit(v);
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
void visit(const LetPtr& v) override {
|
| 256 |
+
if (v->var() == var_) {
|
| 257 |
+
found_ = true;
|
| 258 |
+
return;
|
| 259 |
+
}
|
| 260 |
+
IRVisitor::visit(v);
|
| 261 |
+
}
|
| 262 |
+
|
| 263 |
+
void visit(const ForPtr& v) override {
|
| 264 |
+
if (v->var() == var_) {
|
| 265 |
+
found_ = true;
|
| 266 |
+
return;
|
| 267 |
+
}
|
| 268 |
+
IRVisitor::visit(v);
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
VarPtr var_;
|
| 272 |
+
bool found_{false};
|
| 273 |
+
};
|
| 274 |
+
|
| 275 |
+
// Traverse the Block stmt to identify the live range of the specified buf. The
|
| 276 |
+
// live range, indicated by a pair of integers, specifies the first and last
|
| 277 |
+
// stmt in block stmts that access to the buf.
|
| 278 |
+
class BufLiveRange : public IRVisitor {
|
| 279 |
+
public:
|
| 280 |
+
BufLiveRange(BufPtr b) : buf_(std::move(b)) {}
|
| 281 |
+
|
| 282 |
+
static std::tuple<int32_t, int32_t> liveRange(const StmtPtr& s, BufPtr b) {
|
| 283 |
+
BlockPtr block = to<Block>(s);
|
| 284 |
+
// We Only analyze buffer live ranges for block stmts.
|
| 285 |
+
if (!block) {
|
| 286 |
+
return std::make_tuple(0, 0);
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
BufLiveRange analyzer(std::move(b));
|
| 290 |
+
block->accept(&analyzer);
|
| 291 |
+
return analyzer.getLiveRange();
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
private:
|
| 295 |
+
std::tuple<int32_t, int32_t> getLiveRange() {
|
| 296 |
+
return std::make_tuple(begin_, end_);
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
bool hasBufReads(const StmtPtr& s) {
|
| 300 |
+
auto loads1 = NodeFinder<Load>::find(s);
|
| 301 |
+
for (const auto& l : loads1) {
|
| 302 |
+
if (l->buf() == buf_) {
|
| 303 |
+
return true;
|
| 304 |
+
}
|
| 305 |
+
}
|
| 306 |
+
auto loads2 = NodeFinder<ExternalCall>::find(s);
|
| 307 |
+
for (const auto& l : loads2) {
|
| 308 |
+
for (const auto& lb : l->buf_args()) {
|
| 309 |
+
if (lb == buf_) {
|
| 310 |
+
return true;
|
| 311 |
+
}
|
| 312 |
+
}
|
| 313 |
+
}
|
| 314 |
+
auto loads3 = NodeFinder<ExternalCallWithAlloc>::find(s);
|
| 315 |
+
for (const auto& l : loads3) {
|
| 316 |
+
for (const auto& lb : l->buf_args()) {
|
| 317 |
+
if (lb == buf_) {
|
| 318 |
+
return true;
|
| 319 |
+
}
|
| 320 |
+
}
|
| 321 |
+
}
|
| 322 |
+
return false;
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
bool hasBufWrites(const StmtPtr& s) {
|
| 326 |
+
auto writes1 = NodeFinder<Store>::find(s);
|
| 327 |
+
for (const auto& w : writes1) {
|
| 328 |
+
if (w->buf() == buf_) {
|
| 329 |
+
return true;
|
| 330 |
+
}
|
| 331 |
+
}
|
| 332 |
+
auto writes2 = NodeFinder<ExternalCall>::find(s);
|
| 333 |
+
for (const auto& w : writes2) {
|
| 334 |
+
if (w->buf() == buf_) {
|
| 335 |
+
return true;
|
| 336 |
+
}
|
| 337 |
+
}
|
| 338 |
+
auto writes3 = NodeFinder<ExternalCallWithAlloc>::find(s);
|
| 339 |
+
for (const auto& w : writes3) {
|
| 340 |
+
for (const auto& wb : w->buf_out_args()) {
|
| 341 |
+
if (wb == buf_) {
|
| 342 |
+
return true;
|
| 343 |
+
}
|
| 344 |
+
}
|
| 345 |
+
}
|
| 346 |
+
return false;
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
void findAccAndUpdateLiveRange(const StmtPtr& s) {
|
| 350 |
+
bool has_reads = hasBufReads(s), has_writes = hasBufWrites(s);
|
| 351 |
+
if (has_reads || has_writes) {
|
| 352 |
+
if (begin_ == -1) {
|
| 353 |
+
begin_ = curr_index_;
|
| 354 |
+
};
|
| 355 |
+
end_ = curr_index_;
|
| 356 |
+
}
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
void visit(const BlockPtr& v) override {
|
| 360 |
+
for (const StmtPtr& s : *v) {
|
| 361 |
+
curr_index_ += 1;
|
| 362 |
+
findAccAndUpdateLiveRange(s);
|
| 363 |
+
}
|
| 364 |
+
}
|
| 365 |
+
|
| 366 |
+
BufPtr buf_;
|
| 367 |
+
int32_t begin_ = -1;
|
| 368 |
+
int32_t end_ = -1;
|
| 369 |
+
int32_t curr_index_ = -1;
|
| 370 |
+
};
|
| 371 |
+
|
| 372 |
+
// A class that analyzes the given program relevant for Block backend.
// It creates a map of multi dim buffers and their flat versions, keyed by
// the multi-dim buffer's name hint.
class CreateBufferMap : public IRVisitor {
 public:
  // Map populated during traversal; name hint -> flat (stored-to) buffer.
  const std::unordered_map<std::string, BufPtr>& getBufferMap() const {
    return map_input_to_tensor_bufs_;
  }

 private:
  void visit(const StorePtr& v) override {
    auto load_node = to<Load>(v->value());
    if (load_node) {
      // Copy-style store (dst = load(src)): map the source tensor's name to
      // the destination buffer.
      auto t_buf = load_node->buf();
      map_input_to_tensor_bufs_.emplace(t_buf->name_hint(), v->buf());
    } else {
      auto add_node = to<Add>(v->value());
      auto mul_node = to<Mul>(v->value());
      // This means for now, v->value() can be Add or Mul
      TORCH_INTERNAL_ASSERT(add_node || mul_node, buildErrorMessage());
      // Compute-style store: map the destination buffer's name to itself.
      map_input_to_tensor_bufs_.emplace(v->buf()->name_hint(), v->buf());
    }
    // Continue into the stored value to catch nested accesses.
    v->value()->accept(this);
  }
  std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
};
|
| 397 |
+
|
| 398 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <string>
|
| 4 |
+
#include <unordered_map>
|
| 5 |
+
#include <unordered_set>
|
| 6 |
+
#include <utility>
|
| 7 |
+
|
| 8 |
+
#include <ATen/ATen.h>
|
| 9 |
+
#include <torch/csrc/jit/resource_guard.h>
|
| 10 |
+
#include <torch/csrc/jit/tensorexpr/analysis.h>
|
| 11 |
+
#include <torch/csrc/jit/tensorexpr/codegen.h>
|
| 12 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 13 |
+
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
|
| 14 |
+
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
|
| 15 |
+
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
|
| 16 |
+
|
| 17 |
+
namespace torch::jit::tensorexpr {
|
| 18 |
+
|
| 19 |
+
// A class that analyzes the given program relevant for Block backend.
|
| 20 |
+
class BlockAnalysis : public IRVisitor {
|
| 21 |
+
public:
|
| 22 |
+
bool is_buf_store_target(const BufPtr& buf) const {
|
| 23 |
+
return store_targets_.count(buf) > 0;
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
const std::unordered_set<BufPtr>& loads() const {
|
| 27 |
+
return loads_;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
const std::unordered_set<BufPtr>& stores() const {
|
| 31 |
+
return store_targets_;
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
int64_t block_size() const {
|
| 35 |
+
return block_size_;
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
bool areBufsInMap(const std::unordered_set<BufPtr>& bufs) const;
|
| 39 |
+
|
| 40 |
+
BufPtr getMultiDimBuf(const BufPtr& buf) const;
|
| 41 |
+
|
| 42 |
+
std::string getInputName(const BufPtr& buf) const;
|
| 43 |
+
|
| 44 |
+
std::string getFlatInputName(const BufPtr& buf) const {
|
| 45 |
+
return getInputName(buf) + "_flat";
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
std::unordered_map<std::string, BufPtr> getBufferMap() const {
|
| 49 |
+
return map_input_to_tensor_bufs_;
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
private:
|
| 53 |
+
void visit(const StorePtr& v) override;
|
| 54 |
+
void visit(const LoadPtr& v) override;
|
| 55 |
+
void visit(const ForPtr& v) override;
|
| 56 |
+
|
| 57 |
+
std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
|
| 58 |
+
std::unordered_set<BufPtr> store_targets_;
|
| 59 |
+
std::unordered_set<BufPtr> loads_;
|
| 60 |
+
int64_t block_size_ = 32;
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
// A class that overrides the underlying IRPrinter to produce Block.
class BlockPrinter : public IRPrinter {
 public:
  // `os` and `block_analysis` are borrowed, not owned; both must outlive
  // the printer.
  BlockPrinter(std::ostream* os, BlockAnalysis* block_analysis)
      : IRPrinter(*os), block_analysis_(block_analysis) {}

  using IRPrinter::name_manager;
  using IRPrinter::visit;

 private:
  BlockAnalysis* block_analysis_;
  // Dimension-name bookkeeping used when emitting tensor metadata.
  std::unordered_map<std::string, int> dim_values_map;
  std::vector<std::string> dim_names = {"N", "H", "W", "C"};
  std::vector<std::string> flat_dim_names = {"N", "NH", "NHW", "NHWC"};
  // Emission helpers for the sections of the generated Block program.
  void PrintTensorInfo(const std::unordered_set<BufPtr>& bufs);
  void PrintArguments(const std::unordered_set<BufPtr>& bufs);
  void PrintBufferInfo(const std::unordered_set<BufPtr>& bufs);
  void PrintDistribution(const std::unordered_set<BufPtr>& bufs);
  void PrintLoop(const std::unordered_set<BufPtr>& bufs, bool block_idx = true);
  void PrintReshapeInfo(
      const std::unordered_set<BufPtr>& bufs,
      bool reverse = false);
  void PrintDMAs(const std::unordered_set<BufPtr>& bufs);
  void PrintAdjustBuffers(const std::unordered_set<BufPtr>& bufs);

  void visit(const ForPtr& v) override;
  void visit(const LoadPtr& v) override;
  void visit(const StorePtr& v) override;
  void visit(const BlockPtr& v) override;
  void visit(const AddPtr& v) override;
  void visit(const MulPtr& v) override;
};
|
| 95 |
+
|
| 96 |
+
// Code generator for the Block backend: drives BlockAnalysis/BlockPrinter
// and accumulates the generated source in oss_.
class TORCH_API BlockCodeGen : public CodeGen {
 public:
  template <typename... Ts>
  /* implicit */
  BlockCodeGen(StmtPtr stmt, Ts... ts)
      : CodeGen(
            // Move instead of copy: `stmt` is a by-value sink parameter and
            // the other constructor already moves it.
            std::move(stmt),
            std::vector<BufferArg>({BufferArg(ts)...}),
            at::Device(at::kCPU)) {
    Initialize();
  }

  BlockCodeGen(
      StmtPtr stmt,
      const std::vector<BufferArg>& buffer_args,
      at::Device device = at::Device(at::kCPU),
      const std::string& kernel_func_name = "func")
      : CodeGen(std::move(stmt), buffer_args, device, kernel_func_name) {
    Initialize();
  }

  ~BlockCodeGen() override;

  void call(const std::vector<CallArg>& args) override;
  void call_raw(const std::vector<void*>& args) override;

  // Runs the analysis/printing pipeline, populating oss_.
  void Initialize();

  // Returns the generated source text; `attr` is unused by this backend.
  std::string getCodeText(const std::string& attr = "") override {
    return oss_.str();
  }

 private:
  UniqueNameManager* name_manager() {
    if (!printer_) {
      throw std::runtime_error("Null IRPrinter is not expected");
    }
    return printer_->name_manager();
  }

  std::ostream& os() {
    return printer_->os();
  }

  std::ostringstream oss_;
  std::unique_ptr<BlockPrinter> printer_;
  std::unique_ptr<BlockAnalysis> block_analysis_;

  std::string GetUniqueFuncName(const std::string& func_prefix);
};
|
| 146 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <unordered_map>
#include <vector>

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/mem_dependency_checker.h>

namespace torch {
namespace jit {
namespace tensorexpr {

class Expr;
class Buf;
class Stmt;

// How a tensor is touched by an access.
enum C10_API_ENUM TensorAccessKind { kLoad, kStore, kMutate };

// Per-access bounds: inclusive start/stop index expressions, one entry per
// dimension of the accessed buffer.
struct TORCH_API TensorAccessBoundsInfo {
  TensorAccessKind kind;
  std::vector<ExprPtr> start;
  std::vector<ExprPtr> stop;
};

// All inferred accesses, grouped by the buffer they touch.
using BoundsInfo =
    std::unordered_map<BufPtr, std::vector<TensorAccessBoundsInfo>>;

// Infers access bounds for every buffer touched by `s`. When
// `distinctAccessKinds` is true, loads and stores are reported separately.
TORCH_API BoundsInfo
inferBounds(const StmtPtr& s, bool distinctAccessKinds = true);

// Bounds inference caching the analysis. The MemDependencyChecker must already
// have been run.
TORCH_API BoundsInfo getInferredBounds(
    analysis::MemDependencyChecker& analyzer,
    const StmtPtr& s,
    bool distinctAccessKinds = true);
TORCH_API BoundsInfo getInferredBounds(
    analysis::MemDependencyChecker& analyzer,
    const ExprPtr& e,
    bool distinctAccessKinds = true);

// Debug helper: dumps the BoundsInfo in human-readable form.
TORCH_API void printBoundsInfo(const BoundsInfo& v);

// Returns, per dimension, the extent (size) covered by the given accesses.
TORCH_API std::vector<ExprPtr> getBoundExtents(
    const std::vector<TensorAccessBoundsInfo>& infos);

// The kind of dependency found, in increasing order of exclusivity.
enum class HazardKind {
  ReadAfterWrite,
  WriteAfterRead,
  WriteAfterWrite,
  NoDependency,
};
// Classifies the dependency between statements A and B.
TORCH_API HazardKind getPotentialHazards(
    analysis::MemDependencyChecker& analyzer,
    const StmtPtr& A,
    const StmtPtr& B);

// Returns true if there is a conflicting overlap between accesses in
// statements A and B. A conflicting overlap is an overlap in buffer accesses
// where at least one of the accesses is a Store.
TORCH_API bool hasConflictingOverlap(
    analysis::MemDependencyChecker& analyzer,
    const StmtPtr& A,
    const StmtPtr& B);
// Same as above, between accesses in stores S1 and S2.
TORCH_API bool isOverlapping(
    analysis::MemDependencyChecker& analyzer,
    const StorePtr& S1,
    const StorePtr& S2);
// Same as above, between accesses in store S and load L.
TORCH_API bool isOverlapping(
    analysis::MemDependencyChecker& analyzer,
    const StorePtr& S,
    const LoadPtr& L);

} // namespace tensorexpr
} // namespace jit
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/tensorexpr/codegen.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
|
| 5 |
+
|
| 6 |
+
namespace torch::jit::tensorexpr {
|
| 7 |
+
|
| 8 |
+
class CppVarNameRewriter;
|
| 9 |
+
|
| 10 |
+
// Generates C++ code from the IR.
|
| 11 |
+
//
|
| 12 |
+
// Vector operations are unrolled.
|
| 13 |
+
// For example:
|
| 14 |
+
// C[Ramp(0, 1, 3)] = A[Ramp(0, 2, 3)] + B[Ramp(0, 3, 3)];
|
| 15 |
+
// is unrolled into:
|
| 16 |
+
// C[0] = A[0] + B[0];
|
| 17 |
+
// C[1] = A[2] + B[3];
|
| 18 |
+
// C[2] = A[4] + B[6];
|
| 19 |
+
class TORCH_API CppPrinter : public IRPrinter {
|
| 20 |
+
public:
|
| 21 |
+
explicit CppPrinter(std::ostream* os);
|
| 22 |
+
~CppPrinter() override;
|
| 23 |
+
|
| 24 |
+
void printPrologue();
|
| 25 |
+
|
| 26 |
+
using IRPrinter::visit;
|
| 27 |
+
|
| 28 |
+
// Binary expressions.
|
| 29 |
+
void visit(const ModPtr&) override;
|
| 30 |
+
void visit(const MaxPtr&) override;
|
| 31 |
+
void visit(const MinPtr&) override;
|
| 32 |
+
|
| 33 |
+
// Conditional expressions.
|
| 34 |
+
void visit(const CompareSelectPtr&) override;
|
| 35 |
+
void visit(const IfThenElsePtr&) override;
|
| 36 |
+
|
| 37 |
+
// Tensor operations.
|
| 38 |
+
void visit(const AllocatePtr&) override;
|
| 39 |
+
void visit(const FreePtr&) override;
|
| 40 |
+
void visit(const LoadPtr&) override;
|
| 41 |
+
void visit(const StorePtr&) override;
|
| 42 |
+
|
| 43 |
+
// Casts.
|
| 44 |
+
void visit(const CastPtr&) override;
|
| 45 |
+
void visit(const BitCastPtr&) override;
|
| 46 |
+
|
| 47 |
+
// Calls.
|
| 48 |
+
void visit(const IntrinsicsPtr&) override;
|
| 49 |
+
void visit(const ExternalCallPtr&) override;
|
| 50 |
+
|
| 51 |
+
// Vars.
|
| 52 |
+
void visit(const LetPtr&) override;
|
| 53 |
+
void visit(const VarPtr&) override;
|
| 54 |
+
|
| 55 |
+
// Vector data types.
|
| 56 |
+
void visit(const RampPtr&) override;
|
| 57 |
+
void visit(const BroadcastPtr&) override;
|
| 58 |
+
|
| 59 |
+
private:
|
| 60 |
+
int lane_;
|
| 61 |
+
std::unordered_map<VarPtr, ExprPtr> vector_vars_;
|
| 62 |
+
};
|
| 63 |
+
|
| 64 |
+
// Code generator that lowers the IR to C++ source via CppPrinter; the
// generated text accumulates in oss_.
class TORCH_API CppCodeGen : public CodeGen {
 public:
  CppCodeGen(
      StmtPtr stmt,
      const std::vector<BufferArg>& buffer_args,
      at::Device device = at::kCPU,
      const std::string& kernel_func_name = "func");

  ~CppCodeGen() override;

  void call(const std::vector<CallArg>& args) override;
  void call_raw(const std::vector<void*>& args) override;

  // Convenience call operator: wraps arbitrary arguments into CallArgs.
  template <typename... Ts>
  void operator()(const Ts&... ts) {
    call(std::vector<CallArg>({CallArg(ts)...}));
  }

  // Returns the generated source text; `attr` is unused by this backend.
  std::string getCodeText(const std::string& attr = "") override {
    return oss_.str();
  }

 private:
  void init();

  std::ostream& os() {
    return printer_->os();
  }

  std::ostringstream oss_;
  std::unique_ptr<CppPrinter> printer_;
  std::unique_ptr<CppVarNameRewriter> var_name_rewriter_;
};
|
| 97 |
+
|
| 98 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

namespace torch {
namespace jit {
namespace tensorexpr {

// Source text injected into generated C++ kernels to define the intrinsic
// functions (rsqrt, frac, bitcast) that the C++ codegen emits calls to.
// This string is compiled into the *generated* program, not executed here.
constexpr auto cpp_intrinsics_definition = R"(
namespace std {

template <typename T,
          typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
T rsqrt(T v) {
  return 1.0f / std::sqrt(v);
}

template <typename T,
          typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
T frac(T v) {
  T intpart;
  return std::modf(v, &intpart);
}

template <typename From, typename To>
To bitcast(const From& v) {
  assert(sizeof(To) == sizeof(From));
  To res;
  std::memcpy(&res, &v, sizeof(From));
  return res;
}

} // namespace std
)";

} // namespace tensorexpr
} // namespace jit
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <unordered_set>
|
| 4 |
+
|
| 5 |
+
#include <ATen/ATen.h>
|
| 6 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 7 |
+
#include <ATen/cuda/nvrtc_stub/ATenNVRTC.h>
|
| 8 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 9 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 10 |
+
#include <torch/csrc/jit/resource_guard.h>
|
| 11 |
+
#include <torch/csrc/jit/tensorexpr/codegen.h>
|
| 12 |
+
#include <torch/csrc/jit/tensorexpr/eval.h>
|
| 13 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 14 |
+
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
|
| 15 |
+
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
|
| 16 |
+
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
|
| 17 |
+
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
|
| 18 |
+
|
| 19 |
+
namespace torch::jit::tensorexpr {
|
| 20 |
+
|
| 21 |
+
// A class that analyzes the given program relevant for Cuda backends.
|
| 22 |
+
class CudaAnalysis : public IRVisitor {
|
| 23 |
+
public:
|
| 24 |
+
CudaAnalysis() {
|
| 25 |
+
gpu_block_extents_ = {alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
|
| 26 |
+
gpu_thread_extents_ = {
|
| 27 |
+
alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
|
| 28 |
+
}
|
| 29 |
+
bool is_buf_store_target(const BufPtr& buf) const {
|
| 30 |
+
return store_targets_.count(buf) > 0;
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
const std::unordered_set<VarPtr>& thread_local_bufs() const {
|
| 34 |
+
return thread_local_bufs_;
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
const std::unordered_set<VarPtr>& cross_block_bufs() const {
|
| 38 |
+
return cross_block_bufs_;
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
const std::vector<ExprPtr>& gpu_block_extents() const {
|
| 42 |
+
return gpu_block_extents_;
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
const std::vector<ExprPtr>& gpu_thread_extents() const {
|
| 46 |
+
return gpu_thread_extents_;
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
private:
|
| 50 |
+
void visit(const StorePtr& v) override {
|
| 51 |
+
store_targets_.insert(v->buf());
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
void visit(const AllocatePtr& v) override;
|
| 55 |
+
void visit(const FreePtr& v) override;
|
| 56 |
+
void visit(const PlacementAllocatePtr& v) override;
|
| 57 |
+
void visit(const ForPtr& v) override;
|
| 58 |
+
|
| 59 |
+
std::unordered_set<BufPtr> store_targets_;
|
| 60 |
+
std::unordered_set<VarPtr> thread_local_bufs_;
|
| 61 |
+
std::unordered_set<VarPtr> cross_block_bufs_;
|
| 62 |
+
|
| 63 |
+
std::vector<ExprPtr> gpu_block_extents_;
|
| 64 |
+
std::vector<ExprPtr> gpu_thread_extents_;
|
| 65 |
+
};
|
| 66 |
+
|
| 67 |
+
// An IRMutator that replaces binding loop options with Cuda metavars, and masks
|
| 68 |
+
// statements blocks which should execute with less reach than the launch
|
| 69 |
+
// parameter extent.
|
| 70 |
+
//
|
| 71 |
+
// We do this by segmenting each block into chunks which should have the same
|
| 72 |
+
// execution parameters, then if those params differ from the max mask each dim.
|
| 73 |
+
class GPUMetaVarRewriter : public IRMutator {
|
| 74 |
+
public:
|
| 75 |
+
explicit GPUMetaVarRewriter(const CudaAnalysis* cuda_analysis)
|
| 76 |
+
: cuda_analysis_(cuda_analysis) {
|
| 77 |
+
gpu_block_vars_ = {
|
| 78 |
+
alloc<Var>("blockIdx.x", kInt),
|
| 79 |
+
alloc<Var>("blockIdx.y", kInt),
|
| 80 |
+
alloc<Var>("blockIdx.z", kInt)};
|
| 81 |
+
gpu_thread_vars_ = {
|
| 82 |
+
alloc<Var>("threadIdx.x", kInt),
|
| 83 |
+
alloc<Var>("threadIdx.y", kInt),
|
| 84 |
+
alloc<Var>("threadIdx.z", kInt)};
|
| 85 |
+
|
| 86 |
+
current_block_reach_ = {
|
| 87 |
+
alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
|
| 88 |
+
current_thread_reach_ = {
|
| 89 |
+
alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
StmtPtr mutate(const ForPtr& v) override;
|
| 93 |
+
StmtPtr mutate(const BlockPtr& v) override;
|
| 94 |
+
|
| 95 |
+
const std::vector<VarPtr>& gpu_block_vars() const {
|
| 96 |
+
return gpu_block_vars_;
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
const std::vector<VarPtr>& gpu_thread_vars() const {
|
| 100 |
+
return gpu_thread_vars_;
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
const std::vector<ExprPtr>& gpu_block_extents() const {
|
| 104 |
+
return cuda_analysis_->gpu_block_extents();
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
const std::vector<ExprPtr>& gpu_thread_extents() const {
|
| 108 |
+
return cuda_analysis_->gpu_thread_extents();
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
private:
|
| 112 |
+
// When processing a block, stores the contents of each sub-segment.
|
| 113 |
+
class Segment {
|
| 114 |
+
public:
|
| 115 |
+
void reset(bool mask) {
|
| 116 |
+
stmts_.clear();
|
| 117 |
+
mask_ = mask;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
bool empty() const {
|
| 121 |
+
return stmts_.empty();
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
std::vector<StmtPtr>& stmts() {
|
| 125 |
+
return stmts_;
|
| 126 |
+
}
|
| 127 |
+
bool mask() {
|
| 128 |
+
return mask_;
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
private:
|
| 132 |
+
std::vector<StmtPtr> stmts_;
|
| 133 |
+
bool mask_{true};
|
| 134 |
+
};
|
| 135 |
+
|
| 136 |
+
// Returns true if the current execution scope is equivalent to the launch
|
| 137 |
+
// parameters.
|
| 138 |
+
bool isFullExtent();
|
| 139 |
+
|
| 140 |
+
std::vector<VarPtr> gpu_block_vars_;
|
| 141 |
+
std::vector<VarPtr> gpu_thread_vars_;
|
| 142 |
+
|
| 143 |
+
std::vector<ExprPtr> current_block_reach_;
|
| 144 |
+
std::vector<ExprPtr> current_thread_reach_;
|
| 145 |
+
|
| 146 |
+
const CudaAnalysis* cuda_analysis_;
|
| 147 |
+
};
|
| 148 |
+
|
| 149 |
+
// A class that overrides the underlying IRPrinter to produce Cuda C.
|
| 150 |
+
class CudaPrinter : public IRPrinter {
|
| 151 |
+
public:
|
| 152 |
+
explicit CudaPrinter(
|
| 153 |
+
std::ostream* os,
|
| 154 |
+
const CudaAnalysis* cuda_analysis,
|
| 155 |
+
bool has_random)
|
| 156 |
+
: IRPrinter(*os), cuda_analysis_(cuda_analysis) {
|
| 157 |
+
if (has_random) {
|
| 158 |
+
rand_func_ = alloc<Var>("rand", kHandle);
|
| 159 |
+
}
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
void visit(const CastPtr& v) override;
|
| 163 |
+
void visit(const IntrinsicsPtr& v) override;
|
| 164 |
+
void visit(const ForPtr& v) override;
|
| 165 |
+
|
| 166 |
+
void visit(const LoadPtr& v) override;
|
| 167 |
+
void visit(const StorePtr& v) override;
|
| 168 |
+
void visit(const AtomicAddPtr& v) override;
|
| 169 |
+
void visit(const MaxPtr& v) override;
|
| 170 |
+
void visit(const MinPtr& v) override;
|
| 171 |
+
void visit(const IfThenElsePtr& v) override;
|
| 172 |
+
void visit(const BlockPtr& v) override;
|
| 173 |
+
void visit(const AllocatePtr& v) override;
|
| 174 |
+
void visit(const FreePtr& v) override;
|
| 175 |
+
void visit(const LetPtr& v) override;
|
| 176 |
+
|
| 177 |
+
void visit(const ExternalCallPtr& v) override;
|
| 178 |
+
|
| 179 |
+
VarPtr rand_func() const {
|
| 180 |
+
return rand_func_;
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
std::string dtypeToCppString(const Dtype& dtype) override;
|
| 184 |
+
|
| 185 |
+
using IRPrinter::name_manager;
|
| 186 |
+
using IRPrinter::visit;
|
| 187 |
+
|
| 188 |
+
private:
|
| 189 |
+
VarPtr rand_func_;
|
| 190 |
+
const CudaAnalysis* cuda_analysis_;
|
| 191 |
+
|
| 192 |
+
void print_flat_alloc(const AllocatePtr& alloc);
|
| 193 |
+
};
|
| 194 |
+
|
| 195 |
+
// Construct Cuda C from the buffer and tensor input, and invoke the
|
| 196 |
+
// kernel when real arguments are provided.
|
| 197 |
+
class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen {
|
| 198 |
+
public:
|
| 199 |
+
template <typename... Ts>
|
| 200 |
+
CudaCodeGen(StmtPtr stmt, Ts... ts)
|
| 201 |
+
: CodeGen(
|
| 202 |
+
stmt,
|
| 203 |
+
std::vector<BufferArg>({BufferArg(ts)...}),
|
| 204 |
+
at::Device(at::kCUDA, at::cuda::current_device())) {
|
| 205 |
+
Initialize();
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
CudaCodeGen(
|
| 209 |
+
StmtPtr stmt,
|
| 210 |
+
const std::vector<BufferArg>& buffer_args,
|
| 211 |
+
at::Device device = at::Device(at::kCUDA, at::cuda::current_device()),
|
| 212 |
+
const std::string& kernel_func_name = "func")
|
| 213 |
+
: CodeGen(std::move(stmt), buffer_args, device, kernel_func_name) {
|
| 214 |
+
Initialize();
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
~CudaCodeGen() override;
|
| 218 |
+
|
| 219 |
+
void call(const std::vector<CallArg>& args) override;
|
| 220 |
+
void call_raw(const std::vector<void*>& args) override;
|
| 221 |
+
void call_with_numel(void** args, int64_t numel) override;
|
| 222 |
+
|
| 223 |
+
template <typename... Ts>
|
| 224 |
+
void operator()(const Ts&... ts) {
|
| 225 |
+
call(std::vector<CallArg>({CallArg(ts)...}));
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
at::Tensor empty_strided(
|
| 229 |
+
c10::IntArrayRef size,
|
| 230 |
+
c10::IntArrayRef stride,
|
| 231 |
+
std::optional<c10::ScalarType> dtype_opt,
|
| 232 |
+
std::optional<c10::Layout> layout_opt,
|
| 233 |
+
std::optional<c10::Device> device_opt,
|
| 234 |
+
std::optional<bool> pin_memory_opt) override;
|
| 235 |
+
|
| 236 |
+
const std::vector<ExprPtr>& gpu_block_extents() const {
|
| 237 |
+
return cuda_analysis_->gpu_block_extents();
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
const std::vector<ExprPtr>& gpu_thread_extents() const {
|
| 241 |
+
return cuda_analysis_->gpu_thread_extents();
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
std::string getCodeText(const std::string& attr = "") override {
|
| 245 |
+
return oss_.str();
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
private:
|
| 249 |
+
void Initialize();
|
| 250 |
+
|
| 251 |
+
void CompileToNVRTC(const std::string& code, const std::string& func_name);
|
| 252 |
+
|
| 253 |
+
UniqueNameManager* name_manager() {
|
| 254 |
+
if (!printer_) {
|
| 255 |
+
throw std::runtime_error("Null IRPrinter is not expected");
|
| 256 |
+
}
|
| 257 |
+
return printer_->name_manager();
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
std::ostream& os() {
|
| 261 |
+
return printer_->os();
|
| 262 |
+
}
|
| 263 |
+
|
| 264 |
+
std::ostringstream oss_;
|
| 265 |
+
std::unique_ptr<CudaPrinter> printer_;
|
| 266 |
+
std::unique_ptr<CudaAnalysis> cuda_analysis_;
|
| 267 |
+
std::unique_ptr<GPUMetaVarRewriter> metavar_rewriter_;
|
| 268 |
+
std::unordered_set<std::string> taken_func_names;
|
| 269 |
+
std::mutex eval_lock_;
|
| 270 |
+
CUfunction function_{nullptr};
|
| 271 |
+
bool has_random_ = false;
|
| 272 |
+
int thread_block_size_ = -1;
|
| 273 |
+
|
| 274 |
+
std::vector<bool> arg_pos_in_extents_;
|
| 275 |
+
#ifdef TORCH_ENABLE_LLVM
|
| 276 |
+
std::vector<ExprEval<LLVMCodeGen>> block_extents_eval_;
|
| 277 |
+
std::vector<ExprEval<LLVMCodeGen>> thread_extents_eval_;
|
| 278 |
+
#else
|
| 279 |
+
std::vector<ExprEval<SimpleIREvaluator>> block_extents_eval_;
|
| 280 |
+
std::vector<ExprEval<SimpleIREvaluator>> thread_extents_eval_;
|
| 281 |
+
#endif
|
| 282 |
+
|
| 283 |
+
std::string GetUniqueFuncName(const std::string& func_prefix);
|
| 284 |
+
};
|
| 285 |
+
|
| 286 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace torch {
|
| 4 |
+
namespace jit {
|
| 5 |
+
namespace tensorexpr {
|
| 6 |
+
|
| 7 |
+
constexpr auto philox_random_string = R"(
|
| 8 |
+
|
| 9 |
+
class Philox {
|
| 10 |
+
public:
|
| 11 |
+
__device__ inline Philox(unsigned long long seed,
|
| 12 |
+
unsigned long long subsequence,
|
| 13 |
+
unsigned long long offset) {
|
| 14 |
+
key.x = (unsigned int)seed;
|
| 15 |
+
key.y = (unsigned int)(seed >> 32);
|
| 16 |
+
counter = make_uint4(0, 0, 0, 0);
|
| 17 |
+
counter.z = (unsigned int)(subsequence);
|
| 18 |
+
counter.w = (unsigned int)(subsequence >> 32);
|
| 19 |
+
STATE = 0;
|
| 20 |
+
incr_n(offset / 4);
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
__device__ inline unsigned long operator()() {
|
| 24 |
+
if(STATE == 0) {
|
| 25 |
+
uint4 counter_ = counter;
|
| 26 |
+
uint2 key_ = key;
|
| 27 |
+
for(int i = 0; i < 9; i++) {
|
| 28 |
+
counter_ = single_round(counter_, key_);
|
| 29 |
+
key_.x += (kPhilox10A); key_.y += (kPhilox10B);
|
| 30 |
+
}
|
| 31 |
+
output = single_round(counter_, key_);
|
| 32 |
+
incr();
|
| 33 |
+
}
|
| 34 |
+
unsigned long ret;
|
| 35 |
+
switch(STATE) {
|
| 36 |
+
case 0: ret = output.x; break;
|
| 37 |
+
case 1: ret = output.y; break;
|
| 38 |
+
case 2: ret = output.z; break;
|
| 39 |
+
case 3: ret = output.w; break;
|
| 40 |
+
}
|
| 41 |
+
STATE = (STATE + 1) % 4;
|
| 42 |
+
return ret;
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
private:
|
| 46 |
+
uint4 counter;
|
| 47 |
+
uint4 output;
|
| 48 |
+
uint2 key;
|
| 49 |
+
unsigned int STATE;
|
| 50 |
+
__device__ inline void incr_n(unsigned long long n) {
|
| 51 |
+
unsigned int nlo = (unsigned int)(n);
|
| 52 |
+
unsigned int nhi = (unsigned int)(n >> 32);
|
| 53 |
+
counter.x += nlo;
|
| 54 |
+
if (counter.x < nlo)
|
| 55 |
+
nhi++;
|
| 56 |
+
counter.y += nhi;
|
| 57 |
+
if (nhi <= counter.y)
|
| 58 |
+
return;
|
| 59 |
+
if (++counter.z)
|
| 60 |
+
return;
|
| 61 |
+
++counter.w;
|
| 62 |
+
}
|
| 63 |
+
__device__ inline void incr() {
|
| 64 |
+
if (++counter.x)
|
| 65 |
+
return;
|
| 66 |
+
if (++counter.y)
|
| 67 |
+
return;
|
| 68 |
+
if (++counter.z)
|
| 69 |
+
return;
|
| 70 |
+
++counter.w;
|
| 71 |
+
}
|
| 72 |
+
__device__ unsigned int mulhilo32(unsigned int a, unsigned int b,
|
| 73 |
+
unsigned int *result_high) {
|
| 74 |
+
*result_high = __umulhi(a, b);
|
| 75 |
+
return a*b;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
__device__ inline uint4 single_round(uint4 ctr, uint2 key) {
|
| 79 |
+
unsigned int hi0;
|
| 80 |
+
unsigned int hi1;
|
| 81 |
+
unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);
|
| 82 |
+
unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);
|
| 83 |
+
|
| 84 |
+
uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};
|
| 85 |
+
return ret;
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
static const unsigned long kPhilox10A = 0x9E3779B9;
|
| 89 |
+
static const unsigned long kPhilox10B = 0xBB67AE85;
|
| 90 |
+
static const unsigned long kPhiloxSA = 0xD2511F53;
|
| 91 |
+
static const unsigned long kPhiloxSB = 0xCD9E8D57;
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
// Inverse of 2^32.
|
| 95 |
+
#define M_RAN_INVM32 2.3283064e-10f
|
| 96 |
+
__device__ __inline__ float Uint32ToFloat(unsigned int x) {
|
| 97 |
+
return x * M_RAN_INVM32;
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
)";
|
| 101 |
+
|
| 102 |
+
} // namespace tensorexpr
|
| 103 |
+
} // namespace jit
|
| 104 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <cmath>
|
| 4 |
+
#include <cstring>
|
| 5 |
+
#include <utility>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
#include <c10/macros/Macros.h>
|
| 9 |
+
#include <c10/util/Logging.h>
|
| 10 |
+
#include <torch/csrc/jit/tensorexpr/codegen.h>
|
| 11 |
+
#include <torch/csrc/jit/tensorexpr/exceptions.h>
|
| 12 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 13 |
+
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
|
| 14 |
+
#include <torch/csrc/jit/tensorexpr/tensor.h>
|
| 15 |
+
#include <torch/csrc/jit/tensorexpr/types.h>
|
| 16 |
+
#include <torch/csrc/jit/tensorexpr/var_substitutor.h>
|
| 17 |
+
|
| 18 |
+
namespace torch::jit::tensorexpr {
|
| 19 |
+
|
| 20 |
+
class InterpValue {
|
| 21 |
+
public:
|
| 22 |
+
InterpValue() : dtype_(kInt) {
|
| 23 |
+
Intvalues.push_back(0);
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
template <typename T>
|
| 27 |
+
InterpValue(Dtype dtype, T v) : dtype_(dtype) {
|
| 28 |
+
#define TYPE_CASE(Type, Name) \
|
| 29 |
+
if (dtype == k##Name) { \
|
| 30 |
+
Name##values.push_back(v); \
|
| 31 |
+
return; \
|
| 32 |
+
}
|
| 33 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
|
| 34 |
+
#undef TYPE_CASE
|
| 35 |
+
throw unsupported_dtype();
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
#define VALUE_CTOR(Type, Name) \
|
| 39 |
+
InterpValue(Type v) : dtype_(k##Name) { \
|
| 40 |
+
Name##values.push_back(v); \
|
| 41 |
+
}
|
| 42 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_CTOR);
|
| 43 |
+
#undef VALUE_CTOR
|
| 44 |
+
|
| 45 |
+
explicit InterpValue(c10::quint8 v) : dtype_(kQUInt8) {
|
| 46 |
+
QUInt8values.emplace_back(v.val_);
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
explicit InterpValue(c10::qint8 v) : dtype_(kQInt8) {
|
| 50 |
+
QInt8values.emplace_back(v.val_);
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
#define VALUE_VEC_CTOR(Type, Name) \
|
| 54 |
+
InterpValue(const std::vector<Type>& v) \
|
| 55 |
+
: dtype_(Dtype(k##Name, v.size())), Name##values(v) {}
|
| 56 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_VEC_CTOR);
|
| 57 |
+
VALUE_VEC_CTOR(c10::quint8, QUInt8)
|
| 58 |
+
VALUE_VEC_CTOR(c10::qint8, QInt8)
|
| 59 |
+
#undef VALUE_VEC_CTOR
|
| 60 |
+
|
| 61 |
+
template <typename T>
|
| 62 |
+
T as() const;
|
| 63 |
+
|
| 64 |
+
template <typename T>
|
| 65 |
+
const std::vector<T>& as_vec() const;
|
| 66 |
+
|
| 67 |
+
int64_t intValue() const;
|
| 68 |
+
|
| 69 |
+
Dtype dtype() const {
|
| 70 |
+
return dtype_;
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
private:
|
| 74 |
+
Dtype dtype_;
|
| 75 |
+
|
| 76 |
+
#define VALUE_STORAGE(Type, Name) std::vector<Type> Name##values;
|
| 77 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_STORAGE);
|
| 78 |
+
VALUE_STORAGE(c10::qint8, QInt8);
|
| 79 |
+
VALUE_STORAGE(c10::quint8, QUInt8);
|
| 80 |
+
#undef VALUE_STORAGE
|
| 81 |
+
void* ptr{nullptr};
|
| 82 |
+
};
|
| 83 |
+
|
| 84 |
+
#define VALUE_AS_DISPATCH(Type, Name) \
|
| 85 |
+
template <> \
|
| 86 |
+
inline Type InterpValue::as<Type>() const { \
|
| 87 |
+
if (dtype_ != k##Name) { \
|
| 88 |
+
throw unsupported_dtype(); \
|
| 89 |
+
} \
|
| 90 |
+
return Name##values[0]; \
|
| 91 |
+
}
|
| 92 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_DISPATCH);
|
| 93 |
+
VALUE_AS_DISPATCH(c10::quint8, QUInt8);
|
| 94 |
+
VALUE_AS_DISPATCH(c10::qint8, QInt8);
|
| 95 |
+
#undef VALUE_AS_DISPATCH
|
| 96 |
+
|
| 97 |
+
#define VALUE_AS_VEC_DISPATCH(Type, Name) \
|
| 98 |
+
template <> \
|
| 99 |
+
inline const std::vector<Type>& InterpValue::as_vec<Type>() const { \
|
| 100 |
+
if (dtype_.scalar_type() != ScalarType::Name) { \
|
| 101 |
+
throw unsupported_dtype(); \
|
| 102 |
+
} \
|
| 103 |
+
return Name##values; \
|
| 104 |
+
}
|
| 105 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_VEC_DISPATCH);
|
| 106 |
+
VALUE_AS_VEC_DISPATCH(c10::quint8, QUInt8);
|
| 107 |
+
VALUE_AS_VEC_DISPATCH(c10::qint8, QInt8);
|
| 108 |
+
#undef VALUE_AS_VEC_DISPATCH
|
| 109 |
+
|
| 110 |
+
template <typename Type>
|
| 111 |
+
auto underlyingValue(Type x) {
|
| 112 |
+
return x;
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
template <>
|
| 116 |
+
inline auto underlyingValue<c10::quint8>(c10::quint8 x) {
|
| 117 |
+
return x.val_;
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
template <>
|
| 121 |
+
inline auto underlyingValue<c10::qint8>(c10::qint8 x) {
|
| 122 |
+
return x.val_;
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
template <typename To, typename From>
|
| 126 |
+
To raw_bitcast(const From& src) {
|
| 127 |
+
TORCH_CHECK(sizeof(To) == sizeof(From), "Invalid bitcast invocation");
|
| 128 |
+
To storage;
|
| 129 |
+
std::memcpy(&storage, &src, sizeof(To));
|
| 130 |
+
return reinterpret_cast<To&>(storage);
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
class SimpleIREvaluatorImpl;
|
| 134 |
+
class TORCH_API SimpleIREvaluator : public CodeGen {
|
| 135 |
+
public:
|
| 136 |
+
SimpleIREvaluator(
|
| 137 |
+
StmtPtr stmt,
|
| 138 |
+
const std::vector<BufferArg>& buffer_args,
|
| 139 |
+
at::Device device = at::kCPU,
|
| 140 |
+
const std::string& kernel_func_name = "func");
|
| 141 |
+
|
| 142 |
+
~SimpleIREvaluator() override;
|
| 143 |
+
|
| 144 |
+
void call(const std::vector<CallArg>& args) override;
|
| 145 |
+
void call_raw(const std::vector<void*>& args) override;
|
| 146 |
+
|
| 147 |
+
template <typename... Ts>
|
| 148 |
+
void operator()(const Ts&... ts) {
|
| 149 |
+
std::vector<CallArg> args({CallArg(ts)...});
|
| 150 |
+
call(args);
|
| 151 |
+
}
|
| 152 |
+
|
| 153 |
+
void bindVar(const VarPtr& v, const ExprPtr& e);
|
| 154 |
+
InterpValue value() const;
|
| 155 |
+
|
| 156 |
+
private:
|
| 157 |
+
void bindArg(const BufferArg& buf, void* data);
|
| 158 |
+
void expand_intrinsics() {
|
| 159 |
+
GenericIntrinsicsExpander intrinsics_expander;
|
| 160 |
+
apply_mutator(&intrinsics_expander);
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
std::unique_ptr<SimpleIREvaluatorImpl> impl_;
|
| 164 |
+
};
|
| 165 |
+
|
| 166 |
+
template <class CodeGenType>
|
| 167 |
+
class ExprEval {
|
| 168 |
+
public:
|
| 169 |
+
using BufferArg = CodeGen::BufferArg;
|
| 170 |
+
using CallArg = CodeGen::CallArg;
|
| 171 |
+
|
| 172 |
+
template <typename... Ts>
|
| 173 |
+
ExprEval(const ExprHandle& expr, Ts... ts)
|
| 174 |
+
: ExprEval(expr, {BufferArg(ts)...}) {}
|
| 175 |
+
|
| 176 |
+
ExprEval(const ExprHandle& expr, const std::vector<BufferArg>& buffer_args)
|
| 177 |
+
: dtype_(expr.dtype()) {
|
| 178 |
+
std::vector<BufferArg> buffer_args_extended = buffer_args;
|
| 179 |
+
BufHandle ret_buf("ret_val", {1}, dtype_);
|
| 180 |
+
std::vector<ExprHandle> indices;
|
| 181 |
+
ExprHandle zero = IntImm::make(0);
|
| 182 |
+
for (size_t i = 0; i < ret_buf.ndim(); i++) {
|
| 183 |
+
indices.push_back(zero);
|
| 184 |
+
}
|
| 185 |
+
StmtPtr store_stmt = Store::make(ret_buf, indices, expr);
|
| 186 |
+
buffer_args_extended.emplace_back(ret_buf);
|
| 187 |
+
codegen_.reset(new CodeGenType(store_stmt, buffer_args_extended));
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
template <typename... Ts>
|
| 191 |
+
void operator()(Ts... ts) {
|
| 192 |
+
call(ts...);
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
void operator()(const std::vector<CallArg>& call_args) {
|
| 196 |
+
call(call_args);
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
void bindVar(VarPtr v, ExprPtr e) {
|
| 200 |
+
codegen_->bindVar(v, e);
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
void bindVar(const VarHandle& v, const ExprHandle& e) {
|
| 204 |
+
codegen_->bindVar(v.node(), e.node());
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
template <typename... Ts>
|
| 208 |
+
void call(Ts... ts) {
|
| 209 |
+
call({CallArg(ts)...});
|
| 210 |
+
}
|
| 211 |
+
|
| 212 |
+
void call(const std::vector<CallArg>& call_args) {
|
| 213 |
+
std::vector<CallArg> call_args_extended = call_args;
|
| 214 |
+
switch (dtype_.scalar_type()) {
|
| 215 |
+
#define TYPE_CASE(Type, Name) \
|
| 216 |
+
case ScalarType::Name: { \
|
| 217 |
+
std::vector<Type> ret_val_arg(1); \
|
| 218 |
+
call_args_extended.emplace_back(ret_val_arg); \
|
| 219 |
+
codegen_->call(call_args_extended); \
|
| 220 |
+
ret_value_ = InterpValue(ret_val_arg[0]); \
|
| 221 |
+
} break;
|
| 222 |
+
AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
|
| 223 |
+
TYPE_CASE(c10::quint8, QUInt8);
|
| 224 |
+
TYPE_CASE(c10::qint8, QInt8);
|
| 225 |
+
#undef TYPE_CASE
|
| 226 |
+
case ScalarType::Bool: {
|
| 227 |
+
std::vector<unsigned char> ret_val_arg(1);
|
| 228 |
+
call_args_extended.emplace_back(ret_val_arg.data());
|
| 229 |
+
codegen_->call(call_args_extended);
|
| 230 |
+
ret_value_ = InterpValue((bool)ret_val_arg[0]);
|
| 231 |
+
} break;
|
| 232 |
+
default:
|
| 233 |
+
throw unsupported_dtype();
|
| 234 |
+
}
|
| 235 |
+
}
|
| 236 |
+
|
| 237 |
+
void call_raw(const std::vector<void*>& args) {
|
| 238 |
+
std::vector<void*> args_extended = args;
|
| 239 |
+
switch (dtype_.scalar_type()) {
|
| 240 |
+
#define TYPE_CASE(Type, Name) \
|
| 241 |
+
case ScalarType::Name: { \
|
| 242 |
+
std::vector<Type> ret_val_arg(1); \
|
| 243 |
+
args_extended.push_back(ret_val_arg.data()); \
|
| 244 |
+
codegen_->call_raw(args_extended); \
|
| 245 |
+
ret_value_ = InterpValue(ret_val_arg[0]); \
|
| 246 |
+
} break;
|
| 247 |
+
AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
|
| 248 |
+
TYPE_CASE(c10::quint8, QUInt8);
|
| 249 |
+
TYPE_CASE(c10::qint8, QInt8);
|
| 250 |
+
#undef TYPE_CASE
|
| 251 |
+
case ScalarType::Bool: {
|
| 252 |
+
std::vector<unsigned char> ret_val_arg(1);
|
| 253 |
+
args_extended.push_back(ret_val_arg.data());
|
| 254 |
+
codegen_->call_raw(args_extended);
|
| 255 |
+
ret_value_ = InterpValue((bool)ret_val_arg[0]);
|
| 256 |
+
} break;
|
| 257 |
+
default:
|
| 258 |
+
throw unsupported_dtype();
|
| 259 |
+
}
|
| 260 |
+
}
|
| 261 |
+
|
| 262 |
+
template <typename T>
|
| 263 |
+
T value(const std::vector<void*>& args) {
|
| 264 |
+
call_raw(args);
|
| 265 |
+
return ret_value_.as<T>();
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
template <typename T, typename... Ts>
|
| 269 |
+
T value(Ts... ts) {
|
| 270 |
+
call(std::forward<Ts>(ts)...);
|
| 271 |
+
return ret_value_.as<T>();
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
Dtype dtype() {
|
| 275 |
+
return dtype_;
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
private:
|
| 279 |
+
Dtype dtype_;
|
| 280 |
+
std::unique_ptr<CodeGenType> codegen_;
|
| 281 |
+
InterpValue ret_value_;
|
| 282 |
+
};
|
| 283 |
+
|
| 284 |
+
// Evaluates the given expression and returns an int64_t value if the result of
|
| 285 |
+
// the given expression is int64_t.
|
| 286 |
+
std::optional<int64_t> evalInt(ExprPtr e);
|
| 287 |
+
|
| 288 |
+
// Substitutes the given vars with their corresponding expressions in the input
|
| 289 |
+
// expression.
|
| 290 |
+
inline ExprPtr Substitute(const ExprPtr& expr, const VarMapping& var_mapping) {
|
| 291 |
+
VarSubMutator var_sub(var_mapping);
|
| 292 |
+
return expr->accept_mutator(&var_sub);
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
// Substitutes the given vars with their corresponding expressions in the input
|
| 296 |
+
// statement.
|
| 297 |
+
inline StmtPtr Substitute(const StmtPtr& stmt, const VarMapping& var_mapping) {
|
| 298 |
+
VarSubMutator var_sub(var_mapping);
|
| 299 |
+
return stmt->accept_mutator(&var_sub);
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
// Creates a clone of the input expression and substitutes the given vars with
|
| 303 |
+
// their corresponding expressions in the clone.
|
| 304 |
+
// NOTE: This works because cloning reuses variables and does not create new
|
| 305 |
+
// ones, and `VarMapping` input has variables as the key.
|
| 306 |
+
inline ExprPtr SubstituteInClone(
|
| 307 |
+
const ExprPtr& expr,
|
| 308 |
+
const VarMapping& var_mapping) {
|
| 309 |
+
VarSubMutator var_sub(var_mapping);
|
| 310 |
+
return Expr::clone(expr)->accept_mutator(&var_sub);
|
| 311 |
+
}
|
| 312 |
+
|
| 313 |
+
// Creates a clone of the input statement and substitutes the given vars with
|
| 314 |
+
// their corresponding expressions in the clone.
|
| 315 |
+
// NOTE: This works because cloning reuses variables and does not create new
|
| 316 |
+
// ones, and `VarMapping` input has variables as the key.
|
| 317 |
+
inline StmtPtr SubstituteInClone(
|
| 318 |
+
const StmtPtr& stmt,
|
| 319 |
+
const VarMapping& var_mapping) {
|
| 320 |
+
VarSubMutator var_sub(var_mapping);
|
| 321 |
+
return Stmt::clone(stmt)->accept_mutator(&var_sub);
|
| 322 |
+
}
|
| 323 |
+
|
| 324 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
|
| 5 |
+
|
| 6 |
+
#include <stdexcept>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of types
|
| 9 |
+
|
| 10 |
+
namespace torch::jit::tensorexpr {
|
| 11 |
+
class Expr;
|
| 12 |
+
class Stmt;
|
| 13 |
+
} // namespace torch::jit::tensorexpr
|
| 14 |
+
|
| 15 |
+
// Forward declarations of functions
|
| 16 |
+
namespace std {
|
| 17 |
+
TORCH_API std::string to_string(const torch::jit::tensorexpr::ExprPtr&);
|
| 18 |
+
TORCH_API std::string to_string(const torch::jit::tensorexpr::StmtPtr&);
|
| 19 |
+
} // namespace std
|
| 20 |
+
|
| 21 |
+
namespace torch::jit::tensorexpr {
|
| 22 |
+
|
| 23 |
+
class unsupported_dtype : public std::runtime_error {
|
| 24 |
+
public:
|
| 25 |
+
explicit unsupported_dtype() : std::runtime_error("UNSUPPORTED DTYPE") {}
|
| 26 |
+
explicit unsupported_dtype(const std::string& err)
|
| 27 |
+
: std::runtime_error("UNSUPPORTED DTYPE: " + err) {}
|
| 28 |
+
};
|
| 29 |
+
|
| 30 |
+
class out_of_range_index : public std::runtime_error {
|
| 31 |
+
public:
|
| 32 |
+
explicit out_of_range_index() : std::runtime_error("OUT OF RANGE INDEX") {}
|
| 33 |
+
explicit out_of_range_index(const std::string& err)
|
| 34 |
+
: std::runtime_error("OUT OF RANGE INDEX: " + err) {}
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
class unimplemented_lowering : public std::runtime_error {
|
| 38 |
+
public:
|
| 39 |
+
explicit unimplemented_lowering()
|
| 40 |
+
: std::runtime_error("UNIMPLEMENTED LOWERING") {}
|
| 41 |
+
explicit unimplemented_lowering(const ExprPtr& expr)
|
| 42 |
+
: std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(expr)) {}
|
| 43 |
+
explicit unimplemented_lowering(const StmtPtr& stmt)
|
| 44 |
+
: std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(stmt)) {}
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
class malformed_input : public std::runtime_error {
|
| 48 |
+
public:
|
| 49 |
+
explicit malformed_input() : std::runtime_error("MALFORMED INPUT") {}
|
| 50 |
+
explicit malformed_input(const std::string& err)
|
| 51 |
+
: std::runtime_error("MALFORMED INPUT: " + err) {}
|
| 52 |
+
explicit malformed_input(const ExprPtr& expr)
|
| 53 |
+
: std::runtime_error("MALFORMED INPUT: " + std::to_string(expr)) {}
|
| 54 |
+
explicit malformed_input(const std::string& err, const ExprPtr& expr)
|
| 55 |
+
: std::runtime_error(
|
| 56 |
+
"MALFORMED INPUT: " + err + " - " + std::to_string(expr)) {}
|
| 57 |
+
explicit malformed_input(const StmtPtr& stmt)
|
| 58 |
+
: std::runtime_error("MALFORMED INPUT: " + std::to_string(stmt)) {}
|
| 59 |
+
explicit malformed_input(const std::string& err, const StmtPtr& stmt)
|
| 60 |
+
: std::runtime_error(
|
| 61 |
+
"MALFORMED INPUT: " + err + " - " + std::to_string(stmt)) {}
|
| 62 |
+
};
|
| 63 |
+
|
| 64 |
+
class malformed_ir : public std::runtime_error {
|
| 65 |
+
public:
|
| 66 |
+
explicit malformed_ir() : std::runtime_error("MALFORMED IR") {}
|
| 67 |
+
explicit malformed_ir(const std::string& err)
|
| 68 |
+
: std::runtime_error("MALFORMED IR: " + err) {}
|
| 69 |
+
explicit malformed_ir(const ExprPtr& expr)
|
| 70 |
+
: std::runtime_error("MALFORMED IR: " + std::to_string(expr)) {}
|
| 71 |
+
explicit malformed_ir(const std::string& err, const ExprPtr& expr)
|
| 72 |
+
: std::runtime_error(
|
| 73 |
+
"MALFORMED IR: " + err + " - " + std::to_string(expr)) {}
|
| 74 |
+
explicit malformed_ir(const StmtPtr& stmt)
|
| 75 |
+
: std::runtime_error("MALFORMED IR: " + std::to_string(stmt)) {}
|
| 76 |
+
explicit malformed_ir(const std::string& err, const StmtPtr& stmt)
|
| 77 |
+
: std::runtime_error(
|
| 78 |
+
"MALFORMED IR: " + err + " - " + std::to_string(stmt)) {}
|
| 79 |
+
};
|
| 80 |
+
|
| 81 |
+
TORCH_API std::string buildErrorMessage(const std::string& s = "");
|
| 82 |
+
|
| 83 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h
ADDED
|
@@ -0,0 +1,493 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* This file implements the core classes for Tensor Expressions.
|
| 3 |
+
*
|
| 4 |
+
* The structure of the expressions is inspired by Halide/TVM IR.
|
| 5 |
+
*/
|
| 6 |
+
#pragma once
|
| 7 |
+
|
| 8 |
+
#include <c10/core/MemoryFormat.h>
|
| 9 |
+
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
|
| 10 |
+
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
|
| 11 |
+
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
|
| 12 |
+
#include <torch/csrc/jit/tensorexpr/types.h>
|
| 13 |
+
#include <optional>
|
| 14 |
+
|
| 15 |
+
#include <utility>
|
| 16 |
+
|
| 17 |
+
namespace torch::jit::tensorexpr {
|
| 18 |
+
|
| 19 |
+
enum IRNodeType {
|
| 20 |
+
kPrimitive,
|
| 21 |
+
kAdd,
|
| 22 |
+
kSub,
|
| 23 |
+
kMul,
|
| 24 |
+
kDiv,
|
| 25 |
+
kMod,
|
| 26 |
+
kMax,
|
| 27 |
+
kMin,
|
| 28 |
+
kAnd,
|
| 29 |
+
kOr,
|
| 30 |
+
kLshift,
|
| 31 |
+
kRshift,
|
| 32 |
+
kXor,
|
| 33 |
+
kCompareSelect,
|
| 34 |
+
kCast,
|
| 35 |
+
kBitCast,
|
| 36 |
+
kOther,
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
// The common base between all expression node.
|
| 40 |
+
class TORCH_API Expr : public std::enable_shared_from_this<Expr> {
|
| 41 |
+
public:
|
| 42 |
+
explicit Expr(Dtype dtype, IRNodeType expr_type = kOther)
|
| 43 |
+
: dtype_(dtype), expr_type_(expr_type) {}
|
| 44 |
+
virtual ~Expr() = default;
|
| 45 |
+
Dtype dtype() const {
|
| 46 |
+
return dtype_;
|
| 47 |
+
}
|
| 48 |
+
virtual void accept(IRVisitor* visitor) = 0;
|
| 49 |
+
virtual ExprPtr accept_mutator(IRMutator* mutator) = 0;
|
| 50 |
+
|
| 51 |
+
IRNodeType expr_type() const {
|
| 52 |
+
return expr_type_;
|
| 53 |
+
}
|
| 54 |
+
// Is this a fixed (constant) immediate value.
|
| 55 |
+
virtual bool isConstant() const {
|
| 56 |
+
return false;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
void set_dtype(Dtype dtype) {
|
| 60 |
+
dtype_ = dtype;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
/*
|
| 64 |
+
* Make a deep copy of the given expression.
|
| 65 |
+
*
|
| 66 |
+
* All sub-expressions inside the given expressions are also cloned. Note
|
| 67 |
+
* that the variables are not deep-copied since they are immutable.
|
| 68 |
+
*/
|
| 69 |
+
static ExprPtr clone(const ExprPtr& s);
|
| 70 |
+
|
| 71 |
+
protected:
|
| 72 |
+
std::shared_ptr<Expr> getptr() {
|
| 73 |
+
return shared_from_this();
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
private:
|
| 77 |
+
Dtype dtype_;
|
| 78 |
+
IRNodeType expr_type_;
|
| 79 |
+
};
|
| 80 |
+
|
| 81 |
+
// A CRTP pattern to accept visitors for children class,
|
| 82 |
+
// and dispatch back to the children.
|
| 83 |
+
template <class Op, class Base = Expr>
|
| 84 |
+
class ExprNode : public Base {
|
| 85 |
+
public:
|
| 86 |
+
using ExprNodeBase = ExprNode<Op>;
|
| 87 |
+
void accept(IRVisitor* visitor) override {
|
| 88 |
+
visitor->visit(static_to<Op>(Base::getptr()));
|
| 89 |
+
}
|
| 90 |
+
ExprPtr accept_mutator(IRMutator* mutator) override;
|
| 91 |
+
// pass the constructor to the base class
|
| 92 |
+
using Base::Base;
|
| 93 |
+
};
|
| 94 |
+
|
| 95 |
+
// A wrapper object to the underlying ExprNode.
|
| 96 |
+
// Also serves the primary way to build and operate on other expressions.
|
| 97 |
+
class TORCH_API ExprHandle {
|
| 98 |
+
public:
|
| 99 |
+
ExprHandle() = default;
|
| 100 |
+
explicit ExprHandle(ExprPtr node) : base_expr_node_(std::move(node)) {}
|
| 101 |
+
|
| 102 |
+
ExprPtr node() {
|
| 103 |
+
return base_expr_node_;
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
ExprPtr node() const {
|
| 107 |
+
return base_expr_node_;
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
bool empty() const {
|
| 111 |
+
return base_expr_node_ == nullptr;
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
#define IMM_EXPR_DECLARE(Type, Name) ExprHandle(Type v);
|
| 115 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_EXPR_DECLARE);
|
| 116 |
+
#undef IMM_EXPR_DECLARE
|
| 117 |
+
|
| 118 |
+
template <class Op>
|
| 119 |
+
NodePtr<Op> AsNode() {
|
| 120 |
+
return to<Op>(this->node());
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
template <class Op>
|
| 124 |
+
NodePtr<Op> AsNode() const {
|
| 125 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
|
| 126 |
+
return const_cast<ExprHandle*>(this)->AsNode<Op>();
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
Dtype dtype() const {
|
| 130 |
+
return node()->dtype();
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
// Handling the math operators.
|
| 134 |
+
ExprHandle operator+(const ExprHandle& other) const;
|
| 135 |
+
ExprHandle operator-(const ExprHandle& other) const;
|
| 136 |
+
ExprHandle operator*(const ExprHandle& other) const;
|
| 137 |
+
ExprHandle operator/(const ExprHandle& other) const;
|
| 138 |
+
ExprHandle operator%(const ExprHandle& other) const;
|
| 139 |
+
ExprHandle operator==(const ExprHandle& other) const;
|
| 140 |
+
ExprHandle operator!=(const ExprHandle& other) const;
|
| 141 |
+
ExprHandle operator>(const ExprHandle& other) const;
|
| 142 |
+
ExprHandle operator>=(const ExprHandle& other) const;
|
| 143 |
+
ExprHandle operator<(const ExprHandle& other) const;
|
| 144 |
+
ExprHandle operator<=(const ExprHandle& other) const;
|
| 145 |
+
ExprHandle operator&(const ExprHandle& other) const;
|
| 146 |
+
ExprHandle operator|(const ExprHandle& other) const;
|
| 147 |
+
ExprHandle operator&&(const ExprHandle& other) const;
|
| 148 |
+
ExprHandle operator||(const ExprHandle& other) const;
|
| 149 |
+
ExprHandle operator^(const ExprHandle& other) const;
|
| 150 |
+
ExprHandle operator<<(const ExprHandle& other) const;
|
| 151 |
+
ExprHandle operator>>(const ExprHandle& other) const;
|
| 152 |
+
|
| 153 |
+
private:
|
| 154 |
+
ExprPtr base_expr_node_ = nullptr;
|
| 155 |
+
};
|
| 156 |
+
|
| 157 |
+
// The underlying representation node to a Var.
|
| 158 |
+
// Currently, each Var object represents a unique variable, even though the
|
| 159 |
+
// names might be the same. We should consider add a unique_name as well.
|
| 160 |
+
class TORCH_API Var : public ExprNode<Var> {
|
| 161 |
+
public:
|
| 162 |
+
static ExprHandle make(const std::string& name_hint, Dtype dtype) {
|
| 163 |
+
return ExprHandle(alloc<Var>(name_hint, dtype));
|
| 164 |
+
}
|
| 165 |
+
static ExprHandle make(Dtype dtype) {
|
| 166 |
+
return ExprHandle(alloc<Var>("", dtype));
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
// TODO: unique_name
|
| 170 |
+
const std::string& name_hint() const {
|
| 171 |
+
return name_hint_;
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
void set_name_hint(const std::string& name) {
|
| 175 |
+
name_hint_ = name;
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
void set_name_hint(std::string&& name) {
|
| 179 |
+
name_hint_ = std::move(name);
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
Var(std::string name_hint, Dtype dtype)
|
| 183 |
+
: ExprNodeBase(dtype, kPrimitive), name_hint_(std::move(name_hint)) {}
|
| 184 |
+
|
| 185 |
+
private:
|
| 186 |
+
std::string name_hint_;
|
| 187 |
+
};
|
| 188 |
+
|
| 189 |
+
TORCH_API std::vector<ExprPtr> make_contiguous_strides(
|
| 190 |
+
const std::vector<ExprHandle>& dims);
|
| 191 |
+
TORCH_API std::vector<ExprPtr> make_channels_last_strides(
|
| 192 |
+
const std::vector<ExprHandle>& dims);
|
| 193 |
+
|
| 194 |
+
class TORCH_API Buf : public ExprNode<Buf> {
|
| 195 |
+
public:
|
| 196 |
+
static BufHandle make(const std::vector<ExprHandle>& dims, Dtype dtype);
|
| 197 |
+
|
| 198 |
+
static BufHandle make(
|
| 199 |
+
const std::string& name_hint,
|
| 200 |
+
const std::vector<ExprHandle>& dims,
|
| 201 |
+
const std::vector<ExprHandle>& strides,
|
| 202 |
+
Dtype dtype);
|
| 203 |
+
|
| 204 |
+
static BufHandle make(
|
| 205 |
+
const std::string& name_hint,
|
| 206 |
+
const std::vector<ExprHandle>& dims,
|
| 207 |
+
Dtype dtype,
|
| 208 |
+
std::optional<ExprHandle> initializer = std::nullopt,
|
| 209 |
+
const std::optional<std::vector<ExprHandle>>& strides = std::nullopt,
|
| 210 |
+
std::optional<ExprHandle> qscale = std::nullopt,
|
| 211 |
+
std::optional<ExprHandle> qzero = std::nullopt);
|
| 212 |
+
|
| 213 |
+
// TODO: unique_name
|
| 214 |
+
VarPtr base_handle() const {
|
| 215 |
+
return base_handle_;
|
| 216 |
+
}
|
| 217 |
+
void set_base_handle(VarPtr base_handle) {
|
| 218 |
+
base_handle_ = std::move(base_handle);
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
const std::string& name_hint() const {
|
| 222 |
+
return base_handle_->name_hint();
|
| 223 |
+
}
|
| 224 |
+
void set_name_hint(const std::string& name_hint) {
|
| 225 |
+
base_handle_->set_name_hint(name_hint);
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
Buf(const std::string& name_hint,
|
| 229 |
+
const std::vector<ExprPtr>& dims,
|
| 230 |
+
Dtype dtype,
|
| 231 |
+
ExprPtr initializer = nullptr,
|
| 232 |
+
std::optional<std::vector<ExprPtr>> strides = std::nullopt,
|
| 233 |
+
ExprPtr qscale = nullptr,
|
| 234 |
+
ExprPtr qzero = nullptr)
|
| 235 |
+
: Buf(alloc<Var>(name_hint, kHandle),
|
| 236 |
+
dims,
|
| 237 |
+
dtype,
|
| 238 |
+
std::move(initializer),
|
| 239 |
+
std::move(strides),
|
| 240 |
+
std::move(qscale),
|
| 241 |
+
std::move(qzero)) {}
|
| 242 |
+
|
| 243 |
+
Buf(const VarPtr& var,
|
| 244 |
+
std::vector<ExprPtr> dims,
|
| 245 |
+
Dtype dtype,
|
| 246 |
+
ExprPtr initializer = nullptr,
|
| 247 |
+
std::optional<std::vector<ExprPtr>> strides = std::nullopt,
|
| 248 |
+
ExprPtr qscale = nullptr,
|
| 249 |
+
ExprPtr qzero = nullptr);
|
| 250 |
+
|
| 251 |
+
size_t ndim() const {
|
| 252 |
+
return dims_.size();
|
| 253 |
+
}
|
| 254 |
+
ExprPtr dim(size_t index) const {
|
| 255 |
+
if (index >= ndim()) {
|
| 256 |
+
throw out_of_range_index();
|
| 257 |
+
}
|
| 258 |
+
return dims_[index];
|
| 259 |
+
}
|
| 260 |
+
std::vector<ExprPtr> dims() const {
|
| 261 |
+
return dims_;
|
| 262 |
+
}
|
| 263 |
+
void set_dims(std::vector<ExprPtr> dims) {
|
| 264 |
+
dims_ = std::move(dims);
|
| 265 |
+
}
|
| 266 |
+
|
| 267 |
+
std::vector<ExprPtr> strides() const {
|
| 268 |
+
return strides_;
|
| 269 |
+
}
|
| 270 |
+
|
| 271 |
+
void set_strides(std::vector<ExprPtr> strides) {
|
| 272 |
+
strides_ = std::move(strides);
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
ExprPtr initializer() const {
|
| 276 |
+
return initializer_;
|
| 277 |
+
};
|
| 278 |
+
|
| 279 |
+
ExprPtr qzero() const {
|
| 280 |
+
return qzero_;
|
| 281 |
+
}
|
| 282 |
+
|
| 283 |
+
ExprPtr qscale() const {
|
| 284 |
+
return qscale_;
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
void set_qzero(ExprPtr qzero) {
|
| 288 |
+
qzero_ = std::move(qzero);
|
| 289 |
+
}
|
| 290 |
+
|
| 291 |
+
void set_qscale(ExprPtr qscale) {
|
| 292 |
+
qscale_ = std::move(qscale);
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
bool hasConstantDims() const {
|
| 296 |
+
for (const auto& d : dims_) {
|
| 297 |
+
if (!d->isConstant()) {
|
| 298 |
+
return false;
|
| 299 |
+
}
|
| 300 |
+
}
|
| 301 |
+
return true;
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
bool is_contiguous(
|
| 305 |
+
at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const;
|
| 306 |
+
|
| 307 |
+
// The channels-last 1d can benefit the performance of some operators like
|
| 308 |
+
// conv1d. But the MemoryFormat enum has not covered this layout yet. Hence,
|
| 309 |
+
// we abstract a dedicated function to check channels-last 1d contiguous.
|
| 310 |
+
//
|
| 311 |
+
// Channels-last 1d:
|
| 312 |
+
// dims: n c l
|
| 313 |
+
// strides(nlc): c*l 1 c
|
| 314 |
+
bool is_channels_last_1d_contiguous() const {
|
| 315 |
+
if (dims_.size() != 3) {
|
| 316 |
+
return false;
|
| 317 |
+
}
|
| 318 |
+
return is_stride_one(1) && is_cont_with(2, 1) && is_cont_with(0, 2);
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
private:
|
| 322 |
+
bool is_cont_with(int cur_dim, int adjacent_dim) const;
|
| 323 |
+
bool is_stride_one(int cur_dim) const;
|
| 324 |
+
|
| 325 |
+
VarPtr base_handle_;
|
| 326 |
+
std::vector<ExprPtr> dims_;
|
| 327 |
+
std::vector<ExprPtr> strides_;
|
| 328 |
+
ExprPtr initializer_;
|
| 329 |
+
// qscale_ and qzero_ are used only for quantized dtypes Bufs: kQUInt8, kQInt8
|
| 330 |
+
ExprPtr qscale_;
|
| 331 |
+
ExprPtr qzero_;
|
| 332 |
+
};
|
| 333 |
+
|
| 334 |
+
class TORCH_API BufHandle : public ExprHandle {
|
| 335 |
+
public:
|
| 336 |
+
BufHandle(
|
| 337 |
+
const std::string& name_hint,
|
| 338 |
+
const std::vector<ExprHandle>& dims,
|
| 339 |
+
Dtype dtype)
|
| 340 |
+
: ExprHandle(Buf::make(name_hint, dims, dtype)) {}
|
| 341 |
+
|
| 342 |
+
BufHandle(
|
| 343 |
+
const std::string& name_hint,
|
| 344 |
+
const std::vector<ExprHandle>& dims,
|
| 345 |
+
const std::vector<ExprHandle>& strides,
|
| 346 |
+
Dtype dtype)
|
| 347 |
+
: ExprHandle(Buf::make(name_hint, dims, strides, dtype)) {}
|
| 348 |
+
|
| 349 |
+
BufHandle(const std::vector<ExprHandle>& dims, Dtype dtype)
|
| 350 |
+
: ExprHandle(Buf::make("_", dims, dtype)) {}
|
| 351 |
+
|
| 352 |
+
explicit BufHandle(Dtype dtype) : ExprHandle(Buf::make("_", {}, dtype)) {}
|
| 353 |
+
|
| 354 |
+
explicit BufHandle(BufPtr node) : ExprHandle(std::move(node)) {}
|
| 355 |
+
BufPtr node() const {
|
| 356 |
+
return static_to<Buf>(ExprHandle::node());
|
| 357 |
+
}
|
| 358 |
+
BufPtr node() {
|
| 359 |
+
return static_to<Buf>(ExprHandle::node());
|
| 360 |
+
}
|
| 361 |
+
|
| 362 |
+
template <typename... Ts>
|
| 363 |
+
inline ExprHandle load(const Ts&... ts) const;
|
| 364 |
+
|
| 365 |
+
template <typename T>
|
| 366 |
+
inline ExprHandle load(const std::vector<T>& args) const;
|
| 367 |
+
|
| 368 |
+
inline ExprHandle load(const std::vector<ExprHandle>& args) const;
|
| 369 |
+
|
| 370 |
+
StorePtr store(const std::vector<ExprHandle>& args, const ExprHandle& val)
|
| 371 |
+
const;
|
| 372 |
+
|
| 373 |
+
bool operator==(const BufHandle& other) const {
|
| 374 |
+
return this->node() == other.node();
|
| 375 |
+
}
|
| 376 |
+
bool operator!=(const BufHandle& other) const {
|
| 377 |
+
return !(*this == other);
|
| 378 |
+
}
|
| 379 |
+
|
| 380 |
+
const std::string& name_hint() const {
|
| 381 |
+
return this->node()->name_hint();
|
| 382 |
+
}
|
| 383 |
+
|
| 384 |
+
bool empty() const {
|
| 385 |
+
return (this->node() == nullptr);
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
size_t ndim() const {
|
| 389 |
+
return node()->ndim();
|
| 390 |
+
}
|
| 391 |
+
|
| 392 |
+
std::vector<ExprHandle> dims() const;
|
| 393 |
+
|
| 394 |
+
ExprHandle dim(size_t index) const {
|
| 395 |
+
return ExprHandle(node()->dim(index));
|
| 396 |
+
}
|
| 397 |
+
|
| 398 |
+
bool is_contiguous(
|
| 399 |
+
at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const {
|
| 400 |
+
return node()->is_contiguous(memory_format);
|
| 401 |
+
}
|
| 402 |
+
|
| 403 |
+
bool is_channels_last_1d_contiguous() const {
|
| 404 |
+
return node()->is_channels_last_1d_contiguous();
|
| 405 |
+
}
|
| 406 |
+
};
|
| 407 |
+
|
| 408 |
+
// An expression to construct the underlying variable node.
|
| 409 |
+
// Note: do not store any info here, since it is often possible to slice this
|
| 410 |
+
// object. For example: VarHandle x('x'); ExprHandle x2 = x;
|
| 411 |
+
class TORCH_API VarHandle : public ExprHandle {
|
| 412 |
+
public:
|
| 413 |
+
// Creates an empty VarHandle whose base Var is set to nullptr.
|
| 414 |
+
VarHandle() : ExprHandle() {}
|
| 415 |
+
|
| 416 |
+
explicit VarHandle(Dtype dtype) : ExprHandle(Var::make(dtype)) {}
|
| 417 |
+
|
| 418 |
+
VarHandle(const std::string& name_hint, Dtype dtype)
|
| 419 |
+
: ExprHandle(Var::make(name_hint, dtype)) {}
|
| 420 |
+
|
| 421 |
+
explicit VarHandle(VarPtr node) : ExprHandle(std::move(node)) {}
|
| 422 |
+
|
| 423 |
+
VarPtr node() const {
|
| 424 |
+
return static_to<Var>(ExprHandle::node());
|
| 425 |
+
}
|
| 426 |
+
bool operator==(const VarHandle& other) const {
|
| 427 |
+
return this->node() == other.node();
|
| 428 |
+
}
|
| 429 |
+
bool operator!=(const VarHandle& other) const {
|
| 430 |
+
return !(*this == other);
|
| 431 |
+
}
|
| 432 |
+
|
| 433 |
+
const std::string& name_hint() const {
|
| 434 |
+
return this->node()->name_hint();
|
| 435 |
+
}
|
| 436 |
+
bool empty() const {
|
| 437 |
+
return (this->node() == nullptr);
|
| 438 |
+
}
|
| 439 |
+
};
|
| 440 |
+
|
| 441 |
+
template <class Op, class Base>
|
| 442 |
+
ExprPtr ExprNode<Op, Base>::accept_mutator(IRMutator* mutator) {
|
| 443 |
+
return mutator->mutate(static_to<Op>(Base::getptr()));
|
| 444 |
+
}
|
| 445 |
+
|
| 446 |
+
inline bool same_node(const ExprHandle& expr1, const ExprHandle& expr2) {
|
| 447 |
+
return expr1.AsNode<Expr>() == expr2.AsNode<Expr>();
|
| 448 |
+
}
|
| 449 |
+
|
| 450 |
+
TORCH_API ExprHandle sin(const ExprHandle& v);
|
| 451 |
+
TORCH_API ExprHandle cos(const ExprHandle& v);
|
| 452 |
+
TORCH_API ExprHandle tan(const ExprHandle& v);
|
| 453 |
+
TORCH_API ExprHandle asin(const ExprHandle& v);
|
| 454 |
+
TORCH_API ExprHandle acos(const ExprHandle& v);
|
| 455 |
+
TORCH_API ExprHandle atan(const ExprHandle& v);
|
| 456 |
+
TORCH_API ExprHandle sinh(const ExprHandle& v);
|
| 457 |
+
TORCH_API ExprHandle cosh(const ExprHandle& v);
|
| 458 |
+
TORCH_API ExprHandle tanh(const ExprHandle& v);
|
| 459 |
+
TORCH_API ExprHandle sigmoid(const ExprHandle& v);
|
| 460 |
+
TORCH_API ExprHandle exp(const ExprHandle& v);
|
| 461 |
+
TORCH_API ExprHandle expm1(const ExprHandle& v);
|
| 462 |
+
TORCH_API ExprHandle abs(const ExprHandle& v);
|
| 463 |
+
TORCH_API ExprHandle log(const ExprHandle& v);
|
| 464 |
+
TORCH_API ExprHandle fast_tanh(const ExprHandle& v);
|
| 465 |
+
TORCH_API ExprHandle fast_sigmoid(const ExprHandle& v);
|
| 466 |
+
TORCH_API ExprHandle fast_log(const ExprHandle& v);
|
| 467 |
+
TORCH_API ExprHandle log_vml(const ExprHandle& v);
|
| 468 |
+
TORCH_API ExprHandle log2(const ExprHandle& v);
|
| 469 |
+
TORCH_API ExprHandle log10(const ExprHandle& v);
|
| 470 |
+
TORCH_API ExprHandle log1p(const ExprHandle& v);
|
| 471 |
+
TORCH_API ExprHandle erf(const ExprHandle& v);
|
| 472 |
+
TORCH_API ExprHandle erfc(const ExprHandle& v);
|
| 473 |
+
TORCH_API ExprHandle sqrt(const ExprHandle& v);
|
| 474 |
+
TORCH_API ExprHandle rsqrt(const ExprHandle& v);
|
| 475 |
+
TORCH_API ExprHandle ceil(const ExprHandle& v);
|
| 476 |
+
TORCH_API ExprHandle floor(const ExprHandle& v);
|
| 477 |
+
TORCH_API ExprHandle round(const ExprHandle& v);
|
| 478 |
+
TORCH_API ExprHandle trunc(const ExprHandle& v);
|
| 479 |
+
TORCH_API ExprHandle frac(const ExprHandle& v);
|
| 480 |
+
TORCH_API ExprHandle lgamma(const ExprHandle& v);
|
| 481 |
+
TORCH_API ExprHandle atan2(const ExprHandle& v1, const ExprHandle& v2);
|
| 482 |
+
TORCH_API ExprHandle pow(const ExprHandle& v1, const ExprHandle& v2);
|
| 483 |
+
TORCH_API ExprHandle fmod(const ExprHandle& v1, const ExprHandle& v2);
|
| 484 |
+
TORCH_API ExprHandle remainder(const ExprHandle& v1, const ExprHandle& v2);
|
| 485 |
+
TORCH_API ExprHandle isnan(const ExprHandle& v1);
|
| 486 |
+
TORCH_API ExprHandle Relu(const ExprHandle& v1);
|
| 487 |
+
|
| 488 |
+
TORCH_API ExprHandle
|
| 489 |
+
ifThenElse(const ExprHandle& c, const ExprHandle& t, const ExprHandle& f);
|
| 490 |
+
|
| 491 |
+
TORCH_API ExprHandle expr_to_vec(ExprHandle v, int lanes);
|
| 492 |
+
|
| 493 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Config.h>
|
| 4 |
+
#include <ATen/Functions.h>
|
| 5 |
+
#include <c10/macros/Macros.h>
|
| 6 |
+
#include <torch/csrc/Export.h>
|
| 7 |
+
#include <cstdint>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
#define FOR_ALL_EXTERNAL_FUNCTIONS(_) \
|
| 11 |
+
_(nnc_aten_adaptive_avg_pool2d) \
|
| 12 |
+
_(nnc_aten_addmm) \
|
| 13 |
+
_(nnc_aten_conv2d) \
|
| 14 |
+
_(nnc_aten_conv1d) \
|
| 15 |
+
_(nnc_aten_conv1d_out) \
|
| 16 |
+
_(nnc_aten_dequantize) \
|
| 17 |
+
_(nnc_aten_dequantize_out) \
|
| 18 |
+
_(nnc_aten_embedding) \
|
| 19 |
+
_(nnc_aten_matmul) \
|
| 20 |
+
_(nnc_aten_mv) \
|
| 21 |
+
_(nnc_aten_mm) \
|
| 22 |
+
_(nnc_aten_mean) \
|
| 23 |
+
_(nnc_aten_max_red) \
|
| 24 |
+
_(nnc_aten_max_red_out) \
|
| 25 |
+
_(nnc_aten_quantized_conv1d) \
|
| 26 |
+
_(nnc_aten_quantized_conv1d_out) \
|
| 27 |
+
_(nnc_aten_quantized_conv2d) \
|
| 28 |
+
_(nnc_aten_quantized_conv2d_out) \
|
| 29 |
+
_(nnc_aten_quantized_conv2d_relu) \
|
| 30 |
+
_(nnc_aten_quantized_conv2d_relu_out) \
|
| 31 |
+
_(nnc_aten_quantized_linear) \
|
| 32 |
+
_(nnc_aten_quantized_linear_out) \
|
| 33 |
+
_(nnc_aten_quantized_linear_relu) \
|
| 34 |
+
_(nnc_aten_quantized_add) \
|
| 35 |
+
_(nnc_aten_quantized_cat) \
|
| 36 |
+
_(nnc_aten_quantized_mul) \
|
| 37 |
+
_(nnc_aten_quantized_mul_out) \
|
| 38 |
+
_(nnc_aten_quantized_mul_scalar) \
|
| 39 |
+
_(nnc_aten_quantized_mul_scalar_out) \
|
| 40 |
+
_(nnc_aten_quantized_relu) \
|
| 41 |
+
_(nnc_aten_quantized_sigmoid) \
|
| 42 |
+
_(nnc_aten_quantized_sigmoid_out) \
|
| 43 |
+
_(nnc_aten_quantize_per_tensor) \
|
| 44 |
+
_(nnc_aten_quantize_per_tensor_out) \
|
| 45 |
+
_(nnc_aten_triangular_solve) \
|
| 46 |
+
_(nnc_aten_upsample_nearest2d) \
|
| 47 |
+
_(nnc_aten_upsample_nearest2d_out) \
|
| 48 |
+
_(nnc_prepacked_conv2d_clamp_run) \
|
| 49 |
+
_(nnc_prepacked_linear_clamp_run)
|
| 50 |
+
|
| 51 |
+
#define DECLARE_EXTERNAL_FUNCTION(NAME) \
|
| 52 |
+
TORCH_API void NAME( \
|
| 53 |
+
int64_t bufs_num, \
|
| 54 |
+
void** buf_data, \
|
| 55 |
+
int64_t* buf_ranks, \
|
| 56 |
+
int64_t* buf_dims, \
|
| 57 |
+
int64_t* buf_strides, \
|
| 58 |
+
int8_t* buf_dtypes, \
|
| 59 |
+
int64_t args_num, \
|
| 60 |
+
int64_t* extra_args);
|
| 61 |
+
|
| 62 |
+
namespace torch::jit::tensorexpr {
|
| 63 |
+
struct QIData final {
|
| 64 |
+
double scale;
|
| 65 |
+
int64_t zero;
|
| 66 |
+
c10::ScalarType scalarType;
|
| 67 |
+
};
|
| 68 |
+
std::vector<at::Tensor> constructTensors(
|
| 69 |
+
int64_t bufs_num,
|
| 70 |
+
void** buf_data,
|
| 71 |
+
int64_t* buf_ranks,
|
| 72 |
+
int64_t* buf_dims,
|
| 73 |
+
int64_t* buf_strides,
|
| 74 |
+
int8_t* buf_dtypes,
|
| 75 |
+
std::optional<std::vector<std::pair<size_t, QIData>>> qdataArg =
|
| 76 |
+
std::nullopt);
|
| 77 |
+
|
| 78 |
+
std::vector<at::Tensor> constructTensors2(
|
| 79 |
+
int64_t bufs_in_num,
|
| 80 |
+
void** buf_data,
|
| 81 |
+
int64_t* buf_ranks,
|
| 82 |
+
int64_t* buf_dims,
|
| 83 |
+
int64_t* buf_strides,
|
| 84 |
+
int8_t* buf_dtypes,
|
| 85 |
+
std::optional<std::vector<std::pair<size_t, QIData>>> qdataArg =
|
| 86 |
+
std::nullopt,
|
| 87 |
+
size_t bufs_out_num = 0);
|
| 88 |
+
|
| 89 |
+
#ifdef C10_MOBILE
|
| 90 |
+
extern "C" {
|
| 91 |
+
#endif
|
| 92 |
+
void DispatchParallel(
|
| 93 |
+
int8_t* func,
|
| 94 |
+
int64_t start,
|
| 95 |
+
int64_t stop,
|
| 96 |
+
int8_t* packed_data) noexcept;
|
| 97 |
+
|
| 98 |
+
FOR_ALL_EXTERNAL_FUNCTIONS(DECLARE_EXTERNAL_FUNCTION)
|
| 99 |
+
#if AT_MKLDNN_ENABLED()
|
| 100 |
+
DECLARE_EXTERNAL_FUNCTION(nnc_mkldnn_prepacked_conv_run);
|
| 101 |
+
#endif
|
| 102 |
+
|
| 103 |
+
TORCH_API void nnc_aten_free(size_t bufs_num, void** ptrs) noexcept;
|
| 104 |
+
|
| 105 |
+
#ifdef C10_MOBILE
|
| 106 |
+
} // extern "C"
|
| 107 |
+
#endif
|
| 108 |
+
|
| 109 |
+
} // namespace torch::jit::tensorexpr
|
| 110 |
+
|
| 111 |
+
#undef DECLARE_EXTERNAL_FUNCTION
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ATen.h>
|
| 4 |
+
#include <ATen/Parallel.h>
|
| 5 |
+
#include <torch/csrc/Export.h>
|
| 6 |
+
#include <cstdint>
|
| 7 |
+
|
| 8 |
+
namespace torch::jit::tensorexpr {
|
| 9 |
+
|
| 10 |
+
#ifdef C10_MOBILE
|
| 11 |
+
extern "C" {
|
| 12 |
+
#endif
|
| 13 |
+
void DispatchParallel(
|
| 14 |
+
int8_t* func,
|
| 15 |
+
int64_t start,
|
| 16 |
+
int64_t stop,
|
| 17 |
+
int8_t* packed_data) noexcept;
|
| 18 |
+
|
| 19 |
+
TORCH_API void nnc_aten_free(size_t bufs_num, void** ptrs) noexcept;
|
| 20 |
+
|
| 21 |
+
#ifdef C10_MOBILE
|
| 22 |
+
} // extern "C"
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once

#include <torch/csrc/Export.h>
#include <cstdint>
#include <string>
#include <unordered_map>

namespace torch::jit::tensorexpr {

// The external functions that could be called from NNC must have the same
// signature defined by `NNCExternalFunction`.
//
// Why this signature?
// It was picked for two reasons: 1) it should be generic enough to represent
// most of the ops we might want to call, 2) it should be possible to generate a
// code for this call in LLVM codegen.
// The first 5 parameters allow to pass any number of contiguous CPU tensors in
// case we need to run aten ops (TODO: support different devices). The first
// buffer in the array is assumed to be the output buffer. We couldn't use
// `at::Tensor` (or `c10::IValue`) type there directly as it would mean that
// we'd need to declare it in LLVM codegen in LLVM IR form, which would be very
// cumbersome and hard to maintain. Note that the dimensions of all tensors are
// concatenated into a single array buf_dims. We do not need to pass its length,
// since it can be deduced from total number of buffers and their ranks.
//
// The last 2 arguments allow to pass any non-tensor arguments encoded as an
// array of int64_t values. The way they are encoded is not specified and could
// be arbitrary - whatever the most convenient for the specific bridge function
// is.
//
// The bridge functions must not throw exceptions - properly propagating them
// from the generated code is too cumbersome, and thus all calls to functions
// that could throw must be wrapped with try-catch blocks.
using NNCExternalFunction = void (*)(
    int64_t bufs_num, // number of entries in `buf_data`
    void** buf_data, // raw buffer pointers; buf_data[0] is the output buffer
    int64_t* buf_ranks, // rank of each buffer
    int64_t* buf_dims, // dims of all buffers, concatenated (see comment above)
    int64_t* buf_strides, // strides of all buffers, concatenated
    int8_t* buf_dtypes, // scalar type of each buffer
    int64_t args_num, // number of entries in `extra_args`
    int64_t* extra_args); // encoded non-tensor arguments

// Return a global map "function-name" -> "function-pointer" for all registered
// in NNC external functions
TORCH_API std::unordered_map<std::string, NNCExternalFunction>&
getNNCFunctionRegistry();

// To register a new external function in NNC one needs to create an instance of
// this struct
struct RegisterNNCExternalFunction {
  // Registers `fn` under `name` in the global registry. Typically instantiated
  // as a file-scope static so registration happens during static
  // initialization, before any kernel is compiled.
  RegisterNNCExternalFunction(const std::string& name, NNCExternalFunction fn) {
    getNNCFunctionRegistry()[name] = fn;
  }
};

} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
#include <c10/core/ScalarType.h>
#include <memory>

// Forward declarations and smart-pointer aliases for all tensorexpr IR node
// types, so that headers can refer to node pointers without pulling in the
// full IR definitions.
//
// Uses a C++17 nested namespace for consistency with the sibling tensorexpr
// headers; `static_to` now takes its argument by const reference (like `to`)
// to avoid a needless shared_ptr refcount copy.
namespace torch::jit::tensorexpr {

template <typename Node>
using NodePtr = std::shared_ptr<Node>;

// Checked downcast: returns nullptr if `x` does not actually point to a `To`.
template <typename To, typename From>
NodePtr<To> to(const NodePtr<From>& x) {
  return std::dynamic_pointer_cast<To>(x);
}

// Unchecked downcast: caller must know `x` really is a `To`.
template <typename To, typename From>
NodePtr<To> static_to(const NodePtr<From>& x) {
  return std::static_pointer_cast<To>(x);
}

// Allocate a new IR node, forwarding the constructor arguments.
template <typename Node, typename... Args>
NodePtr<Node> alloc(Args&&... args) {
  return std::make_shared<Node>(std::forward<Args>(args)...);
}

class Buf;
class Expr;
class Stmt;
class Var;

using BufPtr = NodePtr<Buf>;
using ExprPtr = NodePtr<Expr>;
using StmtPtr = NodePtr<Stmt>;
using VarPtr = NodePtr<Var>;

class ExprHandle;
class VarHandle;
class BufHandle;

// Expression nodes.
class Add;
class And;
class BitCast;
class Broadcast;
class Cast;
class CompareSelect;
class Div;
class IfThenElse;
class Intrinsics;
class Let;
class Load;
class Lshift;
class Max;
class MaxTerm;
class Min;
class MinTerm;
class Mod;
class Mul;
class Or;
class Polynomial;
class Ramp;
class ReduceOp;
class RoundOff;
class Rshift;
class Store;
class Sub;
class Term;
class Xor;
using AddPtr = NodePtr<Add>;
using AndPtr = NodePtr<And>;
using BitCastPtr = NodePtr<BitCast>;
using BroadcastPtr = NodePtr<Broadcast>;
using CastPtr = NodePtr<Cast>;
using CompareSelectPtr = NodePtr<CompareSelect>;
using DivPtr = NodePtr<Div>;
using IfThenElsePtr = NodePtr<IfThenElse>;
using IntrinsicsPtr = NodePtr<Intrinsics>;
using LetPtr = NodePtr<Let>;
using LoadPtr = NodePtr<Load>;
using LshiftPtr = NodePtr<Lshift>;
using MaxPtr = NodePtr<Max>;
using MaxTermPtr = NodePtr<MaxTerm>;
using MinPtr = NodePtr<Min>;
using MinTermPtr = NodePtr<MinTerm>;
using ModPtr = NodePtr<Mod>;
using MulPtr = NodePtr<Mul>;
using OrPtr = NodePtr<Or>;
using PolynomialPtr = NodePtr<Polynomial>;
using RampPtr = NodePtr<Ramp>;
using ReduceOpPtr = NodePtr<ReduceOp>;
using RoundOffPtr = NodePtr<RoundOff>;
using RshiftPtr = NodePtr<Rshift>;
using StorePtr = NodePtr<Store>;
using SubPtr = NodePtr<Sub>;
using TermPtr = NodePtr<Term>;
using XorPtr = NodePtr<Xor>;

// Statement nodes.
class Allocate;
class AtomicAdd;
class Block;
class Cond;
class ExternalCall;
class ExternalCallWithAlloc;
class For;
class Free;
class FreeExt;
class PlacementAllocate;
class SyncThreads;
using AllocatePtr = NodePtr<Allocate>;
using AtomicAddPtr = NodePtr<AtomicAdd>;
using BlockPtr = NodePtr<Block>;
using CondPtr = NodePtr<Cond>;
using ExternalCallPtr = NodePtr<ExternalCall>;
using ExternalCallWithAllocPtr = NodePtr<ExternalCallWithAlloc>;
using ForPtr = NodePtr<For>;
using FreePtr = NodePtr<Free>;
using FreeExtPtr = NodePtr<FreeExt>;
using PlacementAllocatePtr = NodePtr<PlacementAllocate>;
using SyncThreadsPtr = NodePtr<SyncThreads>;

// Declare one immediate node class (and its pointer alias) per scalar type.
#define IMM_DECLARE(Type, Name) \
  class Name##Imm;              \
  using Name##ImmPtr = NodePtr<Name##Imm>;
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
#undef IMM_DECLARE

} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/ir/ir.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::jit::tensorexpr {
|
| 6 |
+
|
| 7 |
+
// Optimize aten::cat ops in the given subgraph.
|
| 8 |
+
//
|
| 9 |
+
// Moving users of cat to its inputs.
|
| 10 |
+
// Cat ops get lowered into multiple loops, one per input. When the result
|
| 11 |
+
// of cat is used by some other op, it results in a situation where inlining
|
| 12 |
+
// of cat does not happen. This in turn results in intermediate buffers
|
| 13 |
+
// being created for the result of cat, since it is not inlined.
|
| 14 |
+
//
|
| 15 |
+
// For example, consider the following graph:
|
| 16 |
+
// graph(%x : Float(10, strides=[1], device=cpu),
|
| 17 |
+
// %y : Float(20, strides=[1], device=cpu)):
|
| 18 |
+
// %dim : int = prim::Constant[value=0]()
|
| 19 |
+
// %xy_list : Tensor[] = prim::ListConstruct(%x, %y)
|
| 20 |
+
// %cat : Float(60, strides=[1], device=cpu) = aten::cat(%xy_list, %dim)
|
| 21 |
+
// %5 : Float(60, strides=[1], device=cpu) = aten::log(%cat)
|
| 22 |
+
// return (%5)
|
| 23 |
+
//
|
| 24 |
+
// This will get lowered into:
|
| 25 |
+
// Allocate(aten_cat);
|
| 26 |
+
// for (...)
|
| 27 |
+
// aten_cat[...] = x[...]
|
| 28 |
+
// for (...)
|
| 29 |
+
// aten_cat[...] = y[...]
|
| 30 |
+
// for (...)
|
| 31 |
+
// aten_log[...] = log(aten_cat[...])
|
| 32 |
+
// Free(aten_cat);
|
| 33 |
+
// Note that aten_cat is not inlined into aten_log and it results in
|
| 34 |
+
// an intermediate buffer allocation as well.
|
| 35 |
+
//
|
| 36 |
+
// Optimization:
|
| 37 |
+
// We move the ops that use the result of `cat` into its inputs whenever
|
| 38 |
+
// possible.
|
| 39 |
+
//
|
| 40 |
+
// The graph above will be transformed to:
|
| 41 |
+
// graph(%x : Float(10, strides=[1], device=cpu),
|
| 42 |
+
// %y : Float(20, strides=[1], device=cpu)):
|
| 43 |
+
// %3 : int = prim::Constant[value=0]()
|
| 44 |
+
// %7 : Float(10, strides=[1], device=cpu) = aten::log(%x)
|
| 45 |
+
// %8 : Float(20, strides=[1], device=cpu) = aten::log(%y)
|
| 46 |
+
// %9 : Tensor[] = prim::ListConstruct(%7, %8)
|
| 47 |
+
// %10 : Float(60, strides=[1], device=cpu) = aten::cat(%9, %3)
|
| 48 |
+
// return (%10)
|
| 49 |
+
//
|
| 50 |
+
// This will get lowered into:
|
| 51 |
+
// for (...)
|
| 52 |
+
// aten_cat[...] = log(x[...])
|
| 53 |
+
// for (...)
|
| 54 |
+
// aten_cat[...] = log(y[...])
|
| 55 |
+
// aten_cat is the output buffer here.
|
| 56 |
+
|
| 57 |
+
bool OptimizeCat(const std::shared_ptr<Graph>& graph);
|
| 58 |
+
|
| 59 |
+
TORCH_API void annotateInputShapes(
|
| 60 |
+
const std::shared_ptr<Graph>& graph,
|
| 61 |
+
const std::vector<std::optional<at::Tensor>>& example_inputs);
|
| 62 |
+
TORCH_API std::shared_ptr<Graph> removeUnusedSelfArgument(
|
| 63 |
+
const std::shared_ptr<Graph>& graph);
|
| 64 |
+
TORCH_API std::shared_ptr<Graph> removeGraphOutput(
|
| 65 |
+
const std::shared_ptr<Graph>& graph,
|
| 66 |
+
size_t idx);
|
| 67 |
+
TORCH_API std::shared_ptr<Graph> replaceListOutputWithTuple(
|
| 68 |
+
const std::shared_ptr<Graph>& graph);
|
| 69 |
+
|
| 70 |
+
// Perform \p ITERS rounds of "trimming" for the given \p GRAPH.
|
| 71 |
+
//
|
| 72 |
+
// Trimming means that we try to remove a small portion of the graph while
|
| 73 |
+
// keeping it valid. This is useful for debugging when we try to find a minimal
|
| 74 |
+
// example reproducing the issue at hand. When ITERS is 0, the graph remains
|
| 75 |
+
// unchanged, when ITERS is a big number, the graph usually becomes empty.
|
| 76 |
+
TORCH_API std::shared_ptr<Graph> trimGraph(
|
| 77 |
+
const std::shared_ptr<Graph>& graph,
|
| 78 |
+
int64_t iters);
|
| 79 |
+
|
| 80 |
+
// Scan all values in the given graph and replace each dimension with a size Xi
|
| 81 |
+
// present in \p SIZES with a symbolic shape Yi. Return a vector of symbol
|
| 82 |
+
// values [Y0, Y1, .., Yn].
|
| 83 |
+
//
|
| 84 |
+
// For example:
|
| 85 |
+
// Input:
|
| 86 |
+
// graph(%x : Float(10, 20, 30, 40)):
|
| 87 |
+
// %y : Float(10, 20, 30, 40) = aten::relu(%x)
|
| 88 |
+
// return %y
|
| 89 |
+
//
|
| 90 |
+
// If we run makeShapesSymbolic(graph, {20, 40}), then we'll get:
|
| 91 |
+
//
|
| 92 |
+
// graph(%x : Float(10, SS(-3), 30, SS(-5))):
|
| 93 |
+
// %y : Float(10, SS(-3), 30, SS(-5)) = aten::relu(%x)
|
| 94 |
+
// return %y
|
| 95 |
+
//
|
| 96 |
+
// and get {-3, -5} as the return value.
|
| 97 |
+
TORCH_API std::vector<int64_t> makeShapesSymbolic(
|
| 98 |
+
std::shared_ptr<Graph>& graph,
|
| 99 |
+
const std::vector<int64_t>& sizes);
|
| 100 |
+
|
| 101 |
+
// Inspect the graph and report whether it can be converted to TE IR.
|
| 102 |
+
// TODO: add error reporting for graphs that can't be converted.
|
| 103 |
+
TORCH_API bool isGraphCompilable(const std::shared_ptr<Graph>& graph);
|
| 104 |
+
|
| 105 |
+
// Examine the graph and (hackily) fill in missing tensor type info, such as
|
| 106 |
+
// scalar type, device, and strides. Ideally, this should be done by a proper
|
| 107 |
+
// dtype/device/shape propagation passes, but until they are ready we can use
|
| 108 |
+
// this, not always correct, workaround pass.
|
| 109 |
+
TORCH_API void fixupMissingShapeInfo(const std::shared_ptr<Graph>& graph);
|
| 110 |
+
|
| 111 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/tensorexpr/codegen.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 5 |
+
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
|
| 6 |
+
#include <torch/csrc/jit/tensorexpr/tensor.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::jit::tensorexpr {
|
| 9 |
+
|
| 10 |
+
// Walk the Statement looking for Half size loads/stores.
|
| 11 |
+
class HalfChecker : public IRVisitor {
|
| 12 |
+
public:
|
| 13 |
+
HalfChecker(const std::vector<CodeGen::BufferArg>& args) {
|
| 14 |
+
for (const auto& BA : args) {
|
| 15 |
+
hasHalf_ |= BA.dtype().scalar_type() == ScalarType::Half;
|
| 16 |
+
}
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
bool hasHalf() const {
|
| 20 |
+
return hasHalf_;
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
bool hasBFloat16() const {
|
| 24 |
+
return hasBFloat16_;
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
void visit(const LoadPtr& v) override {
|
| 28 |
+
hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half;
|
| 29 |
+
hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16;
|
| 30 |
+
IRVisitor::visit(v);
|
| 31 |
+
}
|
| 32 |
+
|
| 33 |
+
void visit(const StorePtr& v) override {
|
| 34 |
+
hasHalf_ |= v->buf()->dtype().scalar_type() == ScalarType::Half;
|
| 35 |
+
hasBFloat16_ |= v->buf()->dtype().scalar_type() == ScalarType::BFloat16;
|
| 36 |
+
IRVisitor::visit(v);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
void visit(const HalfImmPtr& v) override {
|
| 40 |
+
hasHalf_ = true;
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
void visit(const BFloat16ImmPtr& v) override {
|
| 44 |
+
hasBFloat16_ = true;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
void visit(const CastPtr& v) override {
|
| 48 |
+
hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half;
|
| 49 |
+
hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16;
|
| 50 |
+
IRVisitor::visit(v);
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
private:
|
| 54 |
+
bool hasHalf_{false};
|
| 55 |
+
bool hasBFloat16_{false};
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
class HalfRewriter : public IRMutator {
|
| 59 |
+
ExprPtr mutate(const LoadPtr& v) override {
|
| 60 |
+
ExprPtr child = IRMutator::mutate(v);
|
| 61 |
+
if (!isHalf(child)) {
|
| 62 |
+
return child;
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
ExprPtr ret = alloc<Cast>(
|
| 66 |
+
child->dtype().cloneWithScalarType(ScalarType::Float), child);
|
| 67 |
+
|
| 68 |
+
inserted_half_casts_.insert(ret);
|
| 69 |
+
return ret;
|
| 70 |
+
}
|
| 71 |
+
|
| 72 |
+
StmtPtr mutate(const StorePtr& v) override {
|
| 73 |
+
// Since mutation changes the `value()` expression in-place, we need to
|
| 74 |
+
// get the dtype of the `value()` before that is mutated.
|
| 75 |
+
auto newType = v->value()->dtype();
|
| 76 |
+
ExprPtr new_val = v->value()->accept_mutator(this);
|
| 77 |
+
auto bufType = v->buf()->dtype();
|
| 78 |
+
|
| 79 |
+
if (isHalf(newType.scalar_type())) {
|
| 80 |
+
new_val = alloc<Cast>(newType, new_val);
|
| 81 |
+
inserted_half_casts_.insert(new_val);
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
// The scalar_type of value is not Half while the buf is Half
|
| 85 |
+
if (!isHalf(newType.scalar_type()) && isHalf(bufType.scalar_type())) {
|
| 86 |
+
new_val = alloc<Cast>(
|
| 87 |
+
newType.cloneWithScalarType(bufType.scalar_type()), new_val);
|
| 88 |
+
inserted_half_casts_.insert(new_val);
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
v->set_value(new_val);
|
| 92 |
+
return v;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
ExprPtr mutate(const HalfImmPtr& v) override {
|
| 96 |
+
return alloc<Cast>(kFloat, v);
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
ExprPtr mutate(const BFloat16ImmPtr& v) override {
|
| 100 |
+
return alloc<Cast>(kFloat, v);
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
ExprPtr mutate(const CastPtr& v) override {
|
| 104 |
+
ExprPtr child = v->src_value()->accept_mutator(this);
|
| 105 |
+
|
| 106 |
+
// just don't allow half casts we didn't insert.
|
| 107 |
+
if (isHalf(v)) {
|
| 108 |
+
if (inserted_half_casts_.count(v) < 1) {
|
| 109 |
+
v->set_src_value(child);
|
| 110 |
+
v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat));
|
| 111 |
+
return v;
|
| 112 |
+
}
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
// Remove Half(Float()) and friends.
|
| 116 |
+
CastPtr cast_child = to<Cast>(child);
|
| 117 |
+
if (cast_child) {
|
| 118 |
+
auto cast_to_double = v->dtype().scalar_type() == ScalarType::Double;
|
| 119 |
+
auto from_half = isHalf(cast_child->src_value());
|
| 120 |
+
// Cannot simplify the double(float(half)) to double(half) as NNC does
|
| 121 |
+
// not support cast BF16 to double directly.
|
| 122 |
+
auto not_cast_half_to_doulbe = !(cast_to_double && from_half);
|
| 123 |
+
if (v->dtype().is_floating_point() &&
|
| 124 |
+
cast_child->dtype().is_floating_point() && not_cast_half_to_doulbe) {
|
| 125 |
+
return alloc<Cast>(v->dtype(), cast_child->src_value());
|
| 126 |
+
}
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
if (child == v->src_value()) {
|
| 130 |
+
return v;
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
return alloc<Cast>(v->dtype(), child);
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
StmtPtr mutate(const LetPtr& v) override {
|
| 137 |
+
if (isHalf(v->var()->dtype().scalar_type())) {
|
| 138 |
+
VarPtr load_new_var = alloc<Var>(v->var()->name_hint(), kFloat);
|
| 139 |
+
ExprPtr new_value = alloc<Cast>(
|
| 140 |
+
v->var()->dtype().cloneWithScalarType(ScalarType::Float),
|
| 141 |
+
v->value()->accept_mutator(this));
|
| 142 |
+
var_map[v->var()] = load_new_var;
|
| 143 |
+
|
| 144 |
+
return alloc<Let>(load_new_var, new_value);
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
return IRMutator::mutate(v);
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
ExprPtr mutate(const VarPtr& v) override {
|
| 151 |
+
auto it = var_map.find(v);
|
| 152 |
+
if (it != var_map.end()) {
|
| 153 |
+
return it->second;
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
return v;
|
| 157 |
+
}
|
| 158 |
+
|
| 159 |
+
template <typename T>
|
| 160 |
+
ExprPtr mutateArithmetic(T v) {
|
| 161 |
+
IRMutator::mutate(v);
|
| 162 |
+
if (isHalf(v)) {
|
| 163 |
+
v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat));
|
| 164 |
+
}
|
| 165 |
+
return v;
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
ExprPtr mutate(const AddPtr& v) override {
|
| 169 |
+
return mutateArithmetic(v);
|
| 170 |
+
}
|
| 171 |
+
ExprPtr mutate(const SubPtr& v) override {
|
| 172 |
+
return mutateArithmetic(v);
|
| 173 |
+
}
|
| 174 |
+
ExprPtr mutate(const MulPtr& v) override {
|
| 175 |
+
return mutateArithmetic(v);
|
| 176 |
+
}
|
| 177 |
+
ExprPtr mutate(const DivPtr& v) override {
|
| 178 |
+
return mutateArithmetic(v);
|
| 179 |
+
}
|
| 180 |
+
ExprPtr mutate(const MaxPtr& v) override {
|
| 181 |
+
return mutateArithmetic(v);
|
| 182 |
+
}
|
| 183 |
+
ExprPtr mutate(const MinPtr& v) override {
|
| 184 |
+
return mutateArithmetic(v);
|
| 185 |
+
}
|
| 186 |
+
ExprPtr mutate(const CompareSelectPtr& v) override {
|
| 187 |
+
return mutateArithmetic(v);
|
| 188 |
+
}
|
| 189 |
+
ExprPtr mutate(const BroadcastPtr& v) override {
|
| 190 |
+
return mutateArithmetic(v);
|
| 191 |
+
}
|
| 192 |
+
ExprPtr mutate(const IfThenElsePtr& v) override {
|
| 193 |
+
return mutateArithmetic(v);
|
| 194 |
+
}
|
| 195 |
+
ExprPtr mutate(const IntrinsicsPtr& v) override {
|
| 196 |
+
return mutateArithmetic(v);
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
private:
|
| 200 |
+
static bool isHalf(ScalarType st) {
|
| 201 |
+
return st == ScalarType::Half || st == ScalarType::BFloat16;
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
static bool isHalf(const ExprPtr& v) {
|
| 205 |
+
return isHalf(v->dtype().scalar_type());
|
| 206 |
+
}
|
| 207 |
+
|
| 208 |
+
std::unordered_set<ExprPtr> inserted_half_casts_;
|
| 209 |
+
std::unordered_map<VarPtr, VarPtr> var_map;
|
| 210 |
+
};
|
| 211 |
+
|
| 212 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>

#include <cstring>
#include <utility>
|
| 9 |
+
|
| 10 |
+
namespace torch::jit::tensorexpr {
|
| 11 |
+
|
| 12 |
+
// Strongly-typed wrapper around a size_t hash value produced by HashProvider,
// used to compare sub-expressions for structural equality without mixing the
// value up with ordinary size_t arithmetic.
struct TORCH_API SimplifierHashType {
  SimplifierHashType() = default;
  explicit SimplifierHashType(size_t s) : _h(s) {}

  // Comparison operators are defined out of line.
  bool operator==(const SimplifierHashType& other) const;
  bool operator!=(const SimplifierHashType& other) const;
  bool operator<(const SimplifierHashType& other) const;
  // Comparisons against a raw size_t value.
  bool operator==(const size_t other) const;
  bool operator!=(const size_t other) const;

  // The wrapped hash value; 0 for a default-constructed instance.
  size_t _h{0};
};
|
| 24 |
+
|
| 25 |
+
} // namespace torch::jit::tensorexpr
|
| 26 |
+
|
| 27 |
+
namespace std {
// Make SimplifierHashType usable as a key in std:: unordered containers.
// The wrapped value is already a hash, so it is returned directly.
template <>
struct hash<torch::jit::tensorexpr::SimplifierHashType> {
  size_t operator()(const torch::jit::tensorexpr::SimplifierHashType& k) const {
    return k._h;
  }
};

} // namespace std
|
| 36 |
+
|
| 37 |
+
namespace torch::jit::tensorexpr {
|
| 38 |
+
|
| 39 |
+
#define CACHE_GUARD() \
|
| 40 |
+
if (cachedHash(v)) { \
|
| 41 |
+
return; \
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
class Term;
|
| 45 |
+
class Polynomial;
|
| 46 |
+
|
| 47 |
+
/* Expression hasher providing comparable values representing sub-exprs.
|
| 48 |
+
* Uses memoization to avoid excessive recursion. */
|
| 49 |
+
class TORCH_API HashProvider : public IRVisitor {
|
| 50 |
+
public:
|
| 51 |
+
template <class T>
|
| 52 |
+
SimplifierHashType hash(T e) {
|
| 53 |
+
e->accept(this);
|
| 54 |
+
return hashOf(e);
|
| 55 |
+
}
|
| 56 |
+
|
| 57 |
+
bool cachedHash(const ExprPtr& e) {
|
| 58 |
+
return exprToHash_.find(e) != exprToHash_.end();
|
| 59 |
+
}
|
| 60 |
+
bool cachedHash(const StmtPtr& s) {
|
| 61 |
+
return stmtToHash_.find(s) != stmtToHash_.end();
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
void clearCache() {
|
| 65 |
+
exprToHash_.clear();
|
| 66 |
+
stmtToHash_.clear();
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
void visit(const AddPtr& v) override;
|
| 70 |
+
void visit(const SubPtr& v) override;
|
| 71 |
+
void visit(const MulPtr& v) override;
|
| 72 |
+
void visit(const DivPtr& v) override;
|
| 73 |
+
void visit(const ModPtr& v) override;
|
| 74 |
+
void visit(const RoundOffPtr& v) override;
|
| 75 |
+
void visit(const MaxPtr& v) override;
|
| 76 |
+
void visit(const MinPtr& v) override;
|
| 77 |
+
void visit(const AndPtr& v) override;
|
| 78 |
+
void visit(const OrPtr& v) override;
|
| 79 |
+
void visit(const XorPtr& v) override;
|
| 80 |
+
void visit(const LshiftPtr& v) override;
|
| 81 |
+
void visit(const RshiftPtr& v) override;
|
| 82 |
+
void visit(const CompareSelectPtr& v) override;
|
| 83 |
+
|
| 84 |
+
#define IMM_VISIT(Type, Name) \
|
| 85 |
+
void visit(const Name##ImmPtr& v) override { \
|
| 86 |
+
CACHE_GUARD(); \
|
| 87 |
+
putHash(v, hash_combine(#Name, v->value())); \
|
| 88 |
+
}
|
| 89 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_VISIT);
|
| 90 |
+
#undef IMM_VISIT
|
| 91 |
+
|
| 92 |
+
void visit(const CastPtr& v) override;
|
| 93 |
+
void visit(const VarPtr& v) override;
|
| 94 |
+
void visit(const RampPtr& v) override;
|
| 95 |
+
void visit(const LoadPtr& v) override;
|
| 96 |
+
void visit(const StorePtr& v) override;
|
| 97 |
+
void visit(const BlockPtr& v) override;
|
| 98 |
+
void visit(const ForPtr& v) override;
|
| 99 |
+
void visit(const BroadcastPtr& v) override;
|
| 100 |
+
void visit(const IfThenElsePtr& v) override;
|
| 101 |
+
void visit(const IntrinsicsPtr& v) override;
|
| 102 |
+
void visit(const AllocatePtr& v) override;
|
| 103 |
+
void visit(const FreePtr& v) override;
|
| 104 |
+
void visit(const CondPtr& v) override;
|
| 105 |
+
void visit(const TermPtr& v) override;
|
| 106 |
+
void visit(const PolynomialPtr& v) override;
|
| 107 |
+
void visit(const MaxTermPtr& v) override;
|
| 108 |
+
void visit(const MinTermPtr& v) override;
|
| 109 |
+
|
| 110 |
+
template <typename... Types>
|
| 111 |
+
SimplifierHashType hash_combine(const Types&... args) {
|
| 112 |
+
SimplifierHashType seed;
|
| 113 |
+
_hash_combine(seed, args...);
|
| 114 |
+
return seed;
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
private:
|
| 118 |
+
SimplifierHashType hashOf(const ExprPtr& e) {
|
| 119 |
+
auto it = exprToHash_.find(e);
|
| 120 |
+
if (it != exprToHash_.end()) {
|
| 121 |
+
return it->second;
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
// As a failsafe fall back to IRPrinter.
|
| 125 |
+
std::stringstream ss;
|
| 126 |
+
IRPrinter printer(ss);
|
| 127 |
+
e->accept(&printer);
|
| 128 |
+
SimplifierHashType hash = SimplifierHashType(te_hash(ss.str()));
|
| 129 |
+
putHash(e, hash);
|
| 130 |
+
|
| 131 |
+
return hash;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
SimplifierHashType hashOf(const StmtPtr& s) {
|
| 135 |
+
auto it = stmtToHash_.find(s);
|
| 136 |
+
if (it != stmtToHash_.end()) {
|
| 137 |
+
return it->second;
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
// As a failsafe fall back to IRPrinter.
|
| 141 |
+
std::stringstream ss;
|
| 142 |
+
IRPrinter printer(ss);
|
| 143 |
+
s->accept(&printer);
|
| 144 |
+
SimplifierHashType hash = SimplifierHashType(te_hash(ss.str()));
|
| 145 |
+
putHash(s, hash);
|
| 146 |
+
|
| 147 |
+
return hash;
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
// Hash funcs for various types, numbers are random.
|
| 151 |
+
template <typename T>
|
| 152 |
+
void _hash_combine(SimplifierHashType& seed, const T& val) {
|
| 153 |
+
seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
void _hash_combine(SimplifierHashType& seed, const char* val) {
|
| 157 |
+
seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
|
| 158 |
+
}
|
| 159 |
+
|
| 160 |
+
// at:::Half doesn't have a prime_number_hash, so cast to short.
|
| 161 |
+
void _hash_combine(SimplifierHashType& seed, const at::Half& val) {
|
| 162 |
+
seed._h ^=
|
| 163 |
+
te_hash((uint16_t)val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
void _hash_combine(SimplifierHashType& seed, const Dtype& val) {
|
| 167 |
+
seed._h ^= te_hash(val.ToCppString()) + 0x1f752c19 + (seed._h << 7) +
|
| 168 |
+
(seed._h >> 4);
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
void _hash_combine(SimplifierHashType& seed, ExprPtr e) {
|
| 172 |
+
_hash_combine(seed, hash(std::move(e)));
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
template <typename T, typename... Types>
|
| 176 |
+
void _hash_combine(
|
| 177 |
+
SimplifierHashType& seed,
|
| 178 |
+
const T& val,
|
| 179 |
+
const Types&... args) {
|
| 180 |
+
_hash_combine(seed, val);
|
| 181 |
+
_hash_combine(seed, args...);
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
void putHash(const ExprPtr& e, SimplifierHashType h) {
|
| 185 |
+
auto res = exprToHash_.emplace(e, h);
|
| 186 |
+
if (res.second == false) {
|
| 187 |
+
// This is always a logic bug since we should check the cache first.
|
| 188 |
+
throw std::runtime_error("hash collision");
|
| 189 |
+
}
|
| 190 |
+
}
|
| 191 |
+
void putHash(const StmtPtr& s, SimplifierHashType h) {
|
| 192 |
+
auto res = stmtToHash_.emplace(s, h);
|
| 193 |
+
if (res.second == false) {
|
| 194 |
+
// This is always a logic bug since we should check the cache first.
|
| 195 |
+
throw std::runtime_error("hash collision");
|
| 196 |
+
}
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
std::unordered_map<ExprPtr, SimplifierHashType> exprToHash_;
|
| 200 |
+
std::unordered_map<StmtPtr, SimplifierHashType> stmtToHash_;
|
| 201 |
+
UniqueNameManager name_manager_;
|
| 202 |
+
|
| 203 |
+
size_t te_hash(SimplifierHashType val) {
|
| 204 |
+
return val._h;
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
// Canonical integer hash used by every other te_hash overload:
// XOR with a fixed constant, invert all bits, then reverse the byte order.
size_t te_hash(int64_t val) {
  const size_t mixed = ~(static_cast<size_t>(val) ^ 0x647AA4D20C0B);
  size_t reversed = 0;
  // Shift each byte of `mixed` (LSB first) into the top of `reversed`,
  // producing the byte-swapped value.
  for (unsigned int shift = 0; shift < 64; shift += 8) {
    reversed = (reversed << 8) | ((mixed >> shift) & 0xFF);
  }
  return reversed;
}
|
| 219 |
+
|
| 220 |
+
// Widen to 64 bits and reuse the canonical integer hash.
size_t te_hash(int32_t val) {
  return te_hash(static_cast<int64_t>(val));
}
|
| 224 |
+
|
| 225 |
+
// Widen to 64 bits and reuse the canonical integer hash.
size_t te_hash(uint32_t val) {
  return te_hash(static_cast<int64_t>(val));
}
|
| 229 |
+
|
| 230 |
+
// Reinterpret as signed 64-bit and reuse the canonical integer hash.
size_t te_hash(uint64_t val) {
  return te_hash(static_cast<int64_t>(val));
}
|
| 234 |
+
|
| 235 |
+
// Widen to 64 bits and reuse the canonical integer hash.
size_t te_hash(int16_t val) {
  return te_hash(static_cast<int64_t>(val));
}
|
| 239 |
+
|
| 240 |
+
size_t te_hash(std::string val) {
|
| 241 |
+
size_t hash{0};
|
| 242 |
+
int64_t intval{0};
|
| 243 |
+
int64_t s = val.size() - 1;
|
| 244 |
+
while (s >= 0) {
|
| 245 |
+
for (unsigned int i = 0; i < 8; ++i) {
|
| 246 |
+
if (s < 0)
|
| 247 |
+
break;
|
| 248 |
+
int64_t c = val[s];
|
| 249 |
+
intval |= (c << (i * 8));
|
| 250 |
+
|
| 251 |
+
s--;
|
| 252 |
+
}
|
| 253 |
+
hash ^= te_hash(intval);
|
| 254 |
+
intval = 0;
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
return hash;
|
| 258 |
+
}
|
| 259 |
+
|
| 260 |
+
// Hash the raw 64-bit pattern of the double.
// NOTE(review): type punning through reinterpret_cast violates strict
// aliasing; std::memcpy / std::bit_cast would be the conforming fix.
size_t te_hash(double d) {
  return te_hash(*reinterpret_cast<int64_t*>(&d));
}
|
| 264 |
+
|
| 265 |
+
// Hash the raw 32-bit pattern of the float.
// NOTE(review): type punning through reinterpret_cast violates strict
// aliasing; std::memcpy / std::bit_cast would be the conforming fix.
size_t te_hash(float d) {
  return te_hash(*reinterpret_cast<int32_t*>(&d));
}
|
| 269 |
+
|
| 270 |
+
// Hash the raw 16-bit pattern of the half-precision value.
// NOTE(review): type punning through reinterpret_cast violates strict
// aliasing; std::memcpy / std::bit_cast would be the conforming fix.
size_t te_hash(at::Half d) {
  return te_hash(*reinterpret_cast<int16_t*>(&d));
}
|
| 274 |
+
|
| 275 |
+
// Hash the raw 16-bit pattern of the bfloat16 value.
// NOTE(review): type punning through reinterpret_cast violates strict
// aliasing; std::memcpy / std::bit_cast would be the conforming fix.
size_t te_hash(at::BFloat16 d) {
  return te_hash(*reinterpret_cast<int16_t*>(&d));
}
|
| 279 |
+
};
|
| 280 |
+
|
| 281 |
+
} // namespace torch::jit::tensorexpr
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef TORCH_ENABLE_LLVM
|
| 4 |
+
#include <c10/util/ArrayRef.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace jit {
|
| 8 |
+
namespace tensorexpr {
|
| 9 |
+
|
| 10 |
+
// Pairs an intrinsic's symbol name with the address it should resolve to.
// Only compiled when TORCH_ENABLE_LLVM is defined (see enclosing #ifdef);
// presumably consumed by the LLVM codegen when resolving intrinsic calls —
// confirm against getIntrinsicSymbols() users.
struct SymbolAddress {
  const char* symbol; // null-terminated symbol name (not owned)
  void* address; // address the symbol resolves to (not owned)

  SymbolAddress(const char* sym, void* addr) : symbol(sym), address(addr) {}
};
|
| 16 |
+
|
| 17 |
+
c10::ArrayRef<SymbolAddress> getIntrinsicSymbols();
|
| 18 |
+
|
| 19 |
+
} // namespace tensorexpr
|
| 20 |
+
} // namespace jit
|
| 21 |
+
} // namespace torch
|
| 22 |
+
#endif // TORCH_ENABLE_LLVM
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
#include <c10/core/ScalarType.h>
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
|
| 5 |
+
|
| 6 |
+
namespace torch::jit::tensorexpr {
|
| 7 |
+
|
| 8 |
+
// Visitor over the TensorExpr IR: one virtual mutate() overload per node
// type, each returning an ExprPtr/StmtPtr result for that node. Subclasses
// override only the overloads for the node kinds they want to transform.
class TORCH_API IRMutator {
 public:
  virtual ~IRMutator() = default;
  // Binary arithmetic ops.
  virtual ExprPtr mutate(const AddPtr& v);
  virtual ExprPtr mutate(const SubPtr& v);
  virtual ExprPtr mutate(const MulPtr& v);
  virtual ExprPtr mutate(const DivPtr& v);
  virtual ExprPtr mutate(const ModPtr& v);
  virtual ExprPtr mutate(const MaxPtr& v);
  virtual ExprPtr mutate(const MinPtr& v);
  // Bitwise and shift ops.
  virtual ExprPtr mutate(const AndPtr& v);
  virtual ExprPtr mutate(const OrPtr& v);
  virtual ExprPtr mutate(const XorPtr& v);
  virtual ExprPtr mutate(const LshiftPtr& v);
  virtual ExprPtr mutate(const RshiftPtr& v);
  virtual ExprPtr mutate(const CompareSelectPtr& v);
  // One overload per scalar immediate node (BoolImm, HalfImm, ...), expanded
  // for every scalar type plus Bool/Half/BFloat16.
#define IMM_MUTATE_DECLARE(Type, Name) \
  virtual ExprPtr mutate(const Name##ImmPtr& v);
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE);
#undef IMM_MUTATE_DECLARE
  virtual ExprPtr mutate(const CastPtr& v);
  virtual ExprPtr mutate(const BitCastPtr& v);
  virtual ExprPtr mutate(const VarPtr& v);
  virtual ExprPtr mutate(const BufPtr& v);
  virtual ExprPtr mutate(const RampPtr& v);
  virtual ExprPtr mutate(const LoadPtr& v);
  virtual ExprPtr mutate(const BroadcastPtr& v);
  virtual ExprPtr mutate(const IfThenElsePtr& v);
  virtual ExprPtr mutate(const IntrinsicsPtr& v);

  // Term/Polynomial/RoundOff/MaxTerm/MinTerm — presumably nodes introduced
  // by the IR simplifier; confirm against the simplifier implementation.
  virtual ExprPtr mutate(const TermPtr& v);
  virtual ExprPtr mutate(const PolynomialPtr& v);
  virtual ExprPtr mutate(const RoundOffPtr& v);
  virtual ExprPtr mutate(const MaxTermPtr& v);
  virtual ExprPtr mutate(const MinTermPtr& v);

  virtual ExprPtr mutate(const ReduceOpPtr& v);

  // Statement nodes.
  virtual StmtPtr mutate(const ForPtr& v);
  virtual StmtPtr mutate(const BlockPtr& v);
  virtual StmtPtr mutate(const StorePtr& v);
  virtual StmtPtr mutate(const AtomicAddPtr& v);
  virtual StmtPtr mutate(const SyncThreadsPtr& v);
  virtual StmtPtr mutate(const ExternalCallPtr& v);
  virtual StmtPtr mutate(const ExternalCallWithAllocPtr& v);

  // Memory-management statements.
  virtual StmtPtr mutate(const AllocatePtr& v);
  virtual StmtPtr mutate(const FreePtr& v);
  virtual StmtPtr mutate(const FreeExtPtr& v);
  virtual StmtPtr mutate(const PlacementAllocatePtr& v);
  virtual StmtPtr mutate(const LetPtr& v);
  virtual StmtPtr mutate(const CondPtr& v);
};
|
| 61 |
+
|
| 62 |
+
} // namespace torch::jit::tensorexpr
|