Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/message.h +193 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_functions.h +70 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_no_python.h +119 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_agent.h +341 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h +420 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_agent.h +495 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h +90 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h +17 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h +33 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h +40 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h +175 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h +406 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h +150 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h +80 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h +128 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h +283 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h +102 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h +36 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h +295 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h +104 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h +347 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h +91 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h +499 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h +115 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h +29 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h +61 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h +129 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h +115 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h +217 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h +304 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h +934 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_cloner.h +65 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h +66 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_simplifier.h +554 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h +58 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h +64 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h +382 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_codegen.h +143 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_jit.h +77 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest.h +606 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest_randomization.h +13 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/lowerings.h +49 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/mem_dependency_checker.h +415 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/conv2d.h +105 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/matmul.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/misc.h +98 -0
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/message.h
ADDED
@@ -0,0 +1,193 @@
#pragma once

#include <torch/types.h>
#include <vector>

namespace torch {
namespace distributed {
namespace rpc {

// An enum denoting common RPC errors to allow specific error handling for them.
enum RPCErrorType {
  UNKNOWN_ERROR = 0, /* Indicates that error type could not be parsed */
  TIMEOUT = 1, /* Indicates that the RPC has timed out */
  INTENTIONAL_FAILURE = 2 /* Deliberate failure, such as those injected by
                             FaultyAgent for testing */
};

// The enum values are bitwise ORed with MessageType.
// They are bit flags starting from 0x100 and should have
// values such as 0x100, 0x200, 0x400, 0x800, 0xF00, etc.
enum MessageTypeFlags {
  REQUEST_TYPE = 0x100,
  RESPONSE_TYPE = 0x200,
};

// Message types must have values between 0x00 and 0xff
enum MessageType {
  // messages for dist.rpc on builtin operators
  SCRIPT_CALL = 0x00 | MessageTypeFlags::REQUEST_TYPE,
  SCRIPT_RET = 0x01 | MessageTypeFlags::RESPONSE_TYPE,

  // messages for dist.rpc on Python UDF
  PYTHON_CALL = 0x02 | MessageTypeFlags::REQUEST_TYPE,
  PYTHON_RET = 0x03 | MessageTypeFlags::RESPONSE_TYPE,

  // messages for dist.remote on builtin operators and Python UDF
  SCRIPT_REMOTE_CALL = 0x04 |
      MessageTypeFlags::REQUEST_TYPE, // A remote call on a builtin operator
  PYTHON_REMOTE_CALL =
      0x05 | MessageTypeFlags::REQUEST_TYPE, // A remote call on a Python UDF
  REMOTE_RET =
      0x06 | MessageTypeFlags::RESPONSE_TYPE, // Response for remote calls for
                                              // UDF, builtin, or script

  // RRef related internal messages
  SCRIPT_RREF_FETCH_CALL =
      0x07 | MessageTypeFlags::REQUEST_TYPE, // A UserRRef<IValue> fetches value
                                             // from owner
  PYTHON_RREF_FETCH_CALL =
      0x08 | MessageTypeFlags::REQUEST_TYPE, // A UserRRef<py::object> fetches
                                             // value from owner
  SCRIPT_RREF_FETCH_RET = 0x09 |
      MessageTypeFlags::RESPONSE_TYPE, // An OwnerRRef sends ivalue to user
  PYTHON_RREF_FETCH_RET = 0x0a |
      MessageTypeFlags::RESPONSE_TYPE, // An OwnerRRef sends py::object to user
  RREF_USER_DELETE = 0x0b |
      MessageTypeFlags::REQUEST_TYPE, // A UserRRef tells the owner to deref
  RREF_FORK_REQUEST =
      0x0c | MessageTypeFlags::REQUEST_TYPE, // A child UserRRef tells the owner
                                             // about itself
  RREF_CHILD_ACCEPT =
      0x0d | MessageTypeFlags::REQUEST_TYPE, // A child UserRRef tells parent
                                             // that owner knows it
  RREF_ACK =
      0x0e | MessageTypeFlags::RESPONSE_TYPE, // ACK to internal RRef messages

  // Messages with autograd info
  FORWARD_AUTOGRAD_REQ = 0x0f | MessageTypeFlags::REQUEST_TYPE,
  FORWARD_AUTOGRAD_RESP = 0x10 | MessageTypeFlags::RESPONSE_TYPE,

  // Messages to propagate gradients on the backward pass.
  BACKWARD_AUTOGRAD_REQ = 0x11 | MessageTypeFlags::REQUEST_TYPE,
  BACKWARD_AUTOGRAD_RESP = 0x12 | MessageTypeFlags::RESPONSE_TYPE,

  // Messages to tell workers to clean up their autograd context.
  CLEANUP_AUTOGRAD_CONTEXT_REQ = 0x13 | MessageTypeFlags::REQUEST_TYPE,
  CLEANUP_AUTOGRAD_CONTEXT_RESP = 0x14 | MessageTypeFlags::RESPONSE_TYPE,

  // Messages that tell workers to run requests with profiling enabled.
  RUN_WITH_PROFILING_REQ = 0x15 | MessageTypeFlags::REQUEST_TYPE,
  RUN_WITH_PROFILING_RESP = 0x16 | MessageTypeFlags::RESPONSE_TYPE,

  // Messages to support RRef.backward().
  RREF_BACKWARD_REQ = 0x17 | MessageTypeFlags::REQUEST_TYPE,
  RREF_BACKWARD_RESP = 0x18 | MessageTypeFlags::RESPONSE_TYPE,

  // Other internal message types
  EXCEPTION = 0x37 | MessageTypeFlags::RESPONSE_TYPE,
  UNKNOWN = 0x3c
};

// A message to be sent/received by an RpcAgent.
//
// A Message object contains 4 fields:
//    payload (std::vector<char>): a binary chunk of data.
//    tensors (std::vector<torch::Tensor>): all tensors. Tensor data are not
//        included in the payload, and it is up to the RpcAgent implementation
//        to determine how to serialize them. This design is helpful for
//        communicating super large tensors where serializing all the data at
//        once leads to an excessively large memory footprint. An
//        implementation can then serialize and send tensors chunk-by-chunk,
//        in a streaming fashion.
//    type (MessageType): type of the message.
//    id (int64_t): message id, used to match request and response. Other
//        implementations can ignore it if they have their own ways to do
//        matching.
//
// Layers above ``RpcAgent`` only convert ScriptCall, ScriptResp, PythonCall,
// and PythonResp into a Message, and it is up to the RpcAgent
// implementation to determine how to serialize a message.
class TORCH_API Message final : public torch::CustomClassHolder {
 private:
  // Keep these private in order to force users to go through make_intrusive
  // and thus prevent creating a Message that's not held by an intrusive_ptr.
  Message();

  Message(
      std::vector<char>&& payload,
      std::vector<torch::Tensor>&& tensors,
      MessageType type);

  Message(
      std::vector<char>&& payload,
      std::vector<torch::Tensor>&& tensors,
      MessageType type,
      int64_t id);

  friend c10::intrusive_ptr<Message>;

 public:
  Message(const Message& other) = delete;
  Message(Message&& other) = delete;
  Message& operator=(Message const& rhs) = delete;
  Message& operator=(Message&& rhs) = delete;

  // Destructively retrieves the payload.
  std::vector<char>&& movePayload() &&;
  std::vector<torch::Tensor>&& moveTensors() &&;

  std::vector<char>& payload();
  const std::vector<char>& payload() const;
  std::vector<torch::Tensor>& tensors();
  const std::vector<torch::Tensor>& tensors() const;
  MessageType type() const;

  bool isRequest() const;
  bool isResponse() const;
  bool isShutdown() const;

  // id is an optional field to match request/response. If an RpcAgent
  // implementation is able to do the matching without using this id, it can be
  // dropped during message serialization.
  int64_t id() const;
  void setId(int64_t id);

  std::vector<c10::weak_intrusive_ptr<c10::StorageImpl>> getStorages() const;

 private:
  std::vector<char> payload_;
  std::vector<torch::Tensor> tensors_;
  MessageType type_ = MessageType::UNKNOWN;
  int64_t id_ = -1;
};

// Create a response Message of type Exception.
// The exception string representation will be used as the message's payload.
// A message ID corresponding to the request that resulted in this response can
// be provided for matching requests/responses.
TORCH_API c10::intrusive_ptr<Message> createExceptionResponse(
    const std::exception& e,
    int64_t id);

// Create a response Message of type Exception.
// The passed in string representation will be used as the message's payload.
// A message ID corresponding to the request that resulted in this response can
// be provided for matching requests/responses.
TORCH_API c10::intrusive_ptr<Message> createExceptionResponse(
    const std::string& exceptionStr,
    int64_t id);

inline std::tuple<
    c10::intrusive_ptr<Message>,
    std::vector<c10::weak_intrusive_ptr<c10::StorageImpl>>>
withStorages(c10::intrusive_ptr<Message> message) {
  auto storages = message->getStorages();
  return std::make_tuple(std::move(message), std::move(storages));
}

using JitFuture = c10::ivalue::Future;

} // namespace rpc
} // namespace distributed
} // namespace torch
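As the flag comments above describe, every concrete message type is a low-byte identifier ORed with REQUEST_TYPE or RESPONSE_TYPE, so request/response classification is a single bit test. Below is a minimal standalone sketch of that encoding; the enum values are copied from the header, while the classify helper is illustrative and not part of the API:

#include <cstdio>

// Copied from the header: direction flags live above the low byte that
// identifies the concrete message kind.
enum MessageTypeFlags { REQUEST_TYPE = 0x100, RESPONSE_TYPE = 0x200 };
enum MessageType {
  SCRIPT_CALL = 0x00 | REQUEST_TYPE,
  SCRIPT_RET = 0x01 | RESPONSE_TYPE,
};

// Illustrative helper: a message is a request iff the REQUEST_TYPE bit is
// set, mirroring what Message::isRequest() can test.
const char* classify(int type) {
  if (type & REQUEST_TYPE) return "request";
  if (type & RESPONSE_TYPE) return "response";
  return "unknown";
}

int main() {
  std::printf("SCRIPT_CALL -> %s\n", classify(SCRIPT_CALL)); // request
  std::printf("SCRIPT_RET  -> %s\n", classify(SCRIPT_RET));  // response
}

Because the kind and the direction live in disjoint bit ranges, no lookup table is needed to tell requests from responses.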
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_functions.h
ADDED
@@ -0,0 +1,70 @@
#pragma once

#include <torch/csrc/distributed/rpc/py_rref.h>
#include <torch/csrc/distributed/rpc/rpc_agent.h>
#include <torch/csrc/jit/python/pybind_utils.h>
#include <torch/csrc/utils/pybind.h>

namespace torch {
namespace distributed {
namespace rpc {

// Converts an internal ivalue::Future of Message into a user-facing
// ivalue::Future of py::object type by creating a new ivalue::Future and
// calling its markCompleted from a callback on the given ivalue::Future.
// If hasValue is true, the Message will be converted into a py::object and
// then wrapped in an IValue. If hasValue is false, this ivalue::Future is
// only used for signaling and launching callbacks. In this case, the message
// will be discarded, and the ivalue::Future will be set using an empty
// IValue, or the given FutureError if there is an error.
c10::intrusive_ptr<JitFuture> toPyJitFuture(
    const c10::intrusive_ptr<JitFuture>& messageJitFuture,
    bool hasValue = true);

c10::intrusive_ptr<JitFuture> pyRpcBuiltin(
    const WorkerInfo& dst,
    const std::string& opName,
    const py::args& args,
    const py::kwargs& kwargs,
    const float rpcTimeoutSeconds);

c10::intrusive_ptr<JitFuture> pyRpcPythonUdf(
    const WorkerInfo& dst,
    std::string& pickledPythonUDF,
    std::vector<torch::Tensor>& tensors,
    const float rpcTimeoutSeconds,
    const bool isAsyncExecution);

c10::intrusive_ptr<JitFuture> pyRpcTorchscript(
    const std::string& dstWorkerName,
    const std::string& qualifiedNameStr,
    const py::tuple& argsTuple,
    const py::dict& kwargsDict,
    const float rpcTimeoutSeconds,
    const bool isAsyncExecution);

PyRRef pyRemoteBuiltin(
    const WorkerInfo& dst,
    const std::string& opName,
    const float rpcTimeoutSeconds,
    const py::args& args,
    const py::kwargs& kwargs);

PyRRef pyRemotePythonUdf(
    const WorkerInfo& dst,
    std::string& pickledPythonUDF,
    std::vector<torch::Tensor>& tensors,
    const float rpcTimeoutSeconds,
    const bool isAsyncExecution);

PyRRef pyRemoteTorchscript(
    const std::string& dstWorkerName,
    const std::string& qualifiedNameStr,
    const float rpcTimeoutSeconds,
    const bool isAsyncExecution,
    const py::args& args,
    const py::kwargs& kwargs);

} // namespace rpc
} // namespace distributed
} // namespace torch
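The toPyJitFuture comment describes a standard future-chaining shape: allocate a fresh future, then complete it from a callback registered on the source future. Below is a toy sketch of that shape; it assumes nothing about the real c10::ivalue::Future API beyond what the comment states, and ToyFuture/toUserFuture are hypothetical names used only for illustration:

#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy single-threaded future, only to illustrate the chaining shape.
template <typename T>
struct ToyFuture {
  void markCompleted(T v) {
    value = std::move(v);
    done = true;
    for (auto& cb : callbacks) cb(value);
  }
  void addCallback(std::function<void(const T&)> cb) {
    if (done) cb(value);
    else callbacks.push_back(std::move(cb));
  }
  T value{};
  bool done = false;
  std::vector<std::function<void(const T&)>> callbacks;
};

// Chaining pattern: turn a future of raw payloads into a future of
// user-facing values by completing the child inside the parent's callback.
std::shared_ptr<ToyFuture<std::string>> toUserFuture(
    const std::shared_ptr<ToyFuture<int>>& src) {
  auto dst = std::make_shared<ToyFuture<std::string>>();
  src->addCallback([dst](const int& raw) {
    dst->markCompleted("converted:" + std::to_string(raw));
  });
  return dst;
}

int main() {
  auto src = std::make_shared<ToyFuture<int>>();
  auto dst = toUserFuture(src);
  src->markCompleted(42);
  std::cout << dst->value << "\n"; // converted:42
}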
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_no_python.h
ADDED
@@ -0,0 +1,119 @@
#pragma once

#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/request_callback.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/distributed/rpc/rref_impl.h>
#include <torch/csrc/distributed/rpc/script_call.h>
#include <torch/csrc/distributed/rpc/script_remote_call.h>

namespace torch {
namespace distributed {
namespace rpc {

// RequestCallback implementation with no Python dependencies.
class TORCH_API RequestCallbackNoPython : public RequestCallback {
 public:
  c10::intrusive_ptr<JitFuture> processMessage(
      Message& request,
      std::vector<c10::Stream> streams) const override;

 protected:
  virtual std::unique_ptr<RpcCommandBase> deserializePythonRpcCommand(
      std::unique_ptr<RpcCommandBase> rpc,
      const MessageType& messageType) const;

  virtual c10::intrusive_ptr<JitFuture> processScriptCall(
      RpcCommandBase& rpc,
      std::vector<c10::Stream> streams) const;

  virtual c10::intrusive_ptr<JitFuture> processPythonCall(
      RpcCommandBase& rpc,
      std::vector<c10::Stream> streams) const;

  c10::intrusive_ptr<JitFuture> assignOwnerRRef(
      const RRefId& rrefId,
      const RRefId& forkId,
      c10::intrusive_ptr<JitFuture> valueFuture) const;

  virtual c10::intrusive_ptr<JitFuture> processScriptRemoteCall(
      RpcCommandBase& rpc,
      std::vector<c10::Stream> streams) const;

  virtual c10::intrusive_ptr<JitFuture> processPythonRemoteCall(
      RpcCommandBase& rpc,
      std::vector<c10::Stream> streams) const;

  c10::intrusive_ptr<JitFuture> retrieveOwnerRRef(const RRefId& rrefId) const;

  c10::intrusive_ptr<JitFuture> processScriptRRefFetchCall(
      RpcCommandBase& rpc) const;

  virtual c10::intrusive_ptr<JitFuture> processPythonRRefFetchCall(
      RpcCommandBase& rpc) const;

  c10::intrusive_ptr<JitFuture> processRRefUserDelete(
      RpcCommandBase& rpc) const;

  c10::intrusive_ptr<JitFuture> processRRefChildAccept(
      RpcCommandBase& rpc) const;

  c10::intrusive_ptr<JitFuture> processRRefForkRequest(
      RpcCommandBase& rpc) const;

  c10::intrusive_ptr<JitFuture> processForwardAutogradReq(
      RpcCommandBase& rpc,
      std::vector<c10::Stream> streams) const;

  c10::intrusive_ptr<JitFuture> processBackwardAutogradReq(
      RpcCommandBase& rpc,
      std::vector<c10::Stream> streams) const;

  c10::intrusive_ptr<JitFuture> processCleanupAutogradContextReq(
      RpcCommandBase& rpc) const;

  c10::intrusive_ptr<JitFuture> processRunWithProfilingReq(
      RpcCommandBase& rpc) const;

  virtual void handleRRefDelete(c10::intrusive_ptr<RRef>& rref) const;

  c10::intrusive_ptr<JitFuture> processRpc(
      RpcCommandBase& rpc,
      const MessageType& messageType,
      std::vector<c10::Stream> streams) const;

  virtual c10::intrusive_ptr<JitFuture> processRpcWithErrors(
      RpcCommandBase& rpc,
      const MessageType& messageType,
      std::vector<c10::Stream> streams) const;

  c10::intrusive_ptr<Message> handleError(
      const std::exception& e,
      const MessageType messageType,
      int64_t messageId) const;

  virtual bool cudaAvailable() const;

  virtual c10::intrusive_ptr<JitFuture> processRRefBackward(
      RpcCommandBase& rpc) const;

  // Helpers to run user-defined functions, operators and other computations.

  c10::intrusive_ptr<JitFuture> runJitOperator(
      const jit::Operator& op,
      std::vector<at::IValue>& stack,
      std::vector<c10::Stream> streams) const;

  // Helpers to convert various kinds of objects into already-completed
  // futures.

  c10::intrusive_ptr<JitFuture> asFuture(IValue value, TypePtr type) const;

  c10::intrusive_ptr<JitFuture> asFuture(
      c10::intrusive_ptr<Message> message) const;

  c10::intrusive_ptr<JitFuture> asFuture(std::exception_ptr err) const;
};

} // namespace rpc
} // namespace distributed
} // namespace torch
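RequestCallbackNoPython is essentially a dispatcher: processRpc routes each MessageType to the matching process* handler, and the asFuture overloads wrap synchronous results so every path returns a future. Below is a schematic sketch of that routing with stand-in types; the Kind enum and string results are illustrative only, while the real handlers take RpcCommandBase and a vector of c10::Stream:

#include <iostream>
#include <stdexcept>
#include <string>

// Stand-ins for a few MessageType values handled by the real callback.
enum class Kind { ScriptCall, PythonCall, RRefFetch };

// Illustrative dispatcher mirroring the shape of processRpc: one branch per
// message type, each producing some "future-like" result (a string here).
std::string processRpc(Kind kind) {
  switch (kind) {
    case Kind::ScriptCall:
      return "ran builtin operator";
    case Kind::PythonCall:
      return "needs Python; overridden in a Python-aware subclass";
    case Kind::RRefFetch:
      return "fetched owner RRef value";
  }
  throw std::runtime_error("unknown message type");
}

int main() {
  std::cout << processRpc(Kind::ScriptCall) << "\n";
  std::cout << processRpc(Kind::RRefFetch) << "\n";
}

The Python-dependent branches such as processPythonCall are virtual precisely so a Python-aware subclass can override them while this no-Python base keeps the routing logic.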
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_agent.h
ADDED
@@ -0,0 +1,341 @@
#pragma once

#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/request_callback.h>
#include <torch/csrc/distributed/rpc/types.h>

#include <algorithm>
#include <cctype>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

namespace torch {
namespace distributed {
namespace rpc {

using DeviceMap = std::unordered_map<c10::Device, c10::Device>;

// Default RPC timeout
constexpr float kDefaultRpcTimeoutSeconds = 60;
// Unset RPC timeout. This is the value agent::send() will have if the user
// does not pass in a specific timeout, and indicates that we must use the
// default timeout for RPCs.
constexpr float kUnsetRpcTimeout = -1;
constexpr auto kDefaultInitMethod = "env://";
constexpr float kSecToMsConversion = 1000;
constexpr auto kRpcTimeoutErrorStr =
    "RPC ran for more than set timeout ({} ms) and will now be marked with an error";

using steady_clock_time_point =
    std::chrono::time_point<std::chrono::steady_clock>;
// Input is a qualified name string, output is a JIT StrongTypePtr.
// Same as jit::TypeResolver; we did not import jit::TypeResolver here
// because it could introduce cyclic dependencies.
using TypeResolver =
    std::function<c10::StrongTypePtr(const c10::QualifiedName&)>;

struct TORCH_API RpcBackendOptions {
  RpcBackendOptions()
      : RpcBackendOptions(kDefaultRpcTimeoutSeconds, kDefaultInitMethod) {}

  RpcBackendOptions(float rpcTimeoutSeconds, std::string initMethod)
      : rpcTimeoutSeconds(rpcTimeoutSeconds),
        initMethod(std::move(initMethod)) {
    TORCH_CHECK(rpcTimeoutSeconds >= 0, "RPC Timeout must be non-negative");
  }

  float rpcTimeoutSeconds;
  std::string initMethod;
};

// A globally unique ID to identify an RpcAgent
struct TORCH_API WorkerInfo : torch::CustomClassHolder {
  WorkerInfo(std::string name, int64_t id);

  WorkerInfo(std::string name, worker_id_t id);

  bool operator==(const WorkerInfo& rhs) {
    return (id_ == rhs.id_) && (name_ == rhs.name_);
  }

  static constexpr size_t MAX_NAME_LEN = 128;

  const std::string name_;
  const worker_id_t id_;
};

struct TORCH_API RegisterWorkerInfoOnce {
  RegisterWorkerInfoOnce();
};

TORCH_API std::ostream& operator<<(
    std::ostream& os,
    const WorkerInfo& workerInfo);

// Struct for options to configure the RPC Retry protocol.
struct TORCH_API RpcRetryOptions {
  // Using a default constructor like all other Options structs in the RPC
  // codebase. TORCH_CHECKs for input validation are done in the
  // sendWithRetries function.
  RpcRetryOptions() = default;
  // Maximum number of times we will retry the RPC
  int maxRetries{5};
  // Initial duration between consecutive RPC send attempts
  std::chrono::milliseconds rpcRetryDuration{std::chrono::milliseconds(1000)};
  // Constant for exponential backoff used while calculating future wait
  // durations
  float retryBackoff{1.5};
};

// Struct that stores all the metadata needed to retry a given RPC.
struct TORCH_API RpcRetryInfo {
  RpcRetryInfo(
      const WorkerInfo& to,
      c10::intrusive_ptr<Message> message,
      c10::intrusive_ptr<JitFuture> originalFuture,
      int retryCount,
      RpcRetryOptions options)
      : to_(to),
        message_(std::move(message)),
        originalFuture_(std::move(originalFuture)),
        retryCount_(retryCount),
        options_(options) {}

  const WorkerInfo& to_;
  c10::intrusive_ptr<Message> message_;
  // Future that is returned to the caller of sendWithRetries().
  c10::intrusive_ptr<JitFuture> originalFuture_;
  // Number of send attempts completed so far.
  int retryCount_;
  RpcRetryOptions options_;
};

// ``RpcAgent`` is the base class for sending and receiving RPC messages. It
// provides a unified ``send`` API for both request and response messages, and
// will invoke the given ``RequestCallback`` to process received requests. It
// should immediately become ready to serve requests and accept responses
// after construction.
class TORCH_API RpcAgent {
 public:
  // `WorkerInfo` is the globally unique identifier for this RpcAgent instance.
  // It contains a ``name_`` field and an ``id_`` field. ``name_`` is the
  // globally unique name for this ``RpcAgent``. It is up to the ``RpcAgent``
  // implementation to determine how to resolve names. ``id_`` is the globally
  // unique ID for this ``RpcAgent``. This should be determined by the
  // ``RpcAgent`` implementation.
  // The ``RequestCallback`` will be invoked to handle received requests. This
  // ``RpcAgent`` base class makes no assumption on the thread-safeness of the
  // ``RequestCallback``. ``RpcAgent`` implementations need to make sure that
  // their threading model conforms to ``RequestCallback``'s requirements.
  // NB: RpcAgent implementations should not start serving requests until
  // ``start()`` is called, as there could be other contexts that have not been
  // initialized yet at this time.
  RpcAgent(
      WorkerInfo id,
      std::unique_ptr<RequestCallback> cb,
      std::chrono::milliseconds rpcTimeout);

  virtual ~RpcAgent();

  // Send a message to the ``RpcAgent`` of id ``to`` and returns a
  // ``JitFuture`` ptr. The implementation must be asynchronous, i.e., it
  // cannot block until it receives the response.
  //
  // If ``message.isRequest()`` is true, the ``JitFuture`` will be
  // completed when the response arrives. For other message types, the Future
  // should be ignored by the caller.
  virtual c10::intrusive_ptr<JitFuture> send(
      const WorkerInfo& to,
      c10::intrusive_ptr<Message> message,
      const float rpcTimeoutSeconds = kUnsetRpcTimeout,
      const DeviceMap& deviceMap = {}) = 0;

  // Retries sending the message up to maxRetries times until an ACK is
  // received. The duration between consecutive sends is increased over
  // time using an exponential backoff algorithm.
  //
  // Sends ``message`` to the ``RpcAgent`` of id ``to`` and returns a
  // ``JitFuture`` ptr, just like send(). Caller can specify the maximum
  // number of retries for this RPC (default is 5), initial duration between
  // sends (default is 1000ms), and backoff constant (default is 1.5) by
  // passing in the RpcRetryOptions struct. This API might end up
  // executing a method twice on the remote end (it does not guarantee
  // exactly-once semantics). Therefore, the user must ensure their requests
  // are idempotent.
  c10::intrusive_ptr<JitFuture> sendWithRetries(
      const WorkerInfo& to,
      c10::intrusive_ptr<Message> message,
      RpcRetryOptions retryOptions = RpcRetryOptions());

  // Return a reference to the ``WorkerInfo`` of this RpcAgent.
  // NB: not using ``c10::optional<const std::string&>`` here because we might
  // need to create a separate RPC API lib and avoid forcing all ``RpcAgent``
  // implementations to depend on libtorch.
  const WorkerInfo& getWorkerInfo() const;

  // Return a reference to the ``WorkerInfo`` of the given ``workerName``.
  virtual const WorkerInfo& getWorkerInfo(
      const std::string& workerName) const = 0;

  virtual const WorkerInfo& getWorkerInfo(worker_id_t id) const = 0;

  virtual std::vector<WorkerInfo> getWorkerInfos() const = 0;

  // Retrieve the timeout for all RPCs.
  inline std::chrono::milliseconds getRpcTimeout() const {
    return rpcTimeout_.load();
  }

  // Set the timeout for all RPCs
  inline void setRpcTimeout(const std::chrono::milliseconds& rpcTimeout) {
    rpcTimeout_.store(rpcTimeout);
  }

  // Call sync and join all internal threads. This method should be called
  // before every RPC process exits.
  virtual void join(bool shutdown = false, float timeout = 0) = 0;

  // Synchronize this process with other ``RpcAgent`` processes. Block until
  // all ``RpcAgent``s reach this method and send all pending messages.
  virtual void sync() = 0;

  // Sets up backend-agnostic state for accepting requests. Currently, this
  // entails setting rpcAgentRunning_ to true, creating the retry thread, and
  // calling the backend's startImpl.
  void start();

  // Derived classes must override this function to start accepting requests.
  // This is used to initialize any backend-specific state. Users must call
  // start, not startImpl, to initialize the RPC Agent.
  virtual void startImpl() = 0;

  // Stop accepting requests and shutdown the RPC framework as soon as
  // possible by terminating all RPC threads.
  void shutdown();

  // Derived classes must override this function to stop accepting requests.
  // This is used to clean up any backend-specific state. Users must call
  // shutdown, not shutdownImpl, to shutdown the RPC Agent.
  virtual void shutdownImpl() = 0;

  // Check if current RPC agent is set.
  static bool isCurrentRpcAgentSet();

  // Retrieve the valid current RPC agent.
  static std::shared_ptr<RpcAgent> getCurrentRpcAgent();

  // Set the current RPC agent.
  static void setCurrentRpcAgent(std::shared_ptr<RpcAgent> rpcAgent);

  // Retrieve metrics as KV map
  virtual std::unordered_map<std::string, std::string> getMetrics() = 0;

  // Retrieve debug info in addition to metrics as KV map
  virtual std::unordered_map<std::string, std::string> getDebugInfo();

  // Flag to control whether GIL wait times should be profiled or not.
  void enableGILProfiling(bool flag);

  // Retrieve whether we should profile GIL wait times or not.
  bool isGILProfilingEnabled();

  // Set the type resolver that will be passed to the JIT pickler to resolve
  // a type Ptr based on a type str.
  void setTypeResolver(std::shared_ptr<TypeResolver> typeResolver);

  // Get the type resolver
  std::shared_ptr<TypeResolver> getTypeResolver();

  // Retrieves the device map for the provided destination worker.
  virtual DeviceMap getDeviceMap(const WorkerInfo& dst) const;

  // Retrieve the (non-CPU) devices that are supported by the agent.
  virtual const std::vector<c10::Device>& getDevices() const;

 protected:
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const WorkerInfo workerInfo_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const std::unique_ptr<RequestCallback> cb_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::atomic<std::chrono::milliseconds> rpcTimeout_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::atomic<bool> profilingEnabled_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::shared_ptr<TypeResolver> typeResolver_;
  // Atomic boolean indicating whether this agent is running. It controls
  // whether several background threads should be running. It is set in
  // RpcAgent::start() and unset in the derived class shutdown().
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::atomic<bool> rpcAgentRunning_;

 private:
  static std::shared_ptr<RpcAgent> currentRpcAgent_;
  // Add GIL wait time data point to metrics
  virtual void addGilWaitTime(const std::chrono::microseconds gilWaitTime) = 0;
  friend class PythonRpcHandler;

  // Map that stores metadata for RPCs that may need to be re-tried as well as
  // the timepoint at which we should re-try them.
  std::map<
      steady_clock_time_point,
      std::unordered_set<std::shared_ptr<RpcRetryInfo>>>
      rpcRetryMap_;

  // Thread that checks for retryable RPCs in the rpcRetryMap_ and sleeps
  // until the next unACKed RPC's timeout has expired.
  std::thread rpcRetryThread_;

  // Function that rpcRetryThread_ calls in a loop as long as RpcAgent is
  // running.
  void retryExpiredRpcs();

  // This is the callback attached to futures corresponding to send retries.
  // It handles 3 cases: 1) send was completed, 2) send failed with an
  // error and we've done maxRetries failed send attempts, and 3) send
  // failed with an error and we have more retries to go. In case 1, we mark
  // the original future as complete. In case 2, we mark the future with an
  // error and do not retry again. In case 3, we move the RpcRetryInfo struct
  // to another time point in the map to schedule the RPC for a future send.
  void rpcRetryCallback(
      JitFuture& message,
      steady_clock_time_point newTime,
      std::shared_ptr<RpcRetryInfo> earliestRpc);

  // Function that uses the exponential backoff algorithm to compute the next
  // time point to retry a given RPC.
  inline steady_clock_time_point computeNewRpcRetryTime(
      RpcRetryOptions& options,
      int retryCount) {
    // The exponential backoff algorithm being used here is:
    // newTime = timeNow + (retryDuration * (backoffConstant ^ retryCount)).
    std::chrono::milliseconds timedelta =
        std::chrono::duration_cast<std::chrono::milliseconds>(
            options.rpcRetryDuration * pow(options.retryBackoff, retryCount));
    return std::chrono::time_point_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() + timedelta);
  }

  // Condition variable to signal when the rpcRetryMap_ has been populated.
  std::condition_variable rpcRetryMapCV_;

  // Mutex to protect rpcRetryMap_.
  std::mutex rpcRetryMutex_;
};

} // namespace rpc
} // namespace distributed
} // namespace torch

namespace std {
template <>
struct hash<torch::distributed::rpc::WorkerInfo> {
  std::size_t operator()(
      const torch::distributed::rpc::WorkerInfo& worker_info) const noexcept {
    return worker_info.id_;
  }
};
} // namespace std
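computeNewRpcRetryTime above implements plain exponential backoff: the k-th retry waits rpcRetryDuration * retryBackoff^k. Below is a standalone sketch that prints the wait per attempt using the header's defaults (1000 ms initial duration, 1.5 backoff, 5 retries); it reuses the formula only, not the real RpcRetryOptions type:

#include <chrono>
#include <cmath>
#include <cstdio>

int main() {
  using std::chrono::milliseconds;
  const milliseconds rpcRetryDuration{1000}; // header default
  const float retryBackoff = 1.5f;           // header default
  const int maxRetries = 5;                  // header default

  for (int retryCount = 0; retryCount < maxRetries; ++retryCount) {
    // Same formula as computeNewRpcRetryTime:
    // newTime = now + retryDuration * backoff^retryCount
    auto wait = std::chrono::duration_cast<milliseconds>(
        rpcRetryDuration * std::pow(retryBackoff, retryCount));
    std::printf("retry %d fires after %lld ms\n",
                retryCount, static_cast<long long>(wait.count()));
  }
}

With these defaults the waits come out to 1000, 1500, 2250, 3375, and 5062 ms; per the rpcRetryCallback comment, once maxRetries attempts have failed the original future is completed with an error instead of being rescheduled.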
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h
ADDED
@@ -0,0 +1,420 @@
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/jit_type.h>
|
| 4 |
+
#include <ATen/core/rref_interface.h>
|
| 5 |
+
#include <c10/core/Event.h>
|
| 6 |
+
#include <c10/util/Optional.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 8 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 9 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 10 |
+
|
| 11 |
+
#include <atomic>
|
| 12 |
+
|
| 13 |
+
namespace torch {
|
| 14 |
+
namespace distributed {
|
| 15 |
+
namespace rpc {
|
| 16 |
+
|
| 17 |
+
class RRef;
|
| 18 |
+
class RRefContext;
|
| 19 |
+
class UserRRef;
|
| 20 |
+
|
| 21 |
+
constexpr int OWNER_IDX = 0; // index of ownerId in the tuple
|
| 22 |
+
constexpr int RREFID_ON_IDX = 1; // index of RRefId.createdOn_ in the tuple
|
| 23 |
+
constexpr int RREFID_ID_IDX = 2; // index of RRefId.localId_ in the tuple
|
| 24 |
+
constexpr int FORKID_ON_IDX = 3; // index of ForkId.createdOn_ in the tuple
|
| 25 |
+
constexpr int FORKID_ID_IDX = 4; // index of ForkId.localId_ in the tuple
|
| 26 |
+
constexpr int PARENT_IDX = 5; // index of parent in the tuple
|
| 27 |
+
constexpr int TYPE_IDX = 6; // index of parent in the tuple
|
| 28 |
+
|
| 29 |
+
// NB: if more fields are added, make sure this field is also bumped
|
| 30 |
+
constexpr int RFD_TUPLE_SIZE = 7; // number of RRefForkData fields in py::tuple
|
| 31 |
+
|
| 32 |
+
// Represents fork of an RRef to be sent over the wire.
|
| 33 |
+
struct TORCH_API RRefForkData {
|
| 34 |
+
const worker_id_t ownerId_;
|
| 35 |
+
const RRefId rrefId_;
|
| 36 |
+
const ForkId forkId_;
|
| 37 |
+
const worker_id_t parent_;
|
| 38 |
+
const std::string typeStr_;
|
| 39 |
+
|
| 40 |
+
RRefForkData(
|
| 41 |
+
worker_id_t ownerId,
|
| 42 |
+
const RRefId& rrefId,
|
| 43 |
+
const ForkId& forkId,
|
| 44 |
+
worker_id_t parent,
|
| 45 |
+
std::string typeStr);
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
// Note [RRef Protocol]
|
| 49 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 50 |
+
//
|
| 51 |
+
// [Background]
|
| 52 |
+
//
|
| 53 |
+
// RRef stands for Remote REFerence. Each RRef is owned by a single worker
|
| 54 |
+
// (i.e., owner) and can be used by multiple users. The owner stores the real
|
| 55 |
+
// data referenced by its RRefs. RRef needs to support fast and scalable RPC.
|
| 56 |
+
// Hence, in the design, we avoid using a single global master to keep RRef
|
| 57 |
+
// states, instead owners will keep track of the global reference counts
|
| 58 |
+
// for its RRefs. Every RRef can be uniquely identified by a global RRefId,
|
| 59 |
+
// which is assigned at the time it is first created either on a user or on the
|
| 60 |
+
// owner.
|
| 61 |
+
//
|
| 62 |
+
// On the owner worker, there is only one OwnerRRef instance, which contains the
|
| 63 |
+
// real data, while on user workers, there can be as many UserRRefs as
|
| 64 |
+
// necessary, and UserRRef does not hold the data. All usage on the OwnerRRef
|
| 65 |
+
// should retrieve the unique OwnerRRef instance using the globally unique
|
| 66 |
+
// RRefId. //A UserRRef will be created when it is used as an argument or return
|
| 67 |
+
// value in dist.rpc or dist.remote call, but RRef forking and reference
|
| 68 |
+
// counting (RC) are completely transparent to applications. Every UserRRef will
|
| 69 |
+
// also have its globally unique ForkId.
|
| 70 |
+
//
|
| 71 |
+
// [Assumptions]
|
| 72 |
+
//
|
| 73 |
+
// 1. Transient Network Failures
|
| 74 |
+
//
|
| 75 |
+
// TODO: current RRef implementation does not tolerate failures
|
| 76 |
+
//
|
| 77 |
+
// The RRef design handles transient network failures by retrying
|
| 78 |
+
// messages. Node crashes or permanent network partition is beyond the scope.
|
| 79 |
+
// When those incidents occur, the application may take down all workers, revert
|
| 80 |
+
// to the previous checkpoint, and resume training.
|
| 81 |
+
//
|
| 82 |
+
// 2. Non-idempotent UDFs
|
| 83 |
+
//
|
| 84 |
+
// We assume UDFs are not idempotent and therefore cannot be retried. However,
|
| 85 |
+
// internal RRef control messages are idempotent and retried upon message
|
| 86 |
+
// failure.
|
| 87 |
+
//
|
| 88 |
+
// TODO: RRef internal messages are not yet idempotent
|
| 89 |
+
//
|
| 90 |
+
// 3. Out of Order Message Delivery
|
| 91 |
+
//
|
| 92 |
+
// We do not assume message delivery order between any pair of nodes, because
|
| 93 |
+
// both sender and receiver are using multiple threads. There is no guarantee on
|
| 94 |
+
// which message will be processed first.
|
| 95 |
+
//
|
| 96 |
+
// [RRef Lifetime]
|
| 97 |
+
//
|
| 98 |
+
// The goal of the protocol is to delete an OwnerRRef at an appropriate time.
|
| 99 |
+
// The right time to delete an OwnerRRef is when there are no living UserRRefs
|
| 100 |
+
// and Python GC also agrees to delete the OwnerRRef instance on the owner. The
|
| 101 |
+
// tricky part is to determine if there are any living UserRRefs.
|
| 102 |
+
//
|
| 103 |
+
// A user can get a UserRRef in three situations:
|
| 104 |
+
//
|
| 105 |
+
// (1). Receiving a UserRRef from the owner.
|
| 106 |
+
// (2). Receiving a UserRRef from another user.
|
| 107 |
+
// (3). Creating a new UserRRef owned by another worker.
|
| 108 |
+
//
|
| 109 |
+
// (1) is the simplest case where the owner initiates the fork, and hence it can
|
| 110 |
+
// easily increment local RC. The only requirement is that any UserRRef must
|
| 111 |
+
// notify the owner before destruction. Hence, we need the first guarantee:
|
| 112 |
+
//
|
| 113 |
+
// G1. The owner will be notified when any UserRRef is deleted.
|
| 114 |
+
//
|
| 115 |
+
// As messages might come delayed or out-of-order, we need more one guarantee to
|
| 116 |
+
// make sure the delete message is not sent out too soon. Let us first introduce
|
| 117 |
+
// a new concept. If A sends an RPC to B that involves an RRef, we call the RRef
|
| 118 |
+
// on A the parent RRef and the RRef on B the child RRef.
|
| 119 |
+
//
|
| 120 |
+
// G2. Parent RRef cannot be deleted until the child RRef is confirmed by the
|
| 121 |
+
// owner.
|
| 122 |
+
//
|
| 123 |
+
// Under (1), where the caller is UserRRef and callee is OwnerRRef, it simply
|
| 124 |
+
// means that the user will not send out the delete message until all previous
|
| 125 |
+
// messages are ACKed. Note that ACKed does not mean the owner finishes
|
| 126 |
+
// executing the function, instead, it only means the owner has retrieved its
|
| 127 |
+
// local OwnerRRef and about to pass it to the function, which is sufficient to
|
| 128 |
+
// keep the OwnerRRef alive even if the delete message from the user arrives at
|
| 129 |
+
// the owner before the function finishes execution.
|
| 130 |
+
//
|
| 131 |
+
// With (2) and (3), it is possible that the owner only partially knows the RRef
|
| 132 |
+
// fork graph or not even knowing it at all. For example, the RRef could be
// constructed on a user, and before the owner receives the RPC call, the
// creator user might have already shared the RRef with other users, and those
// users could further share the RRef. One invariant is that the fork graph of
// any RRef is always a tree rooted at the owner, because forking an RRef always
// creates a new RRef instance, and hence every RRef has a single parent. One
// nasty detail is that when an RRef is created on a user, technically the owner
// is not its parent, but we still consider it that way and it does not break
// the argument below.
//
// The owner's view on any node (fork) in the tree has three stages:
//
//       1) unknown -> 2) known -> 3) deleted.
//
// The owner's view on the entire tree keeps changing. The owner deletes its
// OwnerRRef instance when it thinks there are no living UserRRefs, i.e., when
// the OwnerRRef is deleted, all UserRRefs could be either indeed deleted or
// unknown. The dangerous case is when some forks are unknown and others are
// deleted.
//
// G2 trivially guarantees that no parent UserRRef Y can be deleted before the
// owner knows all of Y's children UserRRefs.
//
// However, it is possible that the child UserRRef Z may be deleted before the
// owner knows its parent Y. More specifically, this can happen when all of Z's
// messages are processed by the owner before all messages from Y, including the
// delete message. Nevertheless, this does not cause any problem, because at
// least one of Y's ancestors will be alive, and it will prevent the owner from
// deleting the OwnerRRef. Consider the following example: (NB: this scenario
// will no longer be relevant when we block UDF until all RRefs are confirmed by
// the owner)
//
//     OwnerRRef -> A -> Y -> Z
//
// OwnerRRef forks to A, then A forks to Y, and Y forks to Z. Z can be deleted
// without OwnerRRef knowing Y. However, the OwnerRRef will at least know A, as
// the owner directly forks the RRef to A. A won't die before the owner knows Y.
//
// Things get a little trickier if the RRef is created on a user:
//
//     OwnerRRef
//         ^
//         |
//         A -> Y -> Z
//
// If Z calls to_here on the UserRRef, the owner at least knows A when Z is
// deleted, because otherwise to_here wouldn't finish. If Z does not call
// to_here, it is possible that the owner receives all messages from Z before
// any message from A and Y. In this case, as the real data of the OwnerRRef has
// not been created yet, there is nothing to be deleted either. It is the same
// as if Z did not exist at all. Hence, it's still OK.
//
// See #26759 for more details and discussions.
//
// TODO: make RRef an IValue, and edit createStackForSchema accordingly
// TODO: make RRef system messages idempotent and retry on failures.
//
// ``RRef`` is the base type for both ``UserRRef`` and ``OwnerRRef``.
// Each ``RRef`` has a globally unique ``RRefId``.
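// To make the three-stage bookkeeping above concrete, a minimal sketch (the
// names ForkStage and canDeleteOwner are illustrative, not part of the
// implementation): the owner may reclaim an OwnerRRef only when no fork it
// has ever learned about is still in the known stage.
//
//   enum class ForkStage { Unknown, Known, Deleted };
//
//   bool canDeleteOwner(const std::map<ForkId, ForkStage>& forks) {
//     for (const auto& entry : forks) {
//       // A fork still in the Known stage keeps the OwnerRRef alive.
//       if (entry.second == ForkStage::Known) {
//         return false;
//       }
//     }
//     return true;
//   }
//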
class TORCH_API RRef : public RRefInterface {
 public:
  // RRef is made NOT copyable NOT movable to prevent messing up reference
  // counting.
  explicit RRef(const RRef& other) = delete;
  explicit RRef(RRef&& other) = delete;
  RRef& operator=(RRef&& other) = delete;

  ~RRef() override = default;

  // returns the worker id of the owner
  inline worker_id_t owner() const override {
    return ownerId_;
  }

  // returns the worker name of the owner
  inline std::string ownerName() const override {
    return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_).name_;
  }

  // returns the worker info of the owner
  inline WorkerInfo ownerWorkerInfo() const {
    return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_);
  }

  // Returns the globally unique RRefId of this RRef
  inline const RRefId& rrefId() const {
    return rrefId_;
  }

  inline bool isPyObj() const {
    return type_ == PyObjectType::get();
  }
  inline const TypePtr type() const override {
    return type_;
  }

  // Save the future corresponding to the creation of this RRef on a remote
  // node. Note that this is only set when processing requests invoked with
  // rpc.remote. This is only used to get the future corresponding to the rref
  // for profiling use cases.
  inline void registerOwnerCreationFuture(c10::intrusive_ptr<JitFuture> fut) {
    ownerCreationFuture_ = std::move(fut);
  }

  // Get the future corresponding to the creation of this rref.
  inline c10::intrusive_ptr<JitFuture> getOwnerCreationFuture() const {
    return ownerCreationFuture_;
  }

  // Check if creation of this RRef on the owner node has timed out.
  inline bool getTimedOut() const {
    return timedOut_.load();
  }

  // Dispatches an error to the correct handler based on its RPCErrorType.
  void handleError(RPCErrorType errorType, const JitFuture& JitFuture);

  // Send a delete UserRRef request to the Owner,
  // if the request hasn't been sent yet.
  // There are 2 cases when this is called:
  // 1. Python GC decides the end of the UserRRef's lifetime and calls the
  //    destructor.
  // 2. RPC module graceful shutdown calls it on all UserRRefs tracked
  //    in the RRefContext.
  virtual void tryDel() {}

 protected:
  // Indicates that the creation of this RRef on the owner node has timed out.
  inline void setTimedOut() {
    timedOut_ = true;
  }
  friend class RRefContext;

  RRef(worker_id_t ownerId, const RRefId& rrefId, TypePtr type);

  virtual RRefForkData fork() const;

  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const worker_id_t ownerId_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const RRefId rrefId_;
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::atomic<bool> timedOut_{false};

  // type field to denote the type of the element that the RRef is holding;
  // it could be any TypePtr that JIT supports, including PyObjectType
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  const TypePtr type_;
  // Future corresponding to the request to create this RRef on a remote node.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  c10::intrusive_ptr<JitFuture> ownerCreationFuture_;
};

// ``UserRRef`` represents a user of an RRef. Besides the ``RRefId``, each user
// also has a globally unique ``ForkId`` to identify this user. ``UserRRef``
// never owns the real value; the only way to get the value of the ``RRef`` is
// to call ``to_here()`` and get a copy.
class TORCH_API UserRRef final : public RRef {
 public:
  UserRRef(const UserRRef& other) = delete;
  UserRRef(UserRRef&& other) = delete;
  UserRRef& operator=(const UserRRef& other) = delete;
  UserRRef& operator=(UserRRef&& other) = delete;

  UserRRef(
      worker_id_t ownerId,
      const RRefId& rrefId,
      const ForkId& forkId,
      TypePtr type);

  inline bool isOwner() const override {
    return false;
  }

  inline bool confirmedByOwner() const override {
    return confirmedByOwner_;
  }

  // Returns the globally unique ForkId of this RRef
  const ForkId& forkId() const;

  // Get a copy of the value from the ``OwnerRRef``. If the value is not ready
  // yet, this call will block.
  IValue toHere(
      const float timeoutSeconds =
          torch::distributed::rpc::kUnsetRpcTimeout) const;
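  // A user-side fetch is a blocking call on the fork; a minimal sketch
  // (userRRef is an illustrative c10::intrusive_ptr<UserRRef>, e.g. obtained
  // from an rpc.remote call):
  //
  //   IValue value = userRRef->toHere(/*timeoutSeconds=*/60.0);
  //   // value is a copy of the owner's data; mutating it does not affect
  //   // the OwnerRRef.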

  void tryDel() override;

  // Will be called when refcount reaches 0.
  // Upon destruction, this ``UserRRef`` will tell the owner to deref.
  void release_resources() override;

  // Will be called when both refcount and weakcount reach 0. See
  // https://github.com/pytorch/pytorch/blob/9116f02bebf3a5260feef5732d36c54ecb3b4033/c10/util/intrusive_ptr.h#L204
  // This is called on destructing the wrapping intrusive_ptr_target instance
  // and its data members.
  ~UserRRef() override;

 private:
  friend class RRefContext;

  RRefForkData fork() const override;
  inline void confirm() {
    confirmedByOwner_ = true;
  }

  const ForkId forkId_;

  // Indicates if this user has sent a delete message to its owner.
  // Note, thread safety is needed because the delete message could be sent by
  // either the destructor called by Python garbage collection or RRefContext
  // proactive cleanup on RPC graceful shutdown.
  std::mutex deletedOnOwnerMutex_;
  bool deletedOnOwner_{false};
  // Indicates whether this UserRRef has been confirmed by its owner.
  std::atomic<bool> confirmedByOwner_;
};

// Keep the template only on the derived class because ``RRefContext`` needs to
// erase the type on ``RRef`` and keep them in one map.
class TORCH_API OwnerRRef final : public RRef {
 public:
  OwnerRRef(const OwnerRRef& other) = delete;
  OwnerRRef(OwnerRRef&& other) = delete;
  OwnerRRef& operator=(const OwnerRRef& other) = delete;
  OwnerRRef& operator=(OwnerRRef&& other) = delete;

  OwnerRRef(
      worker_id_t ownerId,
      const RRefId& rrefId,
      TypePtr type,
      std::vector<c10::Device> devices);

  OwnerRRef(
      worker_id_t ownerId,
      const RRefId& rrefId,
      TypePtr type,
      c10::optional<IValue> value,
      std::vector<c10::Device> devices);

  inline bool isOwner() const override {
    return true;
  }

  // OwnerRRef is always confirmed, while UserRRef is only confirmed when the
  // owner knows about it.
  inline bool confirmedByOwner() const override {
    return true;
  }

  // Get a constant reference to the real value. This method will block if the
  // value is not ready. This method does not need the GIL as it does not
  // create any new py::object. It will throw if there is an error.
  const IValue& getValue() const;

  // Set the value of this ``OwnerRRef``. This method does not need the GIL as
  // it does not create any new py::object.
  void setValue(IValue&& value);
  // Sets the value of this ``OwnerRRef`` to contain an exception.
  void setError(std::exception_ptr eptr);

  // Has a value or error been set?
  bool hasValue() const;
  // Gets a future that is satisfied when the value or error is set.
  c10::intrusive_ptr<JitFuture> getFuture();

 private:
  friend class RRefContext;

  c10::intrusive_ptr<JitFuture> future_;
};

TORCH_API std::ostream& operator<<(std::ostream& os, const RRef& rref);

// Helper function that casts from c10::RRefInterface to OwnerRRef
inline TORCH_API c10::intrusive_ptr<OwnerRRef> fromRRefInterface(
    const c10::intrusive_ptr<c10::RRefInterface>& rrefInterface) {
  return c10::static_intrusive_pointer_cast<OwnerRRef>(rrefInterface);
}

// Helper function that casts from OwnerRRef to c10::RRefInterface
inline TORCH_API c10::intrusive_ptr<c10::RRefInterface> fromOwnerRRef(
    const c10::intrusive_ptr<RRef>& ownerRRef) {
  return c10::static_intrusive_pointer_cast<c10::RRefInterface>(ownerRRef);
}

} // namespace rpc
} // namespace distributed
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h
ADDED
@@ -0,0 +1,26 @@
#pragma once

#include <torch/csrc/distributed/rpc/message.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/jit/serialization/pickler.h>

namespace torch {
namespace distributed {
namespace rpc {

// Return value of a builtin operator or a TorchScript function.
class TORCH_API ScriptResp final : public RpcCommandBase {
 public:
  explicit ScriptResp(at::IValue&& values);

  const at::IValue& value();
  c10::intrusive_ptr<Message> toMessageImpl() && override;
  static std::unique_ptr<ScriptResp> fromMessage(const Message& message);

 private:
  const at::IValue value_;
};

} // namespace rpc
} // namespace distributed
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_agent.h
ADDED
@@ -0,0 +1,495 @@
#pragma once

#ifdef USE_TENSORPIPE

#include <atomic>
#include <thread>

#include <c10/core/thread_pool.h>
#include <torch/csrc/distributed/c10d/PrefixStore.hpp>
#include <torch/csrc/distributed/c10d/Store.hpp>
#include <torch/csrc/distributed/rpc/rpc_agent.h>

// Forward-declare the TensorPipe classes we need, to avoid including its
// headers in PyTorch's ones and thus have it become a public dependency.

namespace tensorpipe {

class Context;
class Error;
class Listener;
class Message;
class Pipe;

namespace transport {
class Context;
} // namespace transport

namespace channel {
class Context;
} // namespace channel

} // namespace tensorpipe

namespace torch {
namespace distributed {
namespace rpc {

// These priorities instruct TensorPipe on which transport/channel to pick
// during handshake. Higher priorities will take precedence over lower ones.
// The transport with the lowest priority will be the one used to bootstrap
// pipes.

constexpr int64_t kShmTransportPriority = 200;
constexpr int64_t kIbvTransportPriority = 100;
// The UV transport just uses TCP and should work everywhere, thus keep it last.
constexpr int64_t kUvTransportPriority = 0;

constexpr int64_t kCmaChannelPriority = 1200;
constexpr int64_t kMultiplexedUvChannelPriority = 1100;
// The basic channel reuses a transport as a channel, and is thus our fallback.
constexpr int64_t kBasicChannelPriority = 1000;

// CPU channels have higher priority than CUDA channels, since the latter might
// handle CPU-to-CPU transfers, but will always be less efficient than their
// CPU-only counterparts.
constexpr int64_t kCudaIpcChannelPriority = 300;
constexpr int64_t kCudaGdrChannelPriority = 200;
constexpr int64_t kCudaXthChannelPriority = 400;
constexpr int64_t kCudaBasicChannelPriority = 0;

using steady_clock_time_point =
    std::chrono::time_point<std::chrono::steady_clock>;

struct TORCH_API TransportRegistration {
  std::shared_ptr<tensorpipe::transport::Context> transport;
  int64_t priority;
  std::string address;
};

C10_DECLARE_REGISTRY(TensorPipeTransportRegistry, TransportRegistration);

struct TORCH_API ChannelRegistration {
  std::shared_ptr<tensorpipe::channel::Context> channel;
  int64_t priority;
};

C10_DECLARE_REGISTRY(TensorPipeChannelRegistry, ChannelRegistration);

constexpr auto kDefaultNumWorkerThreads = 16;

struct TORCH_API TensorPipeRpcBackendOptions : public RpcBackendOptions {
  TensorPipeRpcBackendOptions(
      int numWorkerThreads,
      optional<std::vector<std::string>> transports,
      optional<std::vector<std::string>> channels,
      float rpc_timeout,
      std::string init_method,
      std::unordered_map<std::string, DeviceMap> device_maps = {},
      std::vector<c10::Device> devices = {})
      : RpcBackendOptions(rpc_timeout, init_method),
        numWorkerThreads(numWorkerThreads),
        transports(std::move(transports)),
        channels(std::move(channels)),
        deviceMaps(std::move(device_maps)),
        devices(std::move(devices)) {
    TORCH_CHECK(
        numWorkerThreads > 0,
        "num_worker_threads must be positive, got ",
        numWorkerThreads);

    if (this->transports.has_value()) {
      for (const std::string& transportName : this->transports.value()) {
        TORCH_CHECK(
            TensorPipeTransportRegistry()->Has(transportName),
            "Unknown transport: ",
            transportName);
      }
    }

    if (this->channels.has_value()) {
      for (const std::string& channelName : this->channels.value()) {
        TORCH_CHECK(
            TensorPipeChannelRegistry()->Has(channelName),
            "Unknown channel: ",
            channelName);
      }
    }
  }

  void setDeviceMap(const std::string& workerName, const DeviceMap& deviceMap) {
    auto iter = deviceMaps.find(workerName);
    if (iter == deviceMaps.end()) {
      deviceMaps[workerName] = deviceMap;
    } else {
      for (auto& entry : deviceMap) {
        // c10::Device has no default constructor, hence map[device] doesn't
        // work. In C++17 we can use insert_or_assign.
        auto entryIter = iter->second.find(entry.first);
        if (entryIter == iter->second.end()) {
          iter->second.emplace(entry.first, entry.second);
        } else {
          entryIter->second = entry.second;
        }
      }
    }
  }
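  // With C++17, the find/emplace/assign dance above collapses to a single
  // call per entry; an equivalent sketch of the loop body (not what the code
  // above does, which keeps the explicit branch):
  //
  //   for (const auto& entry : deviceMap) {
  //     iter->second.insert_or_assign(entry.first, entry.second);
  //   }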

  int numWorkerThreads;
  const optional<std::vector<std::string>> transports;
  const optional<std::vector<std::string>> channels;
  std::unordered_map<std::string, DeviceMap> deviceMaps;
  std::vector<c10::Device> devices;
};

// Struct to track the network source metrics
struct TORCH_API NetworkSourceInfo {
  worker_id_t srcRank;
  std::vector<uint8_t> srcMachineAddr;
};

// Struct to track aggregated network metrics
struct TORCH_API AggregatedNetworkData {
  uint64_t numCalls{0};
  uint64_t totalSentBytes{0};
  uint64_t totalRecvBytes{0};
  uint64_t totalErrors{0};
};

// TensorPipeAgent leverages TensorPipe (https://github.com/pytorch/tensorpipe)
// to transparently move tensors and payloads through the fastest available
// transport or channel. It acts like a hybrid RPC transport, providing shared
// memory (linux) and TCP (linux & mac) support. CUDA support is in progress.
class TORCH_API TensorPipeAgent : public RpcAgent {
 public:
  TensorPipeAgent(
      const c10::intrusive_ptr<::c10d::Store>& store,
      std::string selfName,
      worker_id_t selfId,
      optional<int> worldSize,
      TensorPipeRpcBackendOptions opts,
      std::unordered_map<std::string, DeviceMap> reverseDeviceMaps,
      std::vector<c10::Device> devices,
      std::unique_ptr<RequestCallback> cb);

  TensorPipeAgent(const TensorPipeAgent&) = delete;
  TensorPipeAgent& operator=(const TensorPipeAgent&) = delete;

  c10::intrusive_ptr<JitFuture> send(
      const WorkerInfo& to,
      c10::intrusive_ptr<Message> message,
      const float rpcTimeoutSeconds = kUnsetRpcTimeout,
      const DeviceMap& deviceMap = {}) override;

  // join() and sync() would be deprecated -
  // https://github.com/pytorch/pytorch/issues/27647
  void join(bool shutdown = false, float timeout = 0) override;
  void sync() override {}
  void startImpl() override;
  void shutdownImpl() override;

  ~TensorPipeAgent() override;

  const WorkerInfo& getWorkerInfo(const std::string& workerName) const override;
  const WorkerInfo& getWorkerInfo(worker_id_t workerId) const override;
  std::vector<WorkerInfo> getWorkerInfos() const override;
  void updateGroupMembership(
      const WorkerInfo& workerInfo,
      const std::vector<c10::Device>& devices,
      const std::unordered_map<std::string, DeviceMap>& reverseDeviceMaps,
      bool isJoin);

  std::unordered_map<std::string, std::string> getMetrics() override;

  void addGilWaitTime(const std::chrono::microseconds gilWaitTime) override;

  TensorPipeRpcBackendOptions getBackendOptions() const;

  const c10::intrusive_ptr<::c10d::Store> getStore() const;

  DeviceMap getDeviceMap(const WorkerInfo& dest) const override;

  const std::vector<c10::Device>& getDevices() const override;

  using NetworkDataDict =
      std::unordered_map<std::string, AggregatedNetworkData>;

  // Returns metrics tracked by the NetworkDataDict
  NetworkDataDict getNetworkData();
  // Returns the NetworkSourceInfo struct
  NetworkSourceInfo getNetworkSourceInfo();

  static const std::string& guessAddress();

  // For testing purposes.
  size_t timeoutMapSize();
  size_t numPendingResponses();
  size_t messageIdToTimeoutMapSize();

  const bool isStaticGroup_;

 protected:
  // TensorPipe write function that could be used to write response
  // messages by the server, and write request messages by the client. This
  // is a protected method since it is overridden by FaultyTensorPipeAgent.
  virtual void pipeWrite(
      const std::shared_ptr<tensorpipe::Pipe>&,
      c10::intrusive_ptr<Message> message,
      std::vector<c10::Device>&& devices,
      std::vector<c10::Stream> streams,
      std::function<void(const tensorpipe::Error&)>) noexcept;

 private:
  // Removes the given messageId from the timeoutMap_.
  void removeFromTimeoutMap(uint64_t messageId);

  // Populates workerIdToInfo_ and workerNameToInfo_ using addressStore_
  void prepareNames(bool isStaticGroup);

  // Check the static group attribute against the value set in the store
  void checkAndSetStaticGroup(const c10::intrusive_ptr<::c10d::Store>& store);

  const std::string& findWorkerURL(const WorkerInfo& worker) const;

  // Only used for dynamic RPC groups; method to have a worker leave the group
  void leaveGroup();

  // TensorPipe read function that could be used to read response messages
  // by the client, and read request messages by the server.
  void pipeRead(
      const std::shared_ptr<tensorpipe::Pipe>&,
      std::function<void(
          const tensorpipe::Error&,
          c10::intrusive_ptr<Message>,
          std::vector<c10::Stream>)>) noexcept;

  // Callback of listener accept()
  void onListenerAccepted(
      const tensorpipe::Error& error,
      std::shared_ptr<tensorpipe::Pipe>& pipe);

  // Respond to a call from a peer
  void respond(std::shared_ptr<tensorpipe::Pipe>& pipe);

  void sendCompletedResponseMessage(
      std::shared_ptr<tensorpipe::Pipe>& pipe,
      JitFuture& futureResponseMessage,
      uint64_t messageId,
      std::vector<c10::Stream> stream);

  // Collects metrics from successful RPC calls
  void trackNetworkData(
      uint64_t requestSize,
      uint64_t responseSize,
      const std::string& destWorkerName);

  // Collects metrics from failed RPC calls
  void trackNetworkError(
      uint64_t requestSize,
      const std::string& destWorkerName);

  inline std::vector<c10::Device> getDevicesForRemote(
      const std::string& remoteName,
      const Message& message) const;

  // When a request+response completes, we need to mark the future message as
  // complete. However, if its timeout has already expired, it already has an
  // error set. There is no atomic "test-and-set" way to mark a future complete
  // only if it isn't yet. It does exist for errors (setErrorIfNeeded) but, even
  // then, it ends up printing a log message, which may worry the user. To solve
  // both issues we use a separate atomic flag to know the status of the future.
  struct AtomicJitFuture {
    explicit AtomicJitFuture(const std::vector<c10::Device>& devices) {
      jitFuture = c10::make_intrusive<at::ivalue::Future>(
          at::AnyClassType::get(), devices);
    }

    std::atomic_flag isComplete = ATOMIC_FLAG_INIT;
    c10::intrusive_ptr<JitFuture> jitFuture;
  };
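  // The flag provides the missing test-and-set: whichever side (normal
  // completion or timeout) sets it first gets to complete the future, and
  // the loser simply does nothing. A minimal sketch (markDoneOnce is a
  // hypothetical caller, not a member of this class):
  //
  //   void markDoneOnce(AtomicJitFuture& fut, c10::IValue value) {
  //     if (!fut.isComplete.test_and_set()) {
  //       fut.jitFuture->markCompleted(std::move(value));
  //     }
  //   }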

  // Maintains state per client pipe to track pending response messages and
  // error states. pendingResponseMessage_ should be protected by a mutex since
  // it can be raced with a user send() call.
  // TODO: To achieve better performance we can have a pipe pool per
  // client that can be configured using RpcBackendOptions.
  struct ClientPipe {
    explicit ClientPipe(std::shared_ptr<tensorpipe::Pipe> pipe)
        : pipe_(std::move(pipe)) {}
    std::shared_ptr<tensorpipe::Pipe> pipe_;
    mutable std::mutex mutex_;
    bool inError_{false};
    // Map from Message Request IDs to corresponding futures.
    std::unordered_map<uint64_t, std::shared_ptr<AtomicJitFuture>>
        pendingResponseMessage_;
  };

  const c10::intrusive_ptr<::c10d::Store> store_;

  const TensorPipeRpcBackendOptions opts_;
  // For dynamic RPC, the reverse device maps are updated whenever a new rank
  // joins or leaves the group
  std::unordered_map<std::string, DeviceMap> reverseDeviceMaps_;
  // Local devices used by this agent. If the application didn't specify this
  // field, it will be initialized using the corresponding local devices in
  // opts_.deviceMaps and reverseDeviceMaps_.
  std::vector<c10::Device> devices_;

  ThreadPool threadPool_;
  std::shared_ptr<tensorpipe::Context> context_;
  std::shared_ptr<tensorpipe::Listener> listener_;

  mutable std::mutex connectedPipesMutex_;
  std::unordered_map<worker_id_t, ClientPipe> connectedPipes_;

  // Maps keyed on name and id for easy WorkerInfo lookup.
  std::unordered_map<worker_id_t, WorkerInfo> workerIdToInfo_;
  std::unordered_map<std::string, WorkerInfo> workerNameToInfo_;
  std::unordered_map<std::string, std::string> workerNameToURL_;

  ::c10d::PrefixStore rankToNameStore_;
  ::c10d::PrefixStore nameToAddressStore_;
  // Store keys that will be used to count joined processes and active calls
  // during the shutdown process
  ::c10d::PrefixStore shutdownStore_;
  int worldSize_ = 0;
  std::atomic<uint64_t> nextMessageID_{0};

  // Metadata used for tracking whether certain RPCs have timed out or not.
  struct TimeoutMessageMetadata {
    TimeoutMessageMetadata(
        uint64_t messageId_,
        std::shared_ptr<AtomicJitFuture> responseFuture_,
        std::chrono::milliseconds timeout_)
        : messageId(messageId_),
          responseFuture(std::move(responseFuture_)),
          timeout(timeout_) {}
    uint64_t messageId;
    std::shared_ptr<AtomicJitFuture> responseFuture;
    std::chrono::milliseconds timeout;
  };

  // Map to store the expiration times for each message.
  std::map<steady_clock_time_point, std::vector<TimeoutMessageMetadata>>
      timeoutMap_;

  // Map to store the messageId to expiry time.
  std::unordered_map<uint64_t, steady_clock_time_point> messageIdToTimeout_;

  // Thread that will poll the timeoutMap_ for timed out messages and mark them
  // with an error accordingly
  std::thread timeoutThread_;

  // Function run by the timeoutThread_ to check for timed out RPCs
  void pollTimeoutRpcs();

  // Mutex to guard the timeoutMap_
  std::mutex timeoutMapMutex_;

  // Condition variable to signal population of the timeoutMap_
  std::condition_variable timeoutThreadCV_;

  // Returns the expiration time for an RPC by adding the current time to the
  // passed-in timeout.
  inline steady_clock_time_point computeRpcMessageExpiryTime(
      std::chrono::milliseconds timeout) const {
    return std::chrono::time_point_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() + timeout);
  }

  // Handle an error on an outgoing pipe
  void handleClientError(
      ClientPipe& clientPipe,
      const tensorpipe::Error& error);

  // This is a generic struct for capturing time-series metrics. It keeps a
  // running sum and count of data points (observations), and can return an
  // average of the data points seen so far. This is currently only used for
  // tracking the GIL wait time in RPC agents, but can be used for other
  // metrics as well.
  struct TimeSeriesMetricsTracker {
    // Running sum of the data points seen so far
    uint64_t currentSum_;
    // Running count of the data points seen so far
    uint64_t currentCount_;

    explicit TimeSeriesMetricsTracker(
        uint64_t currentSum = 0,
        uint64_t currentCount = 0);

    // Adds a data point (which is basically one observation for the metric
    // being tracked) to the running sum and count.
    void addData(uint64_t dataPoint);
    // Returns the average of all the data points seen so far.
    float computeAverage() const;
  };
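  // Usage amounts to two counters and a division; a sketch with illustrative
  // values:
  //
  //   TimeSeriesMetricsTracker gilWait;
  //   gilWait.addData(120); // e.g. microseconds of GIL wait observed
  //   gilWait.addData(80);
  //   float avg = gilWait.computeAverage(); // (120 + 80) / 2 = 100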

  // Map of time-series metrics tracked by the RPC agent
  std::unordered_map<std::string, TimeSeriesMetricsTracker> timeSeriesMetrics_;
  // Mutex to guard timeSeriesMetrics_
  std::mutex metricsMutex_;

  // Custom lock guard used to check if the RPC group is dynamic and lock the
  // mutex if so
  struct GroupMembershipLockGuard {
    GroupMembershipLockGuard(std::mutex& mutex, bool isStaticGroup)
        : ref_(mutex), isStaticGroup_(isStaticGroup) {
      if (isStaticGroup_) {
        ref_.lock();
      }
    }

    ~GroupMembershipLockGuard() {
      if (isStaticGroup_) {
        ref_.unlock();
      }
    }

    GroupMembershipLockGuard(const GroupMembershipLockGuard&) = delete;

   private:
    std::mutex& ref_;
    bool isStaticGroup_;
  };
  // Mutex to guard access to group membership data
  // e.g. updates to (workerIdToInfo_, workerNameToInfo_, workerNameToURL_)
  mutable std::mutex groupMembershipMutex_;

  // Map to track network data
  NetworkDataDict networkData_;
  // Mutex to guard networkData_
  std::mutex networkDataMutex_;

  // A mutex and a cv to guard access to the call counts and watch for changes.
  std::mutex callCountMutex_;
  std::condition_variable callCountCV_;
  // Running total of un-processed, un-errored RPC calls sent
  int32_t clientActiveCalls_{0};
  // Running total of un-processed RPC requests received
  int32_t serverActiveCalls_{0};
  // Running total of RPC requests that will be completed asynchronously
  int32_t serverActiveAsyncCalls_{0};

  // Whether a global graceful shutdown has begun, in which case we'll silence
  // error messages due to remote workers closing their pipes.
  std::atomic<bool> shuttingDown_{false};

  // Helpers to modify the counts while correctly dealing with the mutex and cv.
  void increaseCallCount(int32_t& count);
  void decreaseCallCount(int32_t& count);

  // Helpers to set the state of the requests.
  void markFutureAsComplete(
      std::shared_ptr<AtomicJitFuture> atomicFuture,
      c10::intrusive_ptr<Message> message,
      std::vector<c10::Stream> streams);
  void markFutureWithError(
      std::shared_ptr<AtomicJitFuture> atomicFuture,
      std::string errorMsg);
};

} // namespace rpc
} // namespace distributed
} // namespace torch

#endif // USE_TENSORPIPE
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h
ADDED
@@ -0,0 +1,90 @@
#pragma once

#include <c10/core/Device.h>
#include <c10/core/Event.h>
#include <c10/core/Stream.h>
#include <torch/csrc/autograd/profiler.h>
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
#include <torch/csrc/jit/serialization/pickle.h>
#include <torch/csrc/utils/byte_order.h>

namespace torch {
namespace distributed {
namespace rpc {

// Parse the error message and return an RPCErrorType based on the message.
TORCH_API RPCErrorType getRPCErrorType(const JitFuture& jitFuture);
// Create an error string given the error description and error type
TORCH_API std::string makeRPCError(
    const std::string& rpcErrorStr,
    RPCErrorType errorType);

// Given an RPC message received as a request over the wire, deserialize it into
// the appropriate 'RpcCommandBase' type.
TORCH_API std::unique_ptr<RpcCommandBase> deserializeRequest(
    const Message& request);

// Given an RPC message received as a response over the wire, deserialize it
// into the appropriate 'RpcCommandBase' type. If the response is of
// FORWARD_AUTOGRAD_RESP type, unwrap it, attach recvBackward() functions
// to received tensors and set wrappedMsgType to its wrapped message type.
TORCH_API std::unique_ptr<RpcCommandBase> deserializeResponse(
    const Message& response,
    MessageType& wrappedMsgType);

// Given an RPC message received as a response over the wire, deserialize it
// into a valid IValue if the message is for a script rpc result,
// otherwise deserialize it into a dummy None IValue that will never be used.
// In this deserialization, we also attach recv rpc backward functions if
// needed.
IValue deserializeResptoIValueInternal(
    RpcCommandBase& rpc,
    MessageType messageType);
TORCH_API IValue deserializeRespToIValue(const Message& message);

// Note: format is subject to change and intended for RPCs.
// For saving persistently to disk, use torch::save().
TORCH_API std::string wireSerialize(
    const std::vector<char>& payload,
    const std::vector<at::Tensor>& tensors);

TORCH_API std::pair<std::vector<char>, std::vector<at::Tensor>> wireDeserialize(
    const void* data,
    size_t data_size);
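// A round trip through this format recovers both parts; a minimal sketch
// (the local variables are illustrative):
//
//   std::vector<char> payload = {1, 2, 3};
//   std::vector<at::Tensor> tensors = {torch::ones({2, 2})};
//   std::string wire = wireSerialize(payload, tensors);
//   auto [payload2, tensors2] = wireDeserialize(wire.data(), wire.size());
//   // payload2 equals payload; tensors2 holds a tensor equal to tensors[0].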

// We use vector<char> as the type of blobs because it's what rpc::Message uses
// for its payload, even though it has the disadvantage that it cannot be
// allocated with uninitialized memory: it is always zeroed out.

// Some Tensors are effectively views of larger Tensors, where only a small
// subset of the Storage data is referenced. This is normally good and avoids
// copies when kept locally, but if we naively push the whole Storage over the
// wire, we'll end up with excess network traffic. This function clones tensors
// if we'd save at least half the data, and over a minimum hurdle.
TORCH_API c10::List<at::Tensor> cloneSparseTensors(
    const std::vector<at::Tensor>& tensors);
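// For example (illustrative numbers): a 10-element view into a Storage of
// 1,000,000 elements clears both thresholds, so the view is cloned and only
// about 10 elements cross the wire instead of the whole Storage.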

// Combines an original payload and wrapped payload into the original payload.
// Used to generate the overall payload for the wrapped RPC.
TORCH_API void writeWrappedPayload(
    std::vector<char>& originalPayload,
    std::vector<char>& additionalPayload);

// Reads the additional, wrapped payload from a wrapped RPC off of the input
// payload. After this, payload will contain the payload of the original,
// un-wrapped RPC.
TORCH_API std::vector<at::IValue> readWrappedPayload(
    std::vector<char>& payload,
    const rpc::Message& message);

// Takes a list of events from the autograd profiler and populates them into
// profiledEvents to be carried over RPC.
TORCH_API void populateRemoteProfiledEvents(
    std::vector<torch::autograd::profiler::LegacyEvent>& profiledEvents,
    const torch::autograd::profiler::ProfilerConfig& profilerConfig,
    const std::vector<std::vector<torch::autograd::profiler::LegacyEvent>>&
        eventLists);

} // namespace rpc
} // namespace distributed
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/function_schema_parser.h
ADDED
@@ -0,0 +1,17 @@
#pragma once

#include <ATen/core/function_schema.h>
#include <c10/macros/Macros.h>
#include <string>
#include <variant>

namespace torch {
namespace jit {

TORCH_API std::variant<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName(
    const std::string& schemaOrName);
TORCH_API c10::FunctionSchema parseSchema(const std::string& schema);
TORCH_API c10::OperatorName parseName(const std::string& name);
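// Usage sketch (the schema string is illustrative, not a real operator):
//
//   c10::FunctionSchema s =
//       parseSchema("my::relu_clip(Tensor self, float max=6.0) -> Tensor");
//   // s.name() == "my::relu_clip" and s.arguments().size() == 2.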

} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/name_mangler.h
ADDED
@@ -0,0 +1,27 @@
#pragma once

#include <ATen/core/qualified_name.h>
#include <torch/csrc/Export.h>

namespace torch {
namespace jit {

/**
 * class NameMangler
 *
 * Utility to mangle qualified names in order to make them unique. We use this
 * in various places where we need to de-duplicate qualified names.
 */
class TORCH_API NameMangler {
 public:
  // Given a qualified name, return a mangled version that is guaranteed to be
  // unique with respect to previous/future calls of `mangle()` on this name
  // mangler instance.
  c10::QualifiedName mangle(const c10::QualifiedName& name);

 private:
  size_t mangleIndex_ = 0;
};
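// Usage sketch (the exact mangled spelling is an implementation detail; the
// only guarantee is uniqueness per mangler instance):
//
//   NameMangler m;
//   c10::QualifiedName a = m.mangle(c10::QualifiedName("foo.Bar"));
//   c10::QualifiedName b = m.mangle(c10::QualifiedName("foo.Bar"));
//   // a and b are distinct qualified names.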

} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parser.h
ADDED
@@ -0,0 +1,33 @@
#pragma once
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/frontend/tree.h>
#include <torch/csrc/jit/frontend/tree_views.h>
#include <memory>

namespace torch {
namespace jit {

struct Decl;
struct ParserImpl;
struct Lexer;

TORCH_API Decl mergeTypesFromTypeComment(
    const Decl& decl,
    const Decl& type_annotation_decl,
    bool is_method);

struct TORCH_API Parser {
  explicit Parser(const std::shared_ptr<Source>& src);
  TreeRef parseFunction(bool is_method);
  TreeRef parseClass();
  Decl parseTypeComment();
  Expr parseExp();
  Lexer& lexer();
  ~Parser();

 private:
  std::unique_ptr<ParserImpl> pImpl;
};

} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/schema_type_parser.h
ADDED
@@ -0,0 +1,40 @@
#pragma once

#include <ATen/core/alias_info.h>
#include <ATen/core/jit_type.h>
#include <c10/macros/Macros.h>
#include <c10/util/FunctionRef.h>
#include <torch/csrc/jit/frontend/lexer.h>

namespace torch {
namespace jit {

using TypePtr = c10::TypePtr;

struct TORCH_API SchemaTypeParser {
  TypePtr parseBaseType();
  c10::optional<c10::AliasInfo> parseAliasAnnotation();
  std::pair<TypePtr, c10::optional<c10::AliasInfo>> parseType();
  std::tuple</*fake*/ TypePtr, /*real*/ TypePtr, c10::optional<c10::AliasInfo>>
      parseFakeAndRealType();
  c10::optional<at::ScalarType> parseTensorDType(const std::string& dtype);
  TypePtr parseRefinedTensor();

  SchemaTypeParser(Lexer& L, bool parse_complete_tensor_types)
      : complete_tensor_types(parse_complete_tensor_types), L(L) {}

 private:
  c10::optional<bool> tryToParseRequiresGrad();
  c10::optional<c10::Device> tryToParseDeviceType();
  void parseList(
      int begin,
      int sep,
      int end,
      c10::function_ref<void()> callback);

  bool complete_tensor_types;
  Lexer& L;
  size_t next_id = 0;
};
} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/check_alias_annotation.h
ADDED
@@ -0,0 +1,22 @@
#pragma once

#include <ATen/core/ivalue.h>
#include <torch/csrc/jit/ir/ir.h>
#include <memory>
#include <string>
#include <vector>

namespace torch {
namespace jit {

// Verify that alias annotations are correct. See impl for definition of
// "correct".
//
// This function expects a graph with a single op with `unqualifiedOpName`, plus
// the inputs that you would otherwise have passed to the graph executor.
TORCH_API void checkAliasAnnotation(
    const std::shared_ptr<Graph>& graph,
    std::vector<IValue> pythonInputs,
    const std::string& unqualifiedOpName);
} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/passes/utils/memory_dag.h
ADDED
@@ -0,0 +1,175 @@
#pragma once

#include <ATen/core/jit_type.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Optional.h>
#include <c10/util/flat_hash_map.h>
#include <c10/util/sparse_bitset.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/type_hashing.h>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include <torch/csrc/Export.h>

// Uses a compressed index representation for faster comparisons
typedef c10::SparseBitVector<256> MemoryLocations;
namespace torch {
namespace jit {

struct Element;
struct Value;
class MemoryDAG;

using AliasTypeSet = std::vector<TypePtr>;

/**
 * Helper to build up the points-to graph.
 *
 * We separate the "building" into a different class because it allows us to
 * cache internally to MemoryDAG without worrying about how the DAG structure
 * is mutated.
 */
class TORCH_API MemoryDAGBuilder {
 public:
  MemoryDAGBuilder() = default;
  MemoryDAGBuilder(const MemoryDAGBuilder&) = delete;
  MemoryDAGBuilder& operator=(const MemoryDAGBuilder&) = delete;

  // Make `from` point at `to`.
  void makePointerTo(Element* from, Element* to);

  void addToContainedElements(Element* contained, Element* container);

  // Make a fresh Element (i.e. an Element that doesn't point to anything) and
  // return it.
  Element* makeFreshValue(const Value* v);

  friend MemoryDAG;

 private:
  // `MemoryDAGBuilder` builds up `indexToElementMap_`, then uses
  // the map to construct the `MemoryDAG`
  std::vector<std::unique_ptr<Element>> indexToElementMap_;
};

// class MemoryDAG
//
// This class tracks the "A points to B" graph for all values. It is used by
// AliasDb to provide a higher-level API.
//
// We maintain a DAG where:
// - Vertices (called "Elements") represent Values and
//   other aliasing entities (e.g. the stuff inside a list)
// - Edges represent a "points-to" relationship.
//
// Leaves in this DAG are entities that don't point to anything, and thus
// correspond to unique "memory locations".
//
// So, by traversing the "points-to" graph to the leaves, you can determine
// which memory locations an element may point to.
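// To make that concrete, the memory locations of an element are the union of
// the leaves reachable from it. A minimal sketch of the traversal
// (collectLeaves is illustrative and not part of this header; the real
// implementation memoizes its results, see Element below):
//
//   void collectLeaves(
//       const MemoryDAG& dag,
//       const Element* e,
//       std::unordered_set<const Element*>& out) {
//     if (e->pointsTo.empty()) { // a leaf, i.e. a unique memory location
//       out.insert(e);
//     } else {
//       for (unsigned idx : e->pointsTo) { // indices into the owning DAG
//         collectLeaves(dag, dag.fromIndex(idx), out);
//       }
//     }
//   }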
class TORCH_API MemoryDAG {
 public:
  explicit MemoryDAG(std::unique_ptr<MemoryDAGBuilder> builder)
      : indexToElementMap_(std::move(builder->indexToElementMap_)) {}
  // explicitly delete copy constructor because otherwise windows build is
  // confused for an exported class see
  // https://stackoverflow.com/a/51033485/105137
  MemoryDAG(const MemoryDAG&) = delete;
  MemoryDAG& operator=(const MemoryDAG&) = delete;

  // Return the unique memory locations that `Element` might represent.
  const MemoryLocations& getMemoryLocations(const Element* e) const;

  // Do `a` and `b` potentially share a memory location?
  bool mayAlias(const Element* a, const Element* b) const;

  // Does `a` hold reference to any memory that is stored in `b`, or vice versa?
  bool mayContainAlias(const Element* a, const Element* b) const;

  bool mayContainAlias(const Element* a, const at::ArrayRef<Element*> b) const;

  bool mayContainAlias(
      const at::ArrayRef<Element*> a,
      const at::ArrayRef<Element*> b) const;

  // Converts from the compressed index representation
  const Element* fromIndex(unsigned x) const;
  Element* fromIndex(unsigned x);
  void collectAllContainedMemoryLocations(
      const Element* elem,
      MemoryLocations& cont) const;

  /**
   * The following methods are special cases where we need to mutate the
   * internals of MemoryDAG for efficiency reasons. Don't call them unless you
   * know what you're doing! In particular, don't add new mutating methods
   * without ensuring that you are maintaining cache consistency for memory
   * locations.
   */

  // Adding wildcards can trigger extremely expensive cache invalidations. This
  // method adds them in a more efficient cache-aware way.
  void setWildcards(
      const std::unordered_set<const Value*>& wildcards,
      const ska::flat_hash_map<const Value*, Element*>& elementMap,
      const std::function<Element*(const Value*)>& getWildcardElement);
  Element* unsafeMakeFreshValue(const Value* v);

 private:
  const MemoryLocations& getAllContainedMemoryLocations(
      const Element* elem) const;
  void collectAllContainedMemoryLocationsImpl(
      const Element* elem,
      MemoryLocations& cont) const;
  std::vector<std::unique_ptr<Element>> indexToElementMap_;
};

// `Element` represents a vertex in the points-to graph. It represents
// anything that could have an aliasing relationship--mostly IR
// `Value`s, but also wildcards or the type inside a container (e.g. `T`
// in `List[T]`)
struct Element {
  Element(const Value* value_, unsigned index_);
  // wildcard constructor
  explicit Element(unsigned index_);

  // Index into the owning DAG's bit vector that represents this element.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  unsigned index;

  // All elements that this element *may* point to. It's possible to have
  // multiple elements that you might point to due to control flow/complex ops
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  MemoryLocations pointsTo;
  // Backreference for points-to.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  MemoryLocations pointedFrom;

  // Elements can contain other elements (e.g. List[Tensor])
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  MemoryLocations containedElements;

  // The values that this element corresponds to. May be empty if this element
  // doesn't represent a first-class value.
  // This is for debug information only.
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  std::unordered_set<const Value*> values;

 private:
  // Make `from` point at `to`.
  void makePointerTo(Element* from, Element* to);

  friend class MemoryDAG;
  // We memoize the results of `getMemoryLocations` to speed up queries.
  // A nullopt means that this cache is not yet populated. Since `MemoryDAG` is
  // immutable, this cache should never need to be invalidated.
  mutable c10::optional<MemoryLocations> cachedMemoryLocations_;

  mutable c10::optional<MemoryLocations> cachedAllContainedMemoryLocations_;
};

} // namespace jit
} // namespace torch
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/analysis.h
ADDED
|
@@ -0,0 +1,406 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
|
| 5 |
+
#include <torch/csrc/jit/tensorexpr/stmt.h>
|
| 6 |
+
#include <torch/csrc/jit/tensorexpr/tensor.h>
|
| 7 |
+
|
| 8 |
+
#include <utility>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
namespace jit {
|
| 12 |
+
namespace tensorexpr {
|
| 13 |
+
class HasRand : public IRVisitor {
|
| 14 |
+
public:
|
| 15 |
+
HasRand(StmtPtr stmt) : stmt_(std::move(stmt)) {
|
| 16 |
+
stmt_->accept(this);
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
bool has_rand() const {
|
| 20 |
+
return has_rand_;
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
private:
|
| 24 |
+
void visit(IntrinsicsPtr v) override {
|
| 25 |
+
if (v->op_type() == IntrinsicsOp::kRand) {
|
| 26 |
+
has_rand_ = true;
|
| 27 |
+
} else {
|
| 28 |
+
IRVisitor::visit(std::move(v));
|
| 29 |
+
}
|
| 30 |
+
}
|
| 31 |
+
StmtPtr stmt_;
|
| 32 |
+
bool has_rand_ = false;
|
| 33 |
+
};
|
| 34 |
+
|
| 35 |
+
template <typename Op>
|
| 36 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 37 |
+
class NodeFinder : public IRVisitor {
|
| 38 |
+
public:
|
| 39 |
+
void visit(NodePtr<Op> v) override {
|
| 40 |
+
nodes.push_back((NodePtr<Op>)v);
|
| 41 |
+
IRVisitor::visit(v);
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
static std::vector<NodePtr<Op>> find(StmtPtr s) {
|
| 45 |
+
NodeFinder<Op> nf;
|
| 46 |
+
s->accept(&nf);
|
| 47 |
+
return nf.nodes;
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
static std::vector<NodePtr<Op>> find(ExprPtr e) {
|
| 51 |
+
NodeFinder<Op> nf;
|
| 52 |
+
e->accept(&nf);
|
| 53 |
+
return nf.nodes;
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
std::vector<NodePtr<Op>> nodes;
|
| 57 |
+
};
|
| 58 |
+
|
| 59 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 60 |
+
class VarFinder : public IRVisitor {
|
| 61 |
+
public:
|
| 62 |
+
void visit(VarPtr v) override {
|
| 63 |
+
vars_.insert(v);
|
| 64 |
+
IRVisitor::visit(std::move(v));
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
static std::unordered_set<VarPtr> find(StmtPtr s) {
|
| 68 |
+
VarFinder nf;
|
| 69 |
+
s->accept(&nf);
|
| 70 |
+
return nf.vars();
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
static std::unordered_set<VarPtr> find(ExprPtr e) {
|
| 74 |
+
VarFinder nf;
|
| 75 |
+
e->accept(&nf);
|
| 76 |
+
return nf.vars();
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
const std::unordered_set<VarPtr>& vars() {
|
| 80 |
+
return vars_;
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
private:
|
| 84 |
+
std::unordered_set<VarPtr> vars_;
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
class BufFinder : public IRVisitor {
|
| 88 |
+
public:
|
| 89 |
+
void visit(BufPtr v) override {
|
| 90 |
+
bufs_.insert(v);
|
| 91 |
+
IRVisitor::visit(std::move(v));
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
static std::unordered_set<BufPtr> find(StmtPtr s) {
|
| 95 |
+
BufFinder nf;
|
| 96 |
+
s->accept(&nf);
|
| 97 |
+
return nf.bufs();
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
static std::unordered_set<BufPtr> find(ExprPtr e) {
|
| 101 |
+
BufFinder nf;
|
| 102 |
+
e->accept(&nf);
|
| 103 |
+
return nf.bufs();
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
const std::unordered_set<BufPtr>& bufs() {
|
| 107 |
+
return bufs_;
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
private:
|
| 111 |
+
std::unordered_set<BufPtr> bufs_;
|
| 112 |
+
};
|
| 113 |
+
|
| 114 |
+
// Finds all kinds of write operations to the provided Buf.
|
| 115 |
+
class WritesToBuf : public IRVisitor {
|
| 116 |
+
public:
|
| 117 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 118 |
+
WritesToBuf(BufPtr target) : target_(std::move(target)) {}
|
| 119 |
+
|
| 120 |
+
std::vector<StmtPtr> writes() {
|
| 121 |
+
return writes_;
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
static std::vector<StmtPtr> find(StmtPtr s, BufPtr b) {
|
| 125 |
+
WritesToBuf finder(std::move(b));
|
| 126 |
+
s->accept(&finder);
|
| 127 |
+
return finder.writes();
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
private:
|
| 131 |
+
void visit(StorePtr v) override {
|
| 132 |
+
if (v->buf() == target_) {
|
| 133 |
+
writes_.push_back(v);
|
| 134 |
+
}
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
void visit(AtomicAddPtr v) override {
|
| 138 |
+
if (v->buf() == target_) {
|
| 139 |
+
writes_.push_back(v);
|
| 140 |
+
}
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
BufPtr target_;
|
| 144 |
+
std::vector<StmtPtr> writes_;
|
| 145 |
+
};
|
| 146 |
+
|
| 147 |
+
class StmtsReadingBuf : public IRVisitor {
|
| 148 |
+
public:
|
| 149 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 150 |
+
StmtsReadingBuf(BufPtr target) : target_(std::move(target)) {}
|
| 151 |
+
|
| 152 |
+
std::vector<StmtPtr> reads() {
|
| 153 |
+
return reads_;
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
static std::vector<StmtPtr> find(StmtPtr s, BufPtr b) {
|
| 157 |
+
StmtsReadingBuf finder(std::move(b));
|
| 158 |
+
s->accept(&finder);
|
| 159 |
+
return finder.reads();
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
private:
|
| 163 |
+
bool readsBuffer(StmtPtr s) {
|
| 164 |
+
auto loads = NodeFinder<Load>::find(std::move(s));
|
| 165 |
+
for (const auto& l : loads) {
|
| 166 |
+
if (l->buf() == target_) {
|
| 167 |
+
return true;
|
| 168 |
+
}
|
| 169 |
+
}
|
| 170 |
+
return false;
|
| 171 |
+
}
|
| 172 |
+
|
| 173 |
+
void visit(StorePtr v) override {
|
| 174 |
+
if (readsBuffer(v)) {
|
| 175 |
+
reads_.push_back(v);
|
| 176 |
+
}
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
void visit(LetPtr v) override {
|
| 180 |
+
if (readsBuffer(v)) {
|
| 181 |
+
reads_.push_back(v);
|
| 182 |
+
}
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
void visit(CondPtr v) override {
|
| 186 |
+
if (readsBuffer(v)) {
|
| 187 |
+
reads_.push_back(v);
|
| 188 |
+
}
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
void visit(AtomicAddPtr v) override {
|
| 192 |
+
if (readsBuffer(v)) {
|
| 193 |
+
reads_.push_back(v);
|
| 194 |
+
}
|
| 195 |
+
}
|
| 196 |
+
|
| 197 |
+
BufPtr target_;
|
| 198 |
+
std::vector<StmtPtr> reads_;
|
| 199 |
+
};
|
| 200 |
+
|
| 201 |
+
class ExternalAllocBufFinder : public IRVisitor {
|
| 202 |
+
public:
|
| 203 |
+
void visit(ExternalCallWithAllocPtr v) override {
|
| 204 |
+
const auto& bufs_out = v->buf_out_args();
|
| 205 |
+
bufs_.insert(bufs_out.begin(), bufs_out.end());
|
| 206 |
+
IRVisitor::visit(std::move(v));
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
static std::unordered_set<BufPtr> find(StmtPtr s) {
|
| 210 |
+
ExternalAllocBufFinder f;
|
| 211 |
+
s->accept(&f);
|
| 212 |
+
return f.bufs();
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
static std::unordered_set<BufPtr> find(ExprPtr e) {
|
| 216 |
+
ExternalAllocBufFinder f;
|
| 217 |
+
e->accept(&f);
|
| 218 |
+
return f.bufs();
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
const std::unordered_set<BufPtr>& bufs() {
|
| 222 |
+
return bufs_;
|
| 223 |
+
}
|
| 224 |
+
|
| 225 |
+
private:
|
| 226 |
+
std::unordered_set<BufPtr> bufs_;
|
| 227 |
+
};
|
| 228 |
+
|
| 229 |
+
// Traverses the IR to determine if a particular Var is modified within it.
|
| 230 |
+
class ModifiesVarChecker : public IRVisitor {
|
| 231 |
+
public:
|
| 232 |
+
ModifiesVarChecker(VarPtr v) : var_(std::move(v)) {}
|
| 233 |
+
|
| 234 |
+
static bool check(StmtPtr s, VarPtr v) {
|
| 235 |
+
ModifiesVarChecker checker(std::move(v));
|
| 236 |
+
s->accept(&checker);
|
| 237 |
+
return checker.found();
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
bool found() {
|
| 241 |
+
return found_;
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
private:
|
| 245 |
+
void visit(StorePtr v) override {
|
| 246 |
+
if (v->buf()->base_handle() == var_) {
|
| 247 |
+
found_ = true;
|
| 248 |
+
return;
|
| 249 |
+
}
|
| 250 |
+
IRVisitor::visit(std::move(v));
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
void visit(AtomicAddPtr v) override {
|
| 254 |
+
if (v->buf()->base_handle() == var_) {
|
| 255 |
+
found_ = true;
|
| 256 |
+
return;
|
| 257 |
+
}
|
| 258 |
+
IRVisitor::visit(std::move(v));
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
void visit(LetPtr v) override {
|
| 262 |
+
if (v->var() == var_) {
|
| 263 |
+
found_ = true;
|
| 264 |
+
return;
|
| 265 |
+
}
|
| 266 |
+
IRVisitor::visit(std::move(v));
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
void visit(ForPtr v) override {
|
| 270 |
+
if (v->var() == var_) {
|
| 271 |
+
found_ = true;
|
| 272 |
+
return;
|
| 273 |
+
}
|
| 274 |
+
IRVisitor::visit(std::move(v));
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
VarPtr var_;
|
| 278 |
+
bool found_{false};
|
| 279 |
+
};
|
| 280 |
+
|
| 281 |
+
// Traverse the Block stmt to identify the live range of the specified buf. The
|
| 282 |
+
// live range, indicated by a pair of integers, specifies the first and last
|
| 283 |
+
// stmt in block stmts that access to the buf.
|
| 284 |
+
class BufLiveRange : public IRVisitor {
|
| 285 |
+
public:
|
| 286 |
+
BufLiveRange(BufPtr b) : buf_(std::move(b)) {}
|
| 287 |
+
|
| 288 |
+
static std::tuple<int32_t, int32_t> liveRange(StmtPtr s, BufPtr b) {
|
| 289 |
+
BlockPtr block = to<Block>(std::move(s));
|
| 290 |
+
// We Only analyze buffer live ranges for block stmts.
|
| 291 |
+
if (!block) {
|
| 292 |
+
return std::make_tuple(0, 0);
|
| 293 |
+
}
|
| 294 |
+
|
| 295 |
+
BufLiveRange analyzer(std::move(b));
|
| 296 |
+
block->accept(&analyzer);
|
| 297 |
+
return analyzer.getLiveRange();
|
| 298 |
+
}
|
| 299 |
+
|
| 300 |
+
private:
|
| 301 |
+
std::tuple<int32_t, int32_t> getLiveRange() {
|
| 302 |
+
return std::make_tuple(begin_, end_);
|
| 303 |
+
}
|
| 304 |
+
|
| 305 |
+
bool hasBufReads(StmtPtr s) {
|
| 306 |
+
auto loads1 = NodeFinder<Load>::find(s);
|
| 307 |
+
for (const auto& l : loads1) {
|
| 308 |
+
if (l->buf() == buf_) {
|
| 309 |
+
return true;
|
| 310 |
+
}
|
| 311 |
+
}
|
| 312 |
+
auto loads2 = NodeFinder<ExternalCall>::find(s);
|
| 313 |
+
for (const auto& l : loads2) {
|
| 314 |
+
for (const auto& lb : l->buf_args()) {
|
| 315 |
+
if (lb == buf_) {
|
| 316 |
+
return true;
|
| 317 |
+
}
|
| 318 |
+
}
|
| 319 |
+
}
|
| 320 |
+
auto loads3 = NodeFinder<ExternalCallWithAlloc>::find(std::move(s));
|
| 321 |
+
for (const auto& l : loads3) {
|
| 322 |
+
for (const auto& lb : l->buf_args()) {
|
| 323 |
+
if (lb == buf_) {
|
| 324 |
+
return true;
|
| 325 |
+
}
|
| 326 |
+
}
|
| 327 |
+
}
|
| 328 |
+
return false;
|
| 329 |
+
}
|
| 330 |
+
|
| 331 |
+
bool hasBufWrites(StmtPtr s) {
|
| 332 |
+
auto writes1 = NodeFinder<Store>::find(s);
|
| 333 |
+
for (const auto& w : writes1) {
|
| 334 |
+
if (w->buf() == buf_) {
|
| 335 |
+
return true;
|
| 336 |
+
}
|
| 337 |
+
}
|
| 338 |
+
auto writes2 = NodeFinder<ExternalCall>::find(s);
|
| 339 |
+
for (const auto& w : writes2) {
|
| 340 |
+
if (w->buf() == buf_) {
|
| 341 |
+
return true;
|
| 342 |
+
}
|
| 343 |
+
}
|
| 344 |
+
auto writes3 = NodeFinder<ExternalCallWithAlloc>::find(std::move(s));
|
| 345 |
+
for (const auto& w : writes3) {
|
| 346 |
+
for (const auto& wb : w->buf_out_args()) {
|
| 347 |
+
if (wb == buf_) {
|
| 348 |
+
return true;
|
| 349 |
+
}
|
| 350 |
+
}
|
| 351 |
+
}
|
| 352 |
+
return false;
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
void findAccAndUpdateLiveRange(StmtPtr s) {
|
| 356 |
+
bool has_reads = hasBufReads(s), has_writes = hasBufWrites(std::move(s));
|
| 357 |
+
if (has_reads || has_writes) {
|
| 358 |
+
if (begin_ == -1) {
|
| 359 |
+
begin_ = curr_index_;
|
| 360 |
+
};
|
| 361 |
+
end_ = curr_index_;
|
| 362 |
+
}
|
| 363 |
+
}
|
| 364 |
+
|
| 365 |
+
void visit(BlockPtr v) override {
|
| 366 |
+
for (const StmtPtr& s : *v) {
|
| 367 |
+
curr_index_ += 1;
|
| 368 |
+
findAccAndUpdateLiveRange(s);
|
| 369 |
+
}
|
| 370 |
+
}
|
| 371 |
+
|
| 372 |
+
BufPtr buf_;
|
| 373 |
+
int32_t begin_ = -1;
|
| 374 |
+
int32_t end_ = -1;
|
| 375 |
+
int32_t curr_index_ = -1;
|
| 376 |
+
};
|
| 377 |
+
|
| 378 |
+
// A class that analyzes the given program relevant for Block backend
|
| 379 |
+
// It creates a map of multi dim buffers and their flat versions
|
| 380 |
+
class CreateBufferMap : public IRVisitor {
|
| 381 |
+
public:
|
| 382 |
+
const std::unordered_map<std::string, BufPtr>& getBufferMap() const {
|
| 383 |
+
return map_input_to_tensor_bufs_;
|
| 384 |
+
}
|
| 385 |
+
|
| 386 |
+
private:
|
| 387 |
+
void visit(StorePtr v) override {
|
| 388 |
+
auto load_node = to<Load>(v->value());
|
| 389 |
+
if (load_node) {
|
| 390 |
+
auto t_buf = load_node->buf();
|
| 391 |
+
map_input_to_tensor_bufs_.emplace(t_buf->name_hint(), v->buf());
|
| 392 |
+
} else {
|
| 393 |
+
auto add_node = to<Add>(v->value());
|
| 394 |
+
auto mul_node = to<Mul>(v->value());
|
| 395 |
+
// This means for now, v->value() can be Add or Mul
|
| 396 |
+
TORCH_INTERNAL_ASSERT(add_node || mul_node, buildErrorMessage());
|
| 397 |
+
map_input_to_tensor_bufs_.emplace(v->buf()->name_hint(), v->buf());
|
| 398 |
+
}
|
| 399 |
+
v->value()->accept(this);
|
| 400 |
+
}
|
| 401 |
+
std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
|
| 402 |
+
};
|
| 403 |
+
|
| 404 |
+
} // namespace tensorexpr
|
| 405 |
+
} // namespace jit
|
| 406 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/block_codegen.h
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <string>
|
| 4 |
+
#include <unordered_map>
|
| 5 |
+
#include <unordered_set>
|
| 6 |
+
#include <utility>
|
| 7 |
+
|
| 8 |
+
#include <ATen/ATen.h>
|
| 9 |
+
#include <torch/csrc/jit/resource_guard.h>
|
| 10 |
+
#include <torch/csrc/jit/tensorexpr/analysis.h>
|
| 11 |
+
#include <torch/csrc/jit/tensorexpr/codegen.h>
|
| 12 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 13 |
+
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
|
| 14 |
+
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
|
| 15 |
+
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
|
| 16 |
+
|
| 17 |
+
namespace torch {
|
| 18 |
+
namespace jit {
|
| 19 |
+
namespace tensorexpr {
|
| 20 |
+
|
| 21 |
+
// A class that analyzes the given program relevant for Block backend.
|
| 22 |
+
class BlockAnalysis : public IRVisitor {
|
| 23 |
+
public:
|
| 24 |
+
bool is_buf_store_target(BufPtr buf) const {
|
| 25 |
+
return store_targets_.count(buf) > 0;
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
const std::unordered_set<BufPtr>& loads() const {
|
| 29 |
+
return loads_;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
const std::unordered_set<BufPtr>& stores() const {
|
| 33 |
+
return store_targets_;
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
int block_size() const {
|
| 37 |
+
return block_size_;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
bool areBufsInMap(const std::unordered_set<BufPtr>& bufs) const;
|
| 41 |
+
|
| 42 |
+
BufPtr getMultiDimBuf(BufPtr buf) const;
|
| 43 |
+
|
| 44 |
+
std::string getInputName(BufPtr buf) const;
|
| 45 |
+
|
| 46 |
+
std::string getFlatInputName(BufPtr buf) const {
|
| 47 |
+
return getInputName(std::move(buf)) + "_flat";
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
std::unordered_map<std::string, BufPtr> getBufferMap() const {
|
| 51 |
+
return map_input_to_tensor_bufs_;
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
private:
|
| 55 |
+
void visit(StorePtr v) override;
|
| 56 |
+
void visit(LoadPtr v) override;
|
| 57 |
+
void visit(ForPtr v) override;
|
| 58 |
+
|
| 59 |
+
std::unordered_map<std::string, BufPtr> map_input_to_tensor_bufs_;
|
| 60 |
+
std::unordered_set<BufPtr> store_targets_;
|
| 61 |
+
std::unordered_set<BufPtr> loads_;
|
| 62 |
+
int block_size_ = 32;
|
| 63 |
+
};
|
| 64 |
+
|
| 65 |
+
// A class that overrides the underlying IRPrinter to produce Block.
|
| 66 |
+
class BlockPrinter : public IRPrinter {
|
| 67 |
+
public:
|
| 68 |
+
BlockPrinter(std::ostream* os, BlockAnalysis* block_analysis)
|
| 69 |
+
: IRPrinter(*os), block_analysis_(block_analysis) {}
|
| 70 |
+
|
| 71 |
+
using IRPrinter::name_manager;
|
| 72 |
+
using IRPrinter::visit;
|
| 73 |
+
|
| 74 |
+
private:
|
| 75 |
+
BlockAnalysis* block_analysis_;
|
| 76 |
+
std::unordered_map<std::string, int> dim_values_map;
|
| 77 |
+
std::vector<std::string> dim_names = {"N", "H", "W", "C"};
|
| 78 |
+
std::vector<std::string> flat_dim_names = {"N", "NH", "NHW", "NHWC"};
|
| 79 |
+
void PrintTensorInfo(const std::unordered_set<BufPtr>& bufs);
|
| 80 |
+
void PrintArguments(const std::unordered_set<BufPtr>& bufs);
|
| 81 |
+
void PrintBufferInfo(const std::unordered_set<BufPtr>& bufs);
|
| 82 |
+
void PrintDistribution(const std::unordered_set<BufPtr>& bufs);
|
| 83 |
+
void PrintLoop(const std::unordered_set<BufPtr>& bufs, bool block_idx = true);
|
| 84 |
+
void PrintReshapeInfo(
|
| 85 |
+
const std::unordered_set<BufPtr>& bufs,
|
| 86 |
+
bool reverse = false);
|
| 87 |
+
void PrintDMAs(const std::unordered_set<BufPtr>& bufs);
|
| 88 |
+
void PrintAdjustBuffers(const std::unordered_set<BufPtr>& bufs);
|
| 89 |
+
|
| 90 |
+
void visit(ForPtr v) override;
|
| 91 |
+
void visit(LoadPtr v) override;
|
| 92 |
+
void visit(StorePtr v) override;
|
| 93 |
+
void visit(BlockPtr v) override;
|
| 94 |
+
void visit(AddPtr v) override;
|
| 95 |
+
void visit(MulPtr v) override;
|
| 96 |
+
};
|
| 97 |
+
|
| 98 |
+
class TORCH_API BlockCodeGen : public CodeGen {
|
| 99 |
+
public:
|
| 100 |
+
template <typename... Ts>
|
| 101 |
+
/* implicit */
|
| 102 |
+
BlockCodeGen(StmtPtr stmt, Ts... ts)
|
| 103 |
+
: CodeGen(
|
| 104 |
+
stmt,
|
| 105 |
+
std::vector<BufferArg>({BufferArg(ts)...}),
|
| 106 |
+
at::Device(at::kCPU)) {
|
| 107 |
+
Initialize();
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
BlockCodeGen(
|
| 111 |
+
StmtPtr stmt,
|
| 112 |
+
const std::vector<BufferArg>& buffer_args,
|
| 113 |
+
at::Device device = at::Device(at::kCPU),
|
| 114 |
+
const std::string& kernel_func_name = "func")
|
| 115 |
+
: CodeGen(stmt, buffer_args, device, kernel_func_name) {
|
| 116 |
+
Initialize();
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
~BlockCodeGen() override;
|
| 120 |
+
|
| 121 |
+
void call(const std::vector<CallArg>& args) override;
|
| 122 |
+
void call_raw(const std::vector<void*>& args) override;
|
| 123 |
+
|
| 124 |
+
void Initialize();
|
| 125 |
+
|
| 126 |
+
std::string getCodeText(const std::string& attr = "") override {
|
| 127 |
+
return oss_.str();
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
private:
|
| 131 |
+
UniqueNameManager* name_manager() {
|
| 132 |
+
if (!printer_) {
|
| 133 |
+
throw std::runtime_error("Null IRPrinter is not expected");
|
| 134 |
+
}
|
| 135 |
+
return printer_->name_manager();
|
| 136 |
+
}
|
| 137 |
+
|
| 138 |
+
std::ostream& os() {
|
| 139 |
+
return printer_->os();
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
std::ostringstream oss_;
|
| 143 |
+
std::unique_ptr<BlockPrinter> printer_;
|
| 144 |
+
std::unique_ptr<BlockAnalysis> block_analysis_;
|
| 145 |
+
|
| 146 |
+
std::string GetUniqueFuncName(const std::string& func_prefix);
|
| 147 |
+
};
|
| 148 |
+
} // namespace tensorexpr
|
| 149 |
+
} // namespace jit
|
| 150 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_inference.h
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <map>
|
| 4 |
+
#include <unordered_map>
|
| 5 |
+
#include <vector>
|
| 6 |
+
|
| 7 |
+
#include <torch/csrc/Export.h>
|
| 8 |
+
#include <torch/csrc/jit/tensorexpr/mem_dependency_checker.h>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
namespace jit {
|
| 12 |
+
namespace tensorexpr {
|
| 13 |
+
|
| 14 |
+
class Expr;
|
| 15 |
+
class Buf;
|
| 16 |
+
class Stmt;
|
| 17 |
+
|
| 18 |
+
enum C10_API_ENUM TensorAccessKind { kLoad, kStore, kMutate };
|
| 19 |
+
|
| 20 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 21 |
+
struct TORCH_API TensorAccessBoundsInfo {
|
| 22 |
+
TensorAccessKind kind;
|
| 23 |
+
std::vector<ExprPtr> start;
|
| 24 |
+
std::vector<ExprPtr> stop;
|
| 25 |
+
};
|
| 26 |
+
|
| 27 |
+
using BoundsInfo =
|
| 28 |
+
std::unordered_map<BufPtr, std::vector<TensorAccessBoundsInfo>>;
|
| 29 |
+
|
| 30 |
+
TORCH_API BoundsInfo inferBounds(StmtPtr s, bool distinctAccessKinds = true);
|
| 31 |
+
|
| 32 |
+
// Bounds inference caching the analysis. The MemDependencyChecker must already
|
| 33 |
+
// have been run.
|
| 34 |
+
TORCH_API BoundsInfo getInferredBounds(
|
| 35 |
+
analysis::MemDependencyChecker& analyzer,
|
| 36 |
+
StmtPtr s,
|
| 37 |
+
bool distinctAccessKinds = true);
|
| 38 |
+
TORCH_API BoundsInfo getInferredBounds(
|
| 39 |
+
analysis::MemDependencyChecker& analyzer,
|
| 40 |
+
ExprPtr e,
|
| 41 |
+
bool distinctAccessKinds = true);
|
| 42 |
+
|
| 43 |
+
TORCH_API void printBoundsInfo(const BoundsInfo& v);
|
| 44 |
+
|
| 45 |
+
TORCH_API std::vector<ExprPtr> getBoundExtents(
|
| 46 |
+
const std::vector<TensorAccessBoundsInfo>& infos);
|
| 47 |
+
|
| 48 |
+
// The kind of dependency found, in increasing order of exclusivity.
|
| 49 |
+
enum class HazardKind {
|
| 50 |
+
ReadAfterWrite,
|
| 51 |
+
WriteAfterRead,
|
| 52 |
+
WriteAfterWrite,
|
| 53 |
+
NoDependency,
|
| 54 |
+
};
|
| 55 |
+
TORCH_API HazardKind getPotentialHazards(
|
| 56 |
+
analysis::MemDependencyChecker& analyzer,
|
| 57 |
+
StmtPtr A,
|
| 58 |
+
StmtPtr B);
|
| 59 |
+
|
| 60 |
+
// Returns true if there is a conflicting overlap between accesses in
|
| 61 |
+
// statements A and B. A conflicting overlap is an overlap in buffer accesses
|
| 62 |
+
// where at least one of the accesses is a Store.
|
| 63 |
+
TORCH_API bool hasConflictingOverlap(
|
| 64 |
+
analysis::MemDependencyChecker& analyzer,
|
| 65 |
+
StmtPtr A,
|
| 66 |
+
StmtPtr B);
|
| 67 |
+
// Same as above, between accesses in stores S1 and S2.
|
| 68 |
+
TORCH_API bool isOverlapping(
|
| 69 |
+
analysis::MemDependencyChecker& analyzer,
|
| 70 |
+
StorePtr S1,
|
| 71 |
+
StorePtr S2);
|
| 72 |
+
// Same as above, between accesses in store S and load L.
|
| 73 |
+
TORCH_API bool isOverlapping(
|
| 74 |
+
analysis::MemDependencyChecker& analyzer,
|
| 75 |
+
StorePtr S,
|
| 76 |
+
LoadPtr L);
|
| 77 |
+
|
| 78 |
+
} // namespace tensorexpr
|
| 79 |
+
} // namespace jit
|
| 80 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/bounds_overlap.h
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/tensorexpr/expr.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 5 |
+
|
| 6 |
+
#include <deque>
|
| 7 |
+
#include <utility>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
namespace jit {
|
| 12 |
+
namespace tensorexpr {
|
| 13 |
+
namespace analysis {
|
| 14 |
+
|
| 15 |
+
// A simple class containing the start and end of a range in a single dimension.
|
| 16 |
+
struct TORCH_API Bound {
|
| 17 |
+
ExprPtr start{nullptr};
|
| 18 |
+
ExprPtr end{nullptr};
|
| 19 |
+
|
| 20 |
+
// This stores whether or not the start and end of this Bound have previously
|
| 21 |
+
// been swapped. This occurs when the bound is in a loop with a negative
|
| 22 |
+
// stride.
|
| 23 |
+
bool swapped{false};
|
| 24 |
+
|
| 25 |
+
Bound() = default;
|
| 26 |
+
Bound(ExprPtr s, ExprPtr e) : start(std::move(s)), end(std::move(e)) {}
|
| 27 |
+
|
| 28 |
+
void print() const;
|
| 29 |
+
bool equals(const Bound& other) const;
|
| 30 |
+
|
| 31 |
+
// The comparison operators are conservative. If the compare operator returns
|
| 32 |
+
// true, it means that all the elements satisfy the logical expression. But
|
| 33 |
+
// the false does not mean the opposite comparison is satisfied. It could be
|
| 34 |
+
// but not always.
|
| 35 |
+
bool operator==(const Bound& other) const;
|
| 36 |
+
bool operator!=(const Bound& other) const;
|
| 37 |
+
bool operator<(const Bound& other) const;
|
| 38 |
+
bool operator<=(const Bound& other) const;
|
| 39 |
+
bool operator>(const Bound& other) const;
|
| 40 |
+
bool operator>=(const Bound& other) const;
|
| 41 |
+
|
| 42 |
+
void swap() {
|
| 43 |
+
std::swap(start, end);
|
| 44 |
+
swapped = !swapped;
|
| 45 |
+
}
|
| 46 |
+
};
|
| 47 |
+
|
| 48 |
+
struct BoundHash {
|
| 49 |
+
size_t operator()(const Bound& b) const {
|
| 50 |
+
return std::hash<ExprPtr>()(b.start) ^ std::hash<ExprPtr>()(b.end);
|
| 51 |
+
}
|
| 52 |
+
};
|
| 53 |
+
|
| 54 |
+
// The type of overlap found. Each condition is true only if none of the
|
| 55 |
+
// previous conditions hold.
|
| 56 |
+
// ContainedOrEqual: All elements in the Bound A are in the Bound B (this
|
| 57 |
+
// includes the case where the bounds are equal).
|
| 58 |
+
// Contains: All elements in the Bound B are in the Bound B.
|
| 59 |
+
// PartialOverlap: Any elements in the Bound B are in the Bound A.
|
| 60 |
+
// NoOverlap: No elements in the Bound A are in the bound B.
|
| 61 |
+
enum class OverlapKind {
|
| 62 |
+
ContainedOrEqual,
|
| 63 |
+
Contains,
|
| 64 |
+
PartialOverlap,
|
| 65 |
+
NoOverlap
|
| 66 |
+
};
|
| 67 |
+
|
| 68 |
+
// The Bound comparison result.
|
| 69 |
+
// True: Every Bound element always satisfies the given comparison operator
|
| 70 |
+
// False: Every Bound element always does NOT satisfy the given comparison
|
| 71 |
+
// operator
|
| 72 |
+
// NotDetermined: Some elements satisfy the given comparison operator and
|
| 73 |
+
// some elements not
|
| 74 |
+
enum class CmpEvalResult { True, False, NotDetermined };
|
| 75 |
+
|
| 76 |
+
// Returns the kind of overlap between Bound A and Bound A in a single
|
| 77 |
+
// dimension.
|
| 78 |
+
OverlapKind TORCH_API boundOverlap(Bound A, Bound B);
|
| 79 |
+
|
| 80 |
+
// The comparison is conservative and the compare result is deterministic.
|
| 81 |
+
// It means that every element of the Bound to be compared needs to satisfy
|
| 82 |
+
// the given comparison operator.
|
| 83 |
+
CmpEvalResult TORCH_API compareBound(
|
| 84 |
+
const Bound& a,
|
| 85 |
+
const Bound& b,
|
| 86 |
+
const CompareSelectOperation& cmp_op);
|
| 87 |
+
|
| 88 |
+
// A multi dimensional bound representing the bound of a set of indices.
|
| 89 |
+
using IndexBounds = std::vector<Bound>;
|
| 90 |
+
|
| 91 |
+
// Returns true if two IndexBounds are equivalent.
|
| 92 |
+
bool TORCH_API indexBoundsEquals(const IndexBounds& A, const IndexBounds& B);
|
| 93 |
+
|
| 94 |
+
// Flattens a multi dimensional bound to a single dimension. The IndexBounds "a"
|
| 95 |
+
// *must* encapsulate the entire range of the buffer.
|
| 96 |
+
Bound TORCH_API flattenBounds(const IndexBounds& a);
|
| 97 |
+
|
| 98 |
+
// Determines the kind of overlap in X dimensions.
|
| 99 |
+
OverlapKind TORCH_API overlaps(const IndexBounds& a, const IndexBounds& b);
|
| 100 |
+
|
| 101 |
+
// Returns the Bound slices created by subtracing bound B from bound A.
|
| 102 |
+
// Multiple Bounds can be returned in the case where B slices A into two
|
| 103 |
+
// distinct regions with no overlap.
|
| 104 |
+
//
|
| 105 |
+
// For example:
|
| 106 |
+
// subtractBound((0, 10), (2, 4)) => [(0, 1), (5, 10)]
|
| 107 |
+
// bound A: (0, 10)
|
| 108 |
+
// bound B: (2, 4)
|
| 109 |
+
// If we remove slice (2, 4) from the slice (0, 10), we will be left
|
| 110 |
+
// with 2 slices, one at the start (0, 1), and one at the end (5, 10).
|
| 111 |
+
// So, the result of this subtraction is [(0, 1), (5, 10)].
|
| 112 |
+
//
|
| 113 |
+
// Note: this doesn't use IndexBounds because the Bounds returned do not
|
| 114 |
+
// represent multiple different dimensions.
|
| 115 |
+
std::vector<Bound> TORCH_API subtractBound(Bound a, Bound b);
|
| 116 |
+
|
| 117 |
+
// Returns the bound slices created by subtracting the IndexBounds B from A.
|
| 118 |
+
std::vector<IndexBounds> TORCH_API subtractIndicesBounds(
|
| 119 |
+
const IndexBounds& A,
|
| 120 |
+
const IndexBounds& B,
|
| 121 |
+
OverlapKind overlap);
|
| 122 |
+
std::vector<IndexBounds> TORCH_API
|
| 123 |
+
subtractIndicesBounds(const IndexBounds& A, const IndexBounds& B);
|
| 124 |
+
|
| 125 |
+
} // namespace analysis
|
| 126 |
+
} // namespace tensorexpr
|
| 127 |
+
} // namespace jit
|
| 128 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/codegen.h
ADDED
|
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/ATen.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 5 |
+
#include <torch/csrc/jit/tensorexpr/tensor.h>
|
| 6 |
+
|
| 7 |
+
#include <utility>
|
| 8 |
+
|
| 9 |
+
namespace torch {
|
| 10 |
+
namespace jit {
|
| 11 |
+
namespace tensorexpr {
|
| 12 |
+
|
| 13 |
+
template <typename T>
|
| 14 |
+
class PaddedBuffer;
|
| 15 |
+
|
| 16 |
+
class TORCH_API CodeGen {
|
| 17 |
+
public:
|
| 18 |
+
class BufferArg;
|
| 19 |
+
class CallArg;
|
| 20 |
+
|
| 21 |
+
template <typename... Ts>
|
| 22 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 23 |
+
CodeGen(StmtPtr stmt, Ts... ts)
|
| 24 |
+
: stmt_(std::move(stmt)), buffer_args_({BufferArg(ts)...}) {}
|
| 25 |
+
|
| 26 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 27 |
+
CodeGen(
|
| 28 |
+
StmtPtr stmt,
|
| 29 |
+
std::vector<BufferArg> buffer_args,
|
| 30 |
+
at::Device device = at::kCPU,
|
| 31 |
+
std::string kernel_func_name = "func");
|
| 32 |
+
|
| 33 |
+
virtual ~CodeGen() = default;
|
| 34 |
+
|
| 35 |
+
StmtPtr stmt() const {
|
| 36 |
+
return stmt_;
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
void set_stmt(StmtPtr s) {
|
| 40 |
+
stmt_ = s;
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
void apply_mutator(IRMutator* mutator) {
|
| 44 |
+
stmt_ = stmt_->accept_mutator(mutator);
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
void apply_visitor(IRVisitor* visitor) {
|
| 48 |
+
stmt_->accept(visitor);
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
std::vector<BufferArg>& buffer_args() {
|
| 52 |
+
return buffer_args_;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
const std::vector<BufferArg>& buffer_args() const {
|
| 56 |
+
return buffer_args_;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
at::Device device() {
|
| 60 |
+
return device_;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
// This function returns the generated code as
|
| 64 |
+
// a string.
|
| 65 |
+
virtual std::string getCodeText(const std::string& attr = "") {
|
| 66 |
+
return ("");
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// TODO: Figure out how to unify these call interfaces.
|
| 70 |
+
|
| 71 |
+
/// Call a function with a vector of CallArgs, which are tagged
|
| 72 |
+
/// unions that properly type the arguments.
|
| 73 |
+
virtual void call(const std::vector<CallArg>& args) = 0;
|
| 74 |
+
|
| 75 |
+
/// Call a function faster than a regular `call` by assuming that
|
| 76 |
+
/// the generated kernel already knows the type of the arguments, so
|
| 77 |
+
/// they can be type-punned with `void*`s.
|
| 78 |
+
virtual void call_raw(const std::vector<void*>& args) = 0;
|
| 79 |
+
|
| 80 |
+
/// Call a function even faster than a regular call, by assuming
|
| 81 |
+
/// that the number of thread blocks can be derived from `numel` via
|
| 82 |
+
/// a simple division, rather than evaluating an expression.
|
| 83 |
+
virtual void call_with_numel(void** args, int64_t numel);
|
| 84 |
+
|
| 85 |
+
virtual at::Tensor empty_strided(
|
| 86 |
+
c10::IntArrayRef size,
|
| 87 |
+
c10::IntArrayRef stride,
|
| 88 |
+
c10::optional<c10::ScalarType> dtype_opt,
|
| 89 |
+
c10::optional<c10::Layout> layout_opt,
|
| 90 |
+
c10::optional<c10::Device> device_opt,
|
| 91 |
+
c10::optional<bool> pin_memory_opt) {
|
| 92 |
+
return at::empty_strided(
|
| 93 |
+
size, stride, dtype_opt, layout_opt, device_opt, pin_memory_opt);
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
const std::string& kernel_func_name() const {
|
| 97 |
+
return kernel_func_name_;
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
void allocIntermediateBufs();
|
| 101 |
+
|
| 102 |
+
protected:
|
| 103 |
+
static void* argToPtr(const BufferArg& bufferArg, const CallArg& callArg);
|
| 104 |
+
|
| 105 |
+
private:
|
| 106 |
+
StmtPtr stmt_;
|
| 107 |
+
std::vector<BufferArg> buffer_args_;
|
| 108 |
+
at::Device device_ = at::kCPU;
|
| 109 |
+
std::string kernel_func_name_ = "func";
|
| 110 |
+
};
|
| 111 |
+
|
| 112 |
+
class TORCH_API ExtCallMemoryReuse : public IRMutator {
|
| 113 |
+
static std::unordered_map<std::string, std::string> makeExtCallFuncNameMap();
|
| 114 |
+
static const std::unordered_map<std::string, std::string> extCallFuncNameMap_;
|
| 115 |
+
|
| 116 |
+
public:
|
| 117 |
+
explicit ExtCallMemoryReuse(
|
| 118 |
+
const std::vector<CodeGen::BufferArg>& bufferArgs);
|
| 119 |
+
~ExtCallMemoryReuse() override = default;
|
| 120 |
+
StmtPtr mutate(ExternalCallPtr v) override;
|
| 121 |
+
|
| 122 |
+
private:
|
| 123 |
+
std::unordered_set<BufPtr> bufferArgs_;
|
| 124 |
+
};
|
| 125 |
+
|
| 126 |
+
class CodeGen::BufferArg {
|
| 127 |
+
public:
|
| 128 |
+
BufferArg(const Tensor& tensor) : buf_(tensor.buf()) {}
|
| 129 |
+
BufferArg(const VarHandle& var) : var_(var.node()), isVar_(true) {}
|
| 130 |
+
BufferArg(const BufHandle& buf) : buf_(buf.node()) {}
|
| 131 |
+
BufferArg(BufPtr buf) : buf_(std::move(buf)) {}
|
| 132 |
+
|
| 133 |
+
VarPtr var() const {
|
| 134 |
+
return isVar_ ? var_ : buf_->base_handle();
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
BufPtr buf() const {
|
| 138 |
+
return buf_;
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
bool isVar() const {
|
| 142 |
+
return isVar_;
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
Dtype dtype() const {
|
| 146 |
+
return isVar_ ? var_->dtype() : buf_->dtype();
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
private:
|
| 150 |
+
VarPtr var_ = nullptr;
|
| 151 |
+
BufPtr buf_ = nullptr;
|
| 152 |
+
bool isVar_ = false;
|
| 153 |
+
};
|
| 154 |
+
|
| 155 |
+
class CodeGen::CallArg {
|
| 156 |
+
public:
|
| 157 |
+
template <typename T>
|
| 158 |
+
CallArg(const PaddedBuffer<T>& buffer);
|
| 159 |
+
|
| 160 |
+
template <typename T>
|
| 161 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,cppcoreguidelines-pro-type-const-cast)
|
| 162 |
+
CallArg(const std::vector<T>& buffer)
|
| 163 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
|
| 164 |
+
: data_(const_cast<T*>(buffer.data())) {}
|
| 165 |
+
|
| 166 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 167 |
+
CallArg(void* ptr) : data_(ptr) {}
|
| 168 |
+
|
| 169 |
+
#define ARG_TYPE_CTOR(Type, Name) \
|
| 170 |
+
CallArg(Type v) { \
|
| 171 |
+
memcpy(buffer_, &v, sizeof(Type)); \
|
| 172 |
+
data_ = (void*)buffer_; \
|
| 173 |
+
}
|
| 174 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 175 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_TYPE_CTOR);
|
| 176 |
+
#undef ARG_TYPE_CTOR
|
| 177 |
+
|
| 178 |
+
void* data() const {
|
| 179 |
+
return data_;
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
CallArg(const CallArg& rhs) {
|
| 183 |
+
if (rhs.data_ == rhs.buffer_) {
|
| 184 |
+
memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_));
|
| 185 |
+
this->data_ = (void*)(this->buffer_);
|
| 186 |
+
} else {
|
| 187 |
+
this->data_ = rhs.data_;
|
| 188 |
+
}
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
CallArg& operator=(const CallArg& rhs) {
|
| 192 |
+
if (rhs.data_ == rhs.buffer_) {
|
| 193 |
+
memcpy(this->buffer_, rhs.buffer_, sizeof(rhs.buffer_));
|
| 194 |
+
this->data_ = (void*)(this->buffer_);
|
| 195 |
+
} else {
|
| 196 |
+
this->data_ = rhs.data_;
|
| 197 |
+
}
|
| 198 |
+
return *this;
|
| 199 |
+
}
|
| 200 |
+
|
| 201 |
+
#define ARG_PTR_DEFINE(Type, Name) \
|
| 202 |
+
Type* Name##Ptr() const { \
|
| 203 |
+
TORCH_INTERNAL_ASSERT(data_ == (void*)buffer_); \
|
| 204 |
+
return (Type*)data_; \
|
| 205 |
+
}
|
| 206 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
|
| 207 |
+
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, ARG_PTR_DEFINE);
|
| 208 |
+
#undef ARG_PTR_DEFINE
|
| 209 |
+
|
| 210 |
+
private:
|
| 211 |
+
void* data_;
|
| 212 |
+
// Regarding a scalar value, CallArg uses void**=&data_ to store it. But the
|
| 213 |
+
// bit width of a pointer is 32bit on a 32bit platform. It cannot store the
|
| 214 |
+
// scalar if the bit width of the scalar is larger than 32bit, such as double
|
| 215 |
+
// and long. Hence, we add 8 bytes buffer dedicated to storing the scalar
|
| 216 |
+
// value regardless its bit width is less or greater than 32bits.
|
| 217 |
+
char buffer_[8] = {0}; // 64bits
|
| 218 |
+
};
|
| 219 |
+
|
| 220 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 221 |
+
class RegisterCodeGenList {
|
| 222 |
+
public:
|
| 223 |
+
TORCH_API static RegisterCodeGenList& GetInstance() {
|
| 224 |
+
static RegisterCodeGenList codegen_list;
|
| 225 |
+
return codegen_list;
|
| 226 |
+
}
|
| 227 |
+
|
| 228 |
+
using StmtFactoryMethod = std::function<std::unique_ptr<CodeGen>(
|
| 229 |
+
StmtPtr stmt,
|
| 230 |
+
const std::vector<CodeGen::BufferArg>&,
|
| 231 |
+
at::Device device,
|
| 232 |
+
const std::string& kernel_func_name)>;
|
| 233 |
+
|
| 234 |
+
TORCH_API StmtFactoryMethod FindStmtFactoryMethod(const std::string& name);
|
| 235 |
+
RegisterCodeGenList(const RegisterCodeGenList&) = delete;
|
| 236 |
+
RegisterCodeGenList& operator=(const RegisterCodeGenList&) = delete;
|
| 237 |
+
|
| 238 |
+
private:
|
| 239 |
+
template <class CodeGenType>
|
| 240 |
+
friend class RegisterCodeGen;
|
| 241 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 242 |
+
RegisterCodeGenList() = default;
|
| 243 |
+
TORCH_API void AddStmtFactoryMethod(
|
| 244 |
+
const std::string& name,
|
| 245 |
+
const StmtFactoryMethod& stmt_factory_method);
|
| 246 |
+
|
| 247 |
+
std::unordered_map<std::string, StmtFactoryMethod> stmt_factory_methods_;
|
| 248 |
+
};
|
| 249 |
+
|
| 250 |
+
template <class CodeGenType>
|
| 251 |
+
class RegisterCodeGen {
|
| 252 |
+
public:
|
| 253 |
+
explicit RegisterCodeGen(const std::string& name) {
|
| 254 |
+
RegisterCodeGenList& codegen_list = RegisterCodeGenList::GetInstance();
|
| 255 |
+
codegen_list.AddStmtFactoryMethod(
|
| 256 |
+
name,
|
| 257 |
+
[](StmtPtr stmt,
|
| 258 |
+
const std::vector<CodeGen::BufferArg>& params,
|
| 259 |
+
at::Device device,
|
| 260 |
+
const std::string& kernel_func_name) {
|
| 261 |
+
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
|
| 262 |
+
std::unique_ptr<CodeGen> method(
|
| 263 |
+
new CodeGenType(stmt, params, device, kernel_func_name));
|
| 264 |
+
return method;
|
| 265 |
+
});
|
| 266 |
+
}
|
| 267 |
+
};
|
| 268 |
+
|
| 269 |
+
TORCH_API std::unique_ptr<CodeGen> CreateCodeGen(
|
| 270 |
+
const std::string& name,
|
| 271 |
+
StmtPtr stmt,
|
| 272 |
+
const std::vector<CodeGen::BufferArg>& params,
|
| 273 |
+
at::Device device = at::kCPU,
|
| 274 |
+
const std::string& kernel_func_name = "func");
|
| 275 |
+
|
| 276 |
+
class TORCH_API GenericIntrinsicsExpander : public IRMutator {
|
| 277 |
+
protected:
|
| 278 |
+
ExprPtr mutate(IntrinsicsPtr v) override;
|
| 279 |
+
};
|
| 280 |
+
|
| 281 |
+
} // namespace tensorexpr
|
| 282 |
+
} // namespace jit
|
| 283 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_codegen.h
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/jit/tensorexpr/codegen.h>
|
| 4 |
+
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace jit {
|
| 8 |
+
namespace tensorexpr {
|
| 9 |
+
|
| 10 |
+
class CppVarNameRewriter;
|
| 11 |
+
|
| 12 |
+
// Generates C++ code from the IR.
|
| 13 |
+
//
|
| 14 |
+
// Vector operations are unrolled.
|
| 15 |
+
// For example:
|
| 16 |
+
// C[Ramp(0, 1, 3)] = A[Ramp(0, 2, 3)] + B[Ramp(0, 3, 3)];
|
| 17 |
+
// is unrolled into:
|
| 18 |
+
// C[0] = A[0] + B[0];
|
| 19 |
+
// C[1] = A[2] + B[3];
|
| 20 |
+
// C[2] = A[4] + B[6];
|
| 21 |
+
class TORCH_API CppPrinter : public IRPrinter {
|
| 22 |
+
public:
|
| 23 |
+
explicit CppPrinter(std::ostream* os);
|
| 24 |
+
~CppPrinter() override;
|
| 25 |
+
|
| 26 |
+
void printPrologue();
|
| 27 |
+
|
| 28 |
+
using IRPrinter::visit;
|
| 29 |
+
|
| 30 |
+
// Binary expressions.
|
| 31 |
+
void visit(ModPtr) override;
|
| 32 |
+
void visit(MaxPtr) override;
|
| 33 |
+
void visit(MinPtr) override;
|
| 34 |
+
|
| 35 |
+
// Conditional expressions.
|
| 36 |
+
void visit(CompareSelectPtr) override;
|
| 37 |
+
void visit(IfThenElsePtr) override;
|
| 38 |
+
|
| 39 |
+
// Tensor operations.
|
| 40 |
+
void visit(AllocatePtr) override;
|
| 41 |
+
void visit(FreePtr) override;
|
| 42 |
+
void visit(LoadPtr) override;
|
| 43 |
+
void visit(StorePtr) override;
|
| 44 |
+
|
| 45 |
+
// Casts.
|
| 46 |
+
void visit(CastPtr) override;
|
| 47 |
+
void visit(BitCastPtr) override;
|
| 48 |
+
|
| 49 |
+
// Calls.
|
| 50 |
+
void visit(IntrinsicsPtr) override;
|
| 51 |
+
void visit(ExternalCallPtr) override;
|
| 52 |
+
|
| 53 |
+
// Vars.
|
| 54 |
+
void visit(LetPtr) override;
|
| 55 |
+
void visit(VarPtr) override;
|
| 56 |
+
|
| 57 |
+
// Vector data types.
|
| 58 |
+
void visit(RampPtr) override;
|
| 59 |
+
void visit(BroadcastPtr) override;
|
| 60 |
+
|
| 61 |
+
private:
|
| 62 |
+
int lane_;
|
| 63 |
+
std::unordered_map<VarPtr, ExprPtr> vector_vars_;
|
| 64 |
+
};
|
| 65 |
+
|
| 66 |
+
class TORCH_API CppCodeGen : public CodeGen {
|
| 67 |
+
public:
|
| 68 |
+
CppCodeGen(
|
| 69 |
+
StmtPtr stmt,
|
| 70 |
+
const std::vector<BufferArg>& buffer_args,
|
| 71 |
+
at::Device device = at::kCPU,
|
| 72 |
+
const std::string& kernel_func_name = "func");
|
| 73 |
+
|
| 74 |
+
~CppCodeGen() override;
|
| 75 |
+
|
| 76 |
+
void call(const std::vector<CallArg>& args) override;
|
| 77 |
+
void call_raw(const std::vector<void*>& args) override;
|
| 78 |
+
|
| 79 |
+
template <typename... Ts>
|
| 80 |
+
void operator()(const Ts&... ts) {
|
| 81 |
+
call(std::vector<CallArg>({CallArg(ts)...}));
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
std::string getCodeText(const std::string& attr = "") override {
|
| 85 |
+
return oss_.str();
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
private:
|
| 89 |
+
void init();
|
| 90 |
+
|
| 91 |
+
std::ostream& os() {
|
| 92 |
+
return printer_->os();
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
std::ostringstream oss_;
|
| 96 |
+
std::unique_ptr<CppPrinter> printer_;
|
| 97 |
+
std::unique_ptr<CppVarNameRewriter> var_name_rewriter_;
|
| 98 |
+
};
|
| 99 |
+
|
| 100 |
+
} // namespace tensorexpr
|
| 101 |
+
} // namespace jit
|
| 102 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cpp_intrinsics.h
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace torch {
|
| 4 |
+
namespace jit {
|
| 5 |
+
namespace tensorexpr {
|
| 6 |
+
|
| 7 |
+
constexpr auto cpp_intrinsics_definition = R"(
|
| 8 |
+
namespace std {
|
| 9 |
+
|
| 10 |
+
template <typename T,
|
| 11 |
+
typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
|
| 12 |
+
T rsqrt(T v) {
|
| 13 |
+
return 1.0f / std::sqrt(v);
|
| 14 |
+
}
|
| 15 |
+
|
| 16 |
+
template <typename T,
|
| 17 |
+
typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
|
| 18 |
+
T frac(T v) {
|
| 19 |
+
T intpart;
|
| 20 |
+
return std::modf(v, &intpart);
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
template <typename From, typename To>
|
| 24 |
+
To bitcast(const From& v) {
|
| 25 |
+
assert(sizeof(To) == sizeof(From));
|
| 26 |
+
To res;
|
| 27 |
+
std::memcpy(&res, &v, sizeof(From));
|
| 28 |
+
return res;
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
} // namespace std
|
| 32 |
+
)";
|
| 33 |
+
|
| 34 |
+
} // namespace tensorexpr
|
| 35 |
+
} // namespace jit
|
| 36 |
+
} // namespace torch
|
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_codegen.h
ADDED
|
@@ -0,0 +1,295 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <unordered_map>
|
| 4 |
+
#include <unordered_set>
|
| 5 |
+
|
| 6 |
+
#include <ATen/ATen.h>
|
| 7 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 8 |
+
#include <ATen/cuda/nvrtc_stub/ATenNVRTC.h>
|
| 9 |
+
#include <c10/cuda/CUDACachingAllocator.h>
|
| 10 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 11 |
+
#include <torch/csrc/jit/resource_guard.h>
|
| 12 |
+
#include <torch/csrc/jit/tensorexpr/codegen.h>
|
| 13 |
+
#include <torch/csrc/jit/tensorexpr/eval.h>
|
| 14 |
+
#include <torch/csrc/jit/tensorexpr/ir.h>
|
| 15 |
+
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
|
| 16 |
+
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
|
| 17 |
+
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
|
| 18 |
+
#include <torch/csrc/jit/tensorexpr/unique_name_manager.h>
|
| 19 |
+
|
| 20 |
+
namespace torch {
|
| 21 |
+
namespace jit {
|
| 22 |
+
namespace tensorexpr {
|

// A class that analyzes the given program relevant for Cuda backends.
class CudaAnalysis : public IRVisitor {
 public:
  CudaAnalysis() {
    gpu_block_extents_ = {alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
    gpu_thread_extents_ = {
        alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
  }
  bool is_buf_store_target(BufPtr buf) const {
    return store_targets_.count(buf) > 0;
  }

  const std::unordered_set<VarPtr>& thread_local_bufs() const {
    return thread_local_bufs_;
  }

  const std::unordered_set<VarPtr>& cross_block_bufs() const {
    return cross_block_bufs_;
  }

  const std::vector<ExprPtr>& gpu_block_extents() const {
    return gpu_block_extents_;
  }

  const std::vector<ExprPtr>& gpu_thread_extents() const {
    return gpu_thread_extents_;
  }

 private:
  void visit(StorePtr v) override {
    store_targets_.insert(v->buf());
  }

  void visit(AllocatePtr v) override;
  void visit(FreePtr v) override;
  void visit(PlacementAllocatePtr v) override;
  void visit(ForPtr v) override;

  std::unordered_set<BufPtr> store_targets_;
  std::unordered_set<VarPtr> thread_local_bufs_;
  std::unordered_set<VarPtr> cross_block_bufs_;

  std::vector<ExprPtr> gpu_block_extents_;
  std::vector<ExprPtr> gpu_thread_extents_;
};

// An IRMutator that replaces binding loop options with Cuda metavars, and masks
// statement blocks which should execute with less reach than the launch
// parameter extent.
//
// We do this by segmenting each block into chunks which should have the same
// execution parameters, then if those params differ from the max mask each dim.
class GPUMetaVarRewriter : public IRMutator {
 public:
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  explicit GPUMetaVarRewriter(const CudaAnalysis* cuda_analysis)
      : cuda_analysis_(cuda_analysis) {
    gpu_block_vars_ = {
        alloc<Var>("blockIdx.x", kInt),
        alloc<Var>("blockIdx.y", kInt),
        alloc<Var>("blockIdx.z", kInt)};
    gpu_thread_vars_ = {
        alloc<Var>("threadIdx.x", kInt),
        alloc<Var>("threadIdx.y", kInt),
        alloc<Var>("threadIdx.z", kInt)};

    current_block_reach_ = {
        alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
    current_thread_reach_ = {
        alloc<IntImm>(1), alloc<IntImm>(1), alloc<IntImm>(1)};
  }

  StmtPtr mutate(ForPtr v) override;
  StmtPtr mutate(BlockPtr v) override;

  const std::vector<VarPtr>& gpu_block_vars() const {
    return gpu_block_vars_;
  }

  const std::vector<VarPtr>& gpu_thread_vars() const {
    return gpu_thread_vars_;
  }

  const std::vector<ExprPtr>& gpu_block_extents() const {
    return cuda_analysis_->gpu_block_extents();
  }

  const std::vector<ExprPtr>& gpu_thread_extents() const {
    return cuda_analysis_->gpu_thread_extents();
  }

 private:
  // When processing a block, stores the contents of each sub-segment.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  class Segment {
   public:
    void reset(bool mask) {
      stmts_.clear();
      mask_ = mask;
    }

    bool empty() const {
      return stmts_.empty();
    }

    std::vector<StmtPtr>& stmts() {
      return stmts_;
    }
    bool mask() {
      return mask_;
    }

   private:
    std::vector<StmtPtr> stmts_;
    bool mask_{true};
  };

  // Returns true if the current execution scope is equivalent to the launch
  // parameters.
  bool isFullExtent();

  std::vector<VarPtr> gpu_block_vars_;
  std::vector<VarPtr> gpu_thread_vars_;

  std::vector<ExprPtr> current_block_reach_;
  std::vector<ExprPtr> current_thread_reach_;

  const CudaAnalysis* cuda_analysis_;
};

// A class that overrides the underlying IRPrinter to produce Cuda C.
class CudaPrinter : public IRPrinter {
 public:
  explicit CudaPrinter(
      std::ostream* os,
      const CudaAnalysis* cuda_analysis,
      bool has_random)
      : IRPrinter(*os), cuda_analysis_(cuda_analysis) {
    if (has_random) {
      rand_func_ = alloc<Var>("rand", kHandle);
    }
  }

  void visit(CastPtr v) override;
  void visit(IntrinsicsPtr v) override;
  void visit(ForPtr v) override;

  void visit(LoadPtr v) override;
  void visit(StorePtr v) override;
  void visit(AtomicAddPtr v) override;
  void visit(MaxPtr v) override;
  void visit(MinPtr v) override;
  void visit(IfThenElsePtr v) override;
  void visit(BlockPtr v) override;
  void visit(AllocatePtr v) override;
  void visit(FreePtr v) override;
  void visit(LetPtr v) override;

  void visit(ExternalCallPtr v) override;

  VarPtr rand_func() const {
    return rand_func_;
  }

  std::string dtypeToCppString(const Dtype& dtype) override;

  using IRPrinter::name_manager;
  using IRPrinter::visit;

 private:
  VarPtr rand_func_;
  const CudaAnalysis* cuda_analysis_;

  void print_flat_alloc(AllocatePtr alloc);
};

// Construct Cuda C from the buffer and tensor input, and invoke the kernel
// when real arguments are provided.
class TORCH_CUDA_CU_API CudaCodeGen : public CodeGen {
 public:
  template <typename... Ts>
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  CudaCodeGen(StmtPtr stmt, Ts... ts)
      : CodeGen(
            stmt,
            std::vector<BufferArg>({BufferArg(ts)...}),
            at::Device(at::kCUDA, at::cuda::current_device())) {
    Initialize();
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  CudaCodeGen(
      StmtPtr stmt,
      const std::vector<BufferArg>& buffer_args,
      at::Device device = at::Device(at::kCUDA, at::cuda::current_device()),
      const std::string& kernel_func_name = "func")
      : CodeGen(stmt, buffer_args, device, kernel_func_name) {
    Initialize();
  }

  ~CudaCodeGen() override;

  void call(const std::vector<CallArg>& args) override;
  void call_raw(const std::vector<void*>& args) override;
  void call_with_numel(void** args, int64_t numel) override;

  template <typename... Ts>
  void operator()(const Ts&... ts) {
    call(std::vector<CallArg>({CallArg(ts)...}));
  }

  at::Tensor empty_strided(
      c10::IntArrayRef size,
      c10::IntArrayRef stride,
      c10::optional<c10::ScalarType> dtype_opt,
      c10::optional<c10::Layout> layout_opt,
      c10::optional<c10::Device> device_opt,
      c10::optional<bool> pin_memory_opt) override;

  const std::vector<ExprPtr>& gpu_block_extents() const {
    return cuda_analysis_->gpu_block_extents();
  }

  const std::vector<ExprPtr>& gpu_thread_extents() const {
    return cuda_analysis_->gpu_thread_extents();
  }

  std::string getCodeText(const std::string& attr = "") override {
    return oss_.str();
  }

 private:
  void Initialize();

  void CompileToNVRTC(const std::string& code, const std::string& func_name);

  UniqueNameManager* name_manager() {
    if (!printer_) {
      throw std::runtime_error("Null IRPrinter is not expected");
    }
    return printer_->name_manager();
  }

  std::ostream& os() {
    return printer_->os();
  }

  std::ostringstream oss_;
  std::unique_ptr<CudaPrinter> printer_;
  std::unique_ptr<CudaAnalysis> cuda_analysis_;
  std::unique_ptr<GPUMetaVarRewriter> metavar_rewriter_;
  std::unordered_set<std::string> taken_func_names;
  std::mutex eval_lock_;
  CUfunction function_;
  bool has_random_ = false;
  int thread_block_size_ = -1;

  std::vector<bool> arg_pos_in_extents_;
#ifdef TORCH_ENABLE_LLVM
  std::vector<ExprEval<LLVMCodeGen>> block_extents_eval_;
  std::vector<ExprEval<LLVMCodeGen>> thread_extents_eval_;
#else
  std::vector<ExprEval<SimpleIREvaluator>> block_extents_eval_;
  std::vector<ExprEval<SimpleIREvaluator>> thread_extents_eval_;
#endif

  std::string GetUniqueFuncName(const std::string& func_prefix);
};

} // namespace tensorexpr
} // namespace jit
} // namespace torch
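
Taken together, these classes form a small pipeline: CudaAnalysis computes launch extents, GPUMetaVarRewriter binds loop axes to blockIdx/threadIdx metavars, CudaPrinter emits Cuda C, and CudaCodeGen compiles the result with NVRTC and launches it. A minimal hedged sketch of driving it, assuming a lowered `stmt` whose loops are already bound to GPU axes and CUDA tensors matching the buffer arguments (the function and variable names below are illustrative, not part of the header above):

// Hedged sketch: construct a CudaCodeGen over a lowered statement and its
// buffer arguments, inspect the generated Cuda C, then launch with real data.
using namespace torch::jit::tensorexpr;

void runOnGpu(StmtPtr stmt, BufHandle A, BufHandle B, BufHandle C,
              at::Tensor a, at::Tensor b, at::Tensor c) {
  std::vector<CodeGen::BufferArg> args = {A, B, C};
  CudaCodeGen cg(stmt, args);
  std::string cuda_source = cg.getCodeText();           // the NVRTC input
  cg.call_raw({a.data_ptr(), b.data_ptr(), c.data_ptr()}); // kernel launch
}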
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/cuda_random.h ADDED
@@ -0,0 +1,104 @@
#pragma once

namespace torch {
namespace jit {
namespace tensorexpr {

constexpr auto philox_random_string = R"(

class Philox {
public:
  __device__ inline Philox(unsigned long long seed,
                           unsigned long long subsequence,
                           unsigned long long offset) {
    key.x = (unsigned int)seed;
    key.y = (unsigned int)(seed >> 32);
    counter = make_uint4(0, 0, 0, 0);
    counter.z = (unsigned int)(subsequence);
    counter.w = (unsigned int)(subsequence >> 32);
    STATE = 0;
    incr_n(offset / 4);
  }

  __device__ inline unsigned long operator()() {
    if(STATE == 0) {
      uint4 counter_ = counter;
      uint2 key_ = key;
      for(int i = 0; i < 9; i++) {
        counter_ = single_round(counter_, key_);
        key_.x += (kPhilox10A); key_.y += (kPhilox10B);
      }
      output = single_round(counter_, key_);
      incr();
    }
    unsigned long ret;
    switch(STATE) {
      case 0: ret = output.x; break;
      case 1: ret = output.y; break;
      case 2: ret = output.z; break;
      case 3: ret = output.w; break;
    }
    STATE = (STATE + 1) % 4;
    return ret;
  }

private:
  uint4 counter;
  uint4 output;
  uint2 key;
  unsigned int STATE;
  __device__ inline void incr_n(unsigned long long n) {
    unsigned int nlo = (unsigned int)(n);
    unsigned int nhi = (unsigned int)(n >> 32);
    counter.x += nlo;
    if (counter.x < nlo)
      nhi++;
    counter.y += nhi;
    if (nhi <= counter.y)
      return;
    if (++counter.z)
      return;
    ++counter.w;
  }
  __device__ inline void incr() {
    if (++counter.x)
      return;
    if (++counter.y)
      return;
    if (++counter.z)
      return;
    ++counter.w;
  }
  __device__ unsigned int mulhilo32(unsigned int a, unsigned int b,
                                    unsigned int *result_high) {
    *result_high = __umulhi(a, b);
    return a*b;
  }

  __device__ inline uint4 single_round(uint4 ctr, uint2 key) {
    unsigned int hi0;
    unsigned int hi1;
    unsigned int lo0 = mulhilo32(kPhiloxSA, ctr.x, &hi0);
    unsigned int lo1 = mulhilo32(kPhiloxSB, ctr.z, &hi1);

    uint4 ret = {hi1 ^ ctr.y ^ key.x, lo1, hi0 ^ ctr.w ^ key.y, lo0};
    return ret;
  }

  static const unsigned long kPhilox10A = 0x9E3779B9;
  static const unsigned long kPhilox10B = 0xBB67AE85;
  static const unsigned long kPhiloxSA = 0xD2511F53;
  static const unsigned long kPhiloxSB = 0xCD9E8D57;
};

// Inverse of 2^32.
#define M_RAN_INVM32 2.3283064e-10f
__device__ __inline__ float Uint32ToFloat(unsigned int x) {
  return x * M_RAN_INVM32;
}

)";

} // namespace tensorexpr
} // namespace jit
} // namespace torch
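
Because the Philox implementation is shipped as a raw string rather than compiled code, a codegen that needs device-side RNG can simply prepend it to the generated kernel source before handing the result to NVRTC. A minimal sketch of that pattern, assuming hypothetical helper and variable names (buildKernelSource, kernel_body, full_source are illustrative, not the PyTorch API):

#include <string>

// Hedged sketch: concatenate the Philox source ahead of the kernel body
// when the kernel uses random intrinsics, so Uint32ToFloat and Philox are
// in scope for the NVRTC compilation unit.
std::string buildKernelSource(const std::string& kernel_body, bool has_random) {
  std::string full_source;
  if (has_random) {
    full_source += torch::jit::tensorexpr::philox_random_string;
  }
  full_source += kernel_body;
  return full_source;
}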
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/eval.h ADDED
@@ -0,0 +1,347 @@
#pragma once

#include <cmath>
#include <cstring>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>

#include <c10/macros/Macros.h>
#include <c10/util/Logging.h>
#include <c10/util/math_compat.h>
#include <c10/util/string_utils.h>
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/exceptions.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_printer.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>
#include <torch/csrc/jit/tensorexpr/types.h>
#include <torch/csrc/jit/tensorexpr/var_substitutor.h>

namespace torch {
namespace jit {
namespace tensorexpr {

class InterpValue {
 public:
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  InterpValue() : dtype_(kInt) {
    Intvalues.push_back(0);
  }

  template <typename T>
  InterpValue(Dtype dtype, T v) : dtype_(dtype) {
#define TYPE_CASE(Type, Name)  \
  if (dtype == k##Name) {      \
    Name##values.push_back(v); \
    return;                    \
  }
    AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
#undef TYPE_CASE
    throw unsupported_dtype();
  }

#define VALUE_CTOR(Type, Name)            \
  InterpValue(Type v) : dtype_(k##Name) { \
    Name##values.push_back(v);            \
  }
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_CTOR);
#undef VALUE_CTOR

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  explicit InterpValue(c10::quint8 v) : dtype_(kQUInt8) {
    QUInt8values.emplace_back(v.val_);
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  explicit InterpValue(c10::qint8 v) : dtype_(kQInt8) {
    QInt8values.emplace_back(v.val_);
  }

#define VALUE_VEC_CTOR(Type, Name)        \
  InterpValue(const std::vector<Type>& v) \
      : dtype_(Dtype(k##Name, v.size())), Name##values(v) {}
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_VEC_CTOR);
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  VALUE_VEC_CTOR(c10::quint8, QUInt8)
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  VALUE_VEC_CTOR(c10::qint8, QInt8)
#undef VALUE_VEC_CTOR

  template <typename T>
  T as() const;

  template <typename T>
  const std::vector<T>& as_vec() const;

  int64_t intValue() const;

  Dtype dtype() const {
    return dtype_;
  }

 private:
  Dtype dtype_;

#define VALUE_STORAGE(Type, Name) std::vector<Type> Name##values;
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_STORAGE);
  VALUE_STORAGE(c10::qint8, QInt8);
  VALUE_STORAGE(c10::quint8, QUInt8);
#undef VALUE_STORAGE
  void* ptr;
};

#define VALUE_AS_DISPATCH(Type, Name)         \
  template <>                                 \
  inline Type InterpValue::as<Type>() const { \
    if (dtype_ != k##Name) {                  \
      throw unsupported_dtype();              \
    }                                         \
    return Name##values[0];                   \
  }
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_DISPATCH);
VALUE_AS_DISPATCH(c10::quint8, QUInt8);
VALUE_AS_DISPATCH(c10::qint8, QInt8);
#undef VALUE_AS_DISPATCH

#define VALUE_AS_VEC_DISPATCH(Type, Name)                             \
  template <>                                                         \
  inline const std::vector<Type>& InterpValue::as_vec<Type>() const { \
    if (dtype_.scalar_type() != ScalarType::Name) {                   \
      throw unsupported_dtype();                                      \
    }                                                                 \
    return Name##values;                                              \
  }
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, VALUE_AS_VEC_DISPATCH);
VALUE_AS_VEC_DISPATCH(c10::quint8, QUInt8);
VALUE_AS_VEC_DISPATCH(c10::qint8, QInt8);
#undef VALUE_AS_VEC_DISPATCH

template <typename Type>
auto underlyingValue(Type x) {
  return x;
}

template <>
inline auto underlyingValue<c10::quint8>(c10::quint8 x) {
  return x.val_;
}

template <>
inline auto underlyingValue<c10::qint8>(c10::qint8 x) {
  return x.val_;
}

template <typename To, typename From>
To raw_bitcast(const From& src) {
  TORCH_CHECK(sizeof(To) == sizeof(From), "Invalid bitcast invocation");
  To storage;
  std::memcpy(&storage, &src, sizeof(To));
  return reinterpret_cast<To&>(storage);
}

class SimpleIREvaluatorImpl;
class TORCH_API SimpleIREvaluator : public CodeGen {
 public:
  SimpleIREvaluator(
      StmtPtr stmt,
      const std::vector<BufferArg>& buffer_args,
      at::Device device = at::kCPU,
      const std::string& kernel_func_name = "func");

  ~SimpleIREvaluator() override;

  void call(const std::vector<CallArg>& args) override;
  void call_raw(const std::vector<void*>& args) override;

  template <typename... Ts>
  void operator()(const Ts&... ts) {
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    std::vector<CallArg> args({CallArg(ts)...});
    call(args);
  }

  void bindVar(VarPtr v, ExprPtr e);
  InterpValue value() const;

 private:
  void bindArg(const BufferArg& buf, void* data);
  void expand_intrinsics() {
    GenericIntrinsicsExpander intrinsics_expander;
    apply_mutator(&intrinsics_expander);
  }

  std::unique_ptr<SimpleIREvaluatorImpl> impl_;
};

template <class CodeGenType>
class ExprEval {
 public:
  using BufferArg = CodeGen::BufferArg;
  using CallArg = CodeGen::CallArg;

  template <typename... Ts>
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  ExprEval(const ExprHandle& expr, Ts... ts)
      : ExprEval(expr, {BufferArg(ts)...}) {}

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  ExprEval(const ExprHandle& expr, const std::vector<BufferArg>& buffer_args)
      : dtype_(expr.dtype()) {
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    std::vector<BufferArg> buffer_args_extended = buffer_args;
    BufHandle ret_buf("ret_val", {1}, dtype_);
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    std::vector<ExprHandle> indices;
    ExprHandle zero = IntImm::make(0);
    for (size_t i = 0; i < ret_buf.ndim(); i++) {
      indices.push_back(zero);
    }
    StmtPtr store_stmt = Store::make(ret_buf, indices, expr);
    buffer_args_extended.emplace_back(ret_buf);
    codegen_.reset(new CodeGenType(store_stmt, buffer_args_extended));
  }

  template <typename... Ts>
  void operator()(Ts... ts) {
    call(ts...);
  }

  void operator()(const std::vector<CallArg>& call_args) {
    call(call_args);
  }

  void bindVar(VarPtr v, ExprPtr e) {
    codegen_->bindVar(v, e);
  }

  void bindVar(const VarHandle& v, const ExprHandle& e) {
    codegen_->bindVar(v.node(), e.node());
  }

  template <typename... Ts>
  void call(Ts... ts) {
    call({CallArg(ts)...});
  }

  void call(const std::vector<CallArg>& call_args) {
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    std::vector<CallArg> call_args_extended = call_args;
    switch (dtype_.scalar_type()) {
#define TYPE_CASE(Type, Name)                           \
  case ScalarType::Name: {                              \
    std::vector<Type> ret_val_arg(1);                   \
    call_args_extended.push_back(CallArg(ret_val_arg)); \
    codegen_->call(call_args_extended);                 \
    ret_value_ = InterpValue(ret_val_arg[0]);           \
  } break;
      // NOLINTNEXTLINE(modernize-use-emplace)
      AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
      // NOLINTNEXTLINE(modernize-use-emplace)
      TYPE_CASE(c10::quint8, QUInt8);
      // NOLINTNEXTLINE(modernize-use-emplace)
      TYPE_CASE(c10::qint8, QInt8);
#undef TYPE_CASE
      case ScalarType::Bool: {
        // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
        std::vector<unsigned char> ret_val_arg(1);
        call_args_extended.emplace_back(ret_val_arg.data());
        codegen_->call(call_args_extended);
        ret_value_ = InterpValue((bool)ret_val_arg[0]);
      } break;
      default:
        throw unsupported_dtype();
    }
  }

  void call_raw(const std::vector<void*>& args) {
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    std::vector<void*> args_extended = args;
    switch (dtype_.scalar_type()) {
#define TYPE_CASE(Type, Name)                    \
  case ScalarType::Name: {                       \
    std::vector<Type> ret_val_arg(1);            \
    args_extended.push_back(ret_val_arg.data()); \
    codegen_->call_raw(args_extended);           \
    ret_value_ = InterpValue(ret_val_arg[0]);    \
  } break;
      AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TYPE_CASE);
      TYPE_CASE(c10::quint8, QUInt8);
      TYPE_CASE(c10::qint8, QInt8);
#undef TYPE_CASE
      case ScalarType::Bool: {
        // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
        std::vector<unsigned char> ret_val_arg(1);
        args_extended.push_back(ret_val_arg.data());
        codegen_->call_raw(args_extended);
        ret_value_ = InterpValue((bool)ret_val_arg[0]);
      } break;
      default:
        throw unsupported_dtype();
    }
  }

  template <typename T>
  T value(const std::vector<void*>& args) {
    call_raw(args);
    return ret_value_.as<T>();
  }

  template <typename T, typename... Ts>
  T value(Ts... ts) {
    call(std::forward<Ts>(ts)...);
    return ret_value_.as<T>();
  }

  Dtype dtype() {
    return dtype_;
  }

 private:
  Dtype dtype_;
  std::unique_ptr<CodeGenType> codegen_;
  InterpValue ret_value_;
};

// Evaluates the given expression and returns an int64_t value if the result of
// the given expression is int64_t.
c10::optional<int64_t> evalInt(ExprPtr e);

// Substitutes the given vars with their corresponding expressions in the input
// expression.
inline ExprPtr Substitute(ExprPtr expr, const VarMapping& var_mapping) {
  VarSubMutator var_sub(var_mapping);
  return expr->accept_mutator(&var_sub);
}

// Substitutes the given vars with their corresponding expressions in the input
// statement.
inline StmtPtr Substitute(StmtPtr stmt, const VarMapping& var_mapping) {
  VarSubMutator var_sub(var_mapping);
  return stmt->accept_mutator(&var_sub);
}

// Creates a clone of the input expression and substitutes the given vars with
// their corresponding expressions in the clone.
// NOTE: This works because cloning reuses variables and does not create new
// ones, and `VarMapping` input has variables as the key.
inline ExprPtr SubstituteInClone(ExprPtr expr, const VarMapping& var_mapping) {
  VarSubMutator var_sub(var_mapping);
  return Expr::clone(std::move(expr))->accept_mutator(&var_sub);
}

// Creates a clone of the input statement and substitutes the given vars with
// their corresponding expressions in the clone.
// NOTE: This works because cloning reuses variables and does not create new
// ones, and `VarMapping` input has variables as the key.
inline StmtPtr SubstituteInClone(StmtPtr stmt, const VarMapping& var_mapping) {
  VarSubMutator var_sub(var_mapping);
  return Stmt::clone(std::move(stmt))->accept_mutator(&var_sub);
}

} // namespace tensorexpr
} // namespace jit
} // namespace torch
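
ExprEval hides the store-into-a-one-element-buffer pattern visible in its constructor: it appends a `ret_val` buffer, compiles a single Store of the expression, and reads the result back after each call. A minimal sketch of evaluating an expression with the interpreter backend (the concrete values and function name are illustrative):

#include <torch/csrc/jit/tensorexpr/eval.h>
#include <torch/csrc/jit/tensorexpr/ir.h>

// Hedged sketch: evaluate (x + 1) * 2 at x = 20 with SimpleIREvaluator.
int evalExample() {
  using namespace torch::jit::tensorexpr;
  VarHandle x("x", kInt);
  ExprHandle body = (x + ExprHandle(1)) * ExprHandle(2);
  ExprEval<SimpleIREvaluator> eval(body);
  eval.bindVar(x, IntImm::make(20)); // substitute x before evaluation
  return eval.value<int>();          // expected: 42
}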
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/exceptions.h ADDED
@@ -0,0 +1,91 @@
#pragma once

#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>

#include <sstream>
#include <stdexcept>

// Forward declarations of types
namespace torch {
namespace jit {
namespace tensorexpr {
class Expr;
class Stmt;
} // namespace tensorexpr
} // namespace jit
} // namespace torch

// Forward declarations of functions
namespace std {
TORCH_API std::string to_string(const torch::jit::tensorexpr::ExprPtr);
TORCH_API std::string to_string(const torch::jit::tensorexpr::StmtPtr);
} // namespace std

namespace torch {
namespace jit {
namespace tensorexpr {

class unsupported_dtype : public std::runtime_error {
 public:
  explicit unsupported_dtype() : std::runtime_error("UNSUPPORTED DTYPE") {}
  explicit unsupported_dtype(const std::string& err)
      : std::runtime_error("UNSUPPORTED DTYPE: " + err) {}
};

class out_of_range_index : public std::runtime_error {
 public:
  explicit out_of_range_index() : std::runtime_error("OUT OF RANGE INDEX") {}
  explicit out_of_range_index(const std::string& err)
      : std::runtime_error("OUT OF RANGE INDEX: " + err) {}
};

class unimplemented_lowering : public std::runtime_error {
 public:
  explicit unimplemented_lowering()
      : std::runtime_error("UNIMPLEMENTED LOWERING") {}
  explicit unimplemented_lowering(ExprPtr expr)
      : std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(expr)) {}
  explicit unimplemented_lowering(StmtPtr stmt)
      : std::runtime_error("UNIMPLEMENTED LOWERING: " + std::to_string(stmt)) {}
};

class malformed_input : public std::runtime_error {
 public:
  explicit malformed_input() : std::runtime_error("MALFORMED INPUT") {}
  explicit malformed_input(const std::string& err)
      : std::runtime_error("MALFORMED INPUT: " + err) {}
  explicit malformed_input(ExprPtr expr)
      : std::runtime_error("MALFORMED INPUT: " + std::to_string(expr)) {}
  explicit malformed_input(const std::string& err, ExprPtr expr)
      : std::runtime_error(
            "MALFORMED INPUT: " + err + " - " + std::to_string(expr)) {}
  explicit malformed_input(StmtPtr stmt)
      : std::runtime_error("MALFORMED INPUT: " + std::to_string(stmt)) {}
  explicit malformed_input(const std::string& err, StmtPtr stmt)
      : std::runtime_error(
            "MALFORMED INPUT: " + err + " - " + std::to_string(stmt)) {}
};

class malformed_ir : public std::runtime_error {
 public:
  explicit malformed_ir() : std::runtime_error("MALFORMED IR") {}
  explicit malformed_ir(const std::string& err)
      : std::runtime_error("MALFORMED IR: " + err) {}
  explicit malformed_ir(ExprPtr expr)
      : std::runtime_error("MALFORMED IR: " + std::to_string(expr)) {}
  explicit malformed_ir(const std::string& err, ExprPtr expr)
      : std::runtime_error(
            "MALFORMED IR: " + err + " - " + std::to_string(expr)) {}
  explicit malformed_ir(StmtPtr stmt)
      : std::runtime_error("MALFORMED IR: " + std::to_string(stmt)) {}
  explicit malformed_ir(const std::string& err, StmtPtr stmt)
      : std::runtime_error(
            "MALFORMED IR: " + err + " - " + std::to_string(stmt)) {}
};

TORCH_API std::string buildErrorMessage(const std::string& s = "");

} // namespace tensorexpr
} // namespace jit
} // namespace torch
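
All of these exception types derive from std::runtime_error and embed a printable rendering of the offending IR, so call sites can catch specifically or fall back to the base class. A brief hedged sketch (the lowering call is a placeholder, not a real API):

#include <iostream>
#include <stdexcept>

// Hedged sketch: lowerSomething() stands in for any tensorexpr entry point
// that may throw the exception types declared above.
void guardedLowering() {
  try {
    // lowerSomething(); // placeholder for the real work
  } catch (const torch::jit::tensorexpr::malformed_input& e) {
    std::cerr << "bad input: " << e.what() << '\n';
  } catch (const std::runtime_error& e) {
    std::cerr << "tensorexpr error: " << e.what() << '\n'; // catches the rest
  }
}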
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/expr.h ADDED
@@ -0,0 +1,499 @@
/**
 * This file implements the core classes for Tensor Expressions.
 *
 * The structure of the expressions is inspired by Halide/TVM IR.
 */
#pragma once

#include <c10/core/MemoryFormat.h>
#include <c10/util/Optional.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/types.h>

#include <utility>

namespace torch {
namespace jit {
namespace tensorexpr {

enum IRNodeType {
  kPrimitive,
  kAdd,
  kSub,
  kMul,
  kDiv,
  kMod,
  kMax,
  kMin,
  kAnd,
  kOr,
  kLshift,
  kRshift,
  kXor,
  kCompareSelect,
  kCast,
  kBitCast,
  kOther,
};

// The common base between all expression nodes.
class TORCH_API Expr : public std::enable_shared_from_this<Expr> {
 public:
  explicit Expr(Dtype dtype, IRNodeType expr_type = kOther)
      : dtype_(dtype), expr_type_(expr_type) {}
  virtual ~Expr() = default;
  Dtype dtype() const {
    return dtype_;
  }
  virtual void accept(IRVisitor* visitor) = 0;
  virtual ExprPtr accept_mutator(IRMutator* mutator) = 0;

  IRNodeType expr_type() const {
    return expr_type_;
  }
  // Is this a fixed (constant) immediate value.
  virtual bool isConstant() const {
    return false;
  }

  void set_dtype(Dtype dtype) {
    dtype_ = dtype;
  }

  /*
   * Make a deep copy of the given expression.
   *
   * All sub-expressions inside the given expressions are also cloned. Note
   * that the variables are not deep-copied since they are immutable.
   */
  static ExprPtr clone(ExprPtr s);

 protected:
  std::shared_ptr<Expr> getptr() {
    return shared_from_this();
  }

 private:
  Dtype dtype_;
  IRNodeType expr_type_;
};

// A CRTP pattern to accept visitors for child classes,
// and dispatch back to the children.
template <class Op, class Base = Expr>
class ExprNode : public Base {
 public:
  using ExprNodeBase = ExprNode<Op>;
  void accept(IRVisitor* visitor) override {
    visitor->visit(static_to<Op>(Base::getptr()));
  }
  ExprPtr accept_mutator(IRMutator* mutator) override;
  // pass the constructor to the base class
  using Base::Base;
};

// A wrapper object to the underlying ExprNode.
// Also serves as the primary way to build and operate on other expressions.
class TORCH_API ExprHandle {
 public:
  ExprHandle() = default;
  explicit ExprHandle(ExprPtr node) : base_expr_node_(std::move(node)) {}

  ExprPtr node() {
    return base_expr_node_;
  }

  ExprPtr node() const {
    return base_expr_node_;
  }

  bool empty() const {
    return base_expr_node_ == nullptr;
  }

#define IMM_EXPR_DECLARE(Type, Name) ExprHandle(Type v);
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_EXPR_DECLARE);
#undef IMM_EXPR_DECLARE

  template <class Op>
  NodePtr<Op> AsNode() {
    return to<Op>(this->node());
  }

  template <class Op>
  NodePtr<Op> AsNode() const {
    // NOLINTNEXTLINE(cppcoreguidelines-pro-type-const-cast)
    return const_cast<ExprHandle*>(this)->AsNode<Op>();
  }

  Dtype dtype() const {
    return node()->dtype();
  }

  // Handling the math operators.
  ExprHandle operator+(const ExprHandle& other) const;
  ExprHandle operator-(const ExprHandle& other) const;
  ExprHandle operator*(const ExprHandle& other) const;
  ExprHandle operator/(const ExprHandle& other) const;
  ExprHandle operator%(const ExprHandle& other) const;
  ExprHandle operator==(const ExprHandle& other) const;
  ExprHandle operator!=(const ExprHandle& other) const;
  ExprHandle operator>(const ExprHandle& other) const;
  ExprHandle operator>=(const ExprHandle& other) const;
  ExprHandle operator<(const ExprHandle& other) const;
  ExprHandle operator<=(const ExprHandle& other) const;
  ExprHandle operator&(const ExprHandle& other) const;
  ExprHandle operator|(const ExprHandle& other) const;
  ExprHandle operator&&(const ExprHandle& other) const;
  ExprHandle operator||(const ExprHandle& other) const;
  ExprHandle operator^(const ExprHandle& other) const;
  ExprHandle operator<<(const ExprHandle& other) const;
  ExprHandle operator>>(const ExprHandle& other) const;

 private:
  ExprPtr base_expr_node_ = nullptr;
};

// The underlying representation node to a Var.
// Currently, each Var object represents a unique variable, even though the
// names might be the same. We should consider adding a unique_name as well.
class TORCH_API Var : public ExprNode<Var> {
 public:
  static ExprHandle make(const std::string& name_hint, Dtype dtype) {
    return ExprHandle(alloc<Var>(name_hint, dtype));
  }
  static ExprHandle make(Dtype dtype) {
    return ExprHandle(alloc<Var>("", dtype));
  }

  // TODO: unique_name
  const std::string& name_hint() const {
    return name_hint_;
  }

  void set_name_hint(const std::string& name) {
    name_hint_ = name;
  }

  void set_name_hint(std::string&& name) {
    name_hint_ = std::move(name);
  }

  Var(std::string name_hint, Dtype dtype)
      : ExprNodeBase(dtype, kPrimitive), name_hint_(std::move(name_hint)) {}

 private:
  std::string name_hint_;
};

TORCH_API std::vector<ExprPtr> make_contiguous_strides(
    const std::vector<ExprHandle>& dims);
TORCH_API std::vector<ExprPtr> make_channels_last_strides(
    const std::vector<ExprHandle>& dims);

class TORCH_API Buf : public ExprNode<Buf> {
 public:
  static BufHandle make(const std::vector<ExprHandle>& dims, Dtype dtype);

  static BufHandle make(
      const std::string& name_hint,
      const std::vector<ExprHandle>& dims,
      const std::vector<ExprHandle>& strides,
      Dtype dtype);

  static BufHandle make(
      const std::string& name_hint,
      const std::vector<ExprHandle>& dims,
      Dtype dtype,
      c10::optional<ExprHandle> initializer = c10::nullopt,
      c10::optional<std::vector<ExprHandle>> strides = c10::nullopt,
      c10::optional<ExprHandle> qscale = c10::nullopt,
      c10::optional<ExprHandle> qzero = c10::nullopt);

  // TODO: unique_name
  VarPtr base_handle() const {
    return base_handle_;
  }
  void set_base_handle(VarPtr base_handle) {
    base_handle_ = std::move(base_handle);
  }

  const std::string& name_hint() const {
    return base_handle_->name_hint();
  }
  void set_name_hint(const std::string& name_hint) {
    base_handle_->set_name_hint(name_hint);
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Buf(const std::string& name_hint,
      const std::vector<ExprPtr>& dims,
      Dtype dtype,
      ExprPtr initializer = nullptr,
      c10::optional<std::vector<ExprPtr>> strides = c10::nullopt,
      ExprPtr qscale = nullptr,
      ExprPtr qzero = nullptr)
      : Buf(alloc<Var>(name_hint, kHandle),
            dims,
            dtype,
            std::move(initializer),
            std::move(strides),
            std::move(qscale),
            std::move(qzero)) {}

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Buf(VarPtr var,
      std::vector<ExprPtr> dims,
      Dtype dtype,
      ExprPtr initializer = nullptr,
      c10::optional<std::vector<ExprPtr>> strides = c10::nullopt,
      ExprPtr qscale = nullptr,
      ExprPtr qzero = nullptr);

  size_t ndim() const {
    return dims_.size();
  }
  ExprPtr dim(size_t index) const {
    if (index >= ndim()) {
      throw out_of_range_index();
    }
    return dims_[index];
  }
  std::vector<ExprPtr> dims() const {
    return dims_;
  }
  void set_dims(std::vector<ExprPtr> dims) {
    dims_ = std::move(dims);
  }

  std::vector<ExprPtr> strides() const {
    return strides_;
  }

  void set_strides(std::vector<ExprPtr> strides) {
    strides_ = std::move(strides);
  }

  ExprPtr initializer() const {
    return initializer_;
  }

  ExprPtr qzero() const {
    return qzero_;
  }

  ExprPtr qscale() const {
    return qscale_;
  }

  void set_qzero(ExprPtr qzero) {
    qzero_ = std::move(qzero);
  }

  void set_qscale(ExprPtr qscale) {
    qscale_ = std::move(qscale);
  }

  bool hasConstantDims() const {
    for (const auto& d : dims_) {
      if (!d->isConstant()) {
        return false;
      }
    }
    return true;
  }

  bool is_contiguous(
      at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const;

  // The channels-last 1d layout can benefit the performance of some operators
  // like conv1d. But the MemoryFormat enum has not covered this layout yet.
  // Hence, we abstract a dedicated function to check channels-last 1d
  // contiguity.
  //
  // Channels-last 1d:
  //   dims:          n    c   l
  //   strides(nlc):  c*l  1   c
  bool is_channels_last_1d_contiguous() const {
    if (dims_.size() != 3) {
      return false;
    }
    return is_stride_one(1) && is_cont_with(2, 1) && is_cont_with(0, 2);
  }

 private:
  bool is_cont_with(int cur_dim, int adjacent_dim) const;
  bool is_stride_one(int cur_dim) const;

  VarPtr base_handle_;
  std::vector<ExprPtr> dims_;
  std::vector<ExprPtr> strides_;
  ExprPtr initializer_;
  // qscale_ and qzero_ are used only for quantized dtypes Bufs: kQUInt8, kQInt8
  ExprPtr qscale_;
  ExprPtr qzero_;
};

class TORCH_API BufHandle : public ExprHandle {
 public:
  BufHandle(
      const std::string& name_hint,
      const std::vector<ExprHandle>& dims,
      Dtype dtype)
      : ExprHandle(Buf::make(name_hint, dims, dtype)) {}

  BufHandle(
      const std::string& name_hint,
      const std::vector<ExprHandle>& dims,
      const std::vector<ExprHandle>& strides,
      Dtype dtype)
      : ExprHandle(Buf::make(name_hint, dims, strides, dtype)) {}

  BufHandle(const std::vector<ExprHandle>& dims, Dtype dtype)
      : ExprHandle(Buf::make("_", dims, dtype)) {}

  explicit BufHandle(Dtype dtype) : ExprHandle(Buf::make("_", {}, dtype)) {}

  explicit BufHandle(BufPtr node) : ExprHandle(std::move(node)) {}
  BufPtr node() const {
    return static_to<Buf>(ExprHandle::node());
  }
  BufPtr node() {
    return static_to<Buf>(ExprHandle::node());
  }

  template <typename... Ts>
  inline ExprHandle load(const Ts&... ts) const;

  template <typename T>
  inline ExprHandle load(const std::vector<T>& args) const;

  inline ExprHandle load(const std::vector<ExprHandle>& args) const;

  StorePtr store(const std::vector<ExprHandle>& args, const ExprHandle& val)
      const;

  bool operator==(const BufHandle& other) const {
    return this->node() == other.node();
  }
  bool operator!=(const BufHandle& other) const {
    return !(*this == other);
  }

  const std::string& name_hint() const {
    return this->node()->name_hint();
  }

  bool empty() const {
    return (this->node() == nullptr);
  }

  size_t ndim() const {
    return node()->ndim();
  }

  std::vector<ExprHandle> dims() const;

  ExprHandle dim(size_t index) const {
    return ExprHandle(node()->dim(index));
  }

  bool is_contiguous(
      at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const {
    return node()->is_contiguous(memory_format);
  }

  bool is_channels_last_1d_contiguous() const {
    return node()->is_channels_last_1d_contiguous();
  }
};

// An expression to construct the underlying variable node.
// Note: do not store any info here, since it is often possible to slice this
// object. For example: VarHandle x('x'); ExprHandle x2 = x;
class TORCH_API VarHandle : public ExprHandle {
 public:
  // Creates an empty VarHandle whose base Var is set to nullptr.
  VarHandle() : ExprHandle() {}

  explicit VarHandle(Dtype dtype) : ExprHandle(Var::make(dtype)) {}

  VarHandle(const std::string& name_hint, Dtype dtype)
      : ExprHandle(Var::make(name_hint, dtype)) {}

  explicit VarHandle(VarPtr node) : ExprHandle(std::move(node)) {}

  VarPtr node() const {
    return static_to<Var>(ExprHandle::node());
  }
  bool operator==(const VarHandle& other) const {
    return this->node() == other.node();
  }
  bool operator!=(const VarHandle& other) const {
    return !(*this == other);
  }

  const std::string& name_hint() const {
    return this->node()->name_hint();
  }
  bool empty() const {
    return (this->node() == nullptr);
  }
};

template <class Op, class Base>
ExprPtr ExprNode<Op, Base>::accept_mutator(IRMutator* mutator) {
  return mutator->mutate(static_to<Op>(Base::getptr()));
}

inline bool same_node(const ExprHandle& expr1, const ExprHandle& expr2) {
  return expr1.AsNode<Expr>() == expr2.AsNode<Expr>();
}

TORCH_API ExprHandle sin(const ExprHandle& v);
TORCH_API ExprHandle cos(const ExprHandle& v);
TORCH_API ExprHandle tan(const ExprHandle& v);
TORCH_API ExprHandle asin(const ExprHandle& v);
TORCH_API ExprHandle acos(const ExprHandle& v);
TORCH_API ExprHandle atan(const ExprHandle& v);
TORCH_API ExprHandle sinh(const ExprHandle& v);
TORCH_API ExprHandle cosh(const ExprHandle& v);
TORCH_API ExprHandle tanh(const ExprHandle& v);
TORCH_API ExprHandle sigmoid(const ExprHandle& v);
TORCH_API ExprHandle exp(const ExprHandle& v);
TORCH_API ExprHandle expm1(const ExprHandle& v);
TORCH_API ExprHandle abs(const ExprHandle& v);
TORCH_API ExprHandle log(const ExprHandle& v);
TORCH_API ExprHandle fast_tanh(const ExprHandle& v);
TORCH_API ExprHandle fast_sigmoid(const ExprHandle& v);
TORCH_API ExprHandle fast_log(const ExprHandle& v);
TORCH_API ExprHandle log_vml(const ExprHandle& v);
TORCH_API ExprHandle log2(const ExprHandle& v);
TORCH_API ExprHandle log10(const ExprHandle& v);
TORCH_API ExprHandle log1p(const ExprHandle& v);
TORCH_API ExprHandle erf(const ExprHandle& v);
TORCH_API ExprHandle erfc(const ExprHandle& v);
TORCH_API ExprHandle sqrt(const ExprHandle& v);
TORCH_API ExprHandle rsqrt(const ExprHandle& v);
TORCH_API ExprHandle ceil(const ExprHandle& v);
TORCH_API ExprHandle floor(const ExprHandle& v);
TORCH_API ExprHandle round(const ExprHandle& v);
TORCH_API ExprHandle trunc(const ExprHandle& v);
TORCH_API ExprHandle frac(const ExprHandle& v);
TORCH_API ExprHandle lgamma(const ExprHandle& v);
TORCH_API ExprHandle atan2(const ExprHandle& v1, const ExprHandle& v2);
TORCH_API ExprHandle pow(const ExprHandle& v1, const ExprHandle& v2);
TORCH_API ExprHandle fmod(const ExprHandle& v1, const ExprHandle& v2);
TORCH_API ExprHandle remainder(const ExprHandle& v1, const ExprHandle& v2);
TORCH_API ExprHandle isnan(const ExprHandle& v1);
TORCH_API ExprHandle Relu(const ExprHandle& v1);

TORCH_API ExprHandle
ifThenElse(const ExprHandle& c, const ExprHandle& t, const ExprHandle& f);

TORCH_API ExprHandle expr_to_vec(ExprHandle v, int lanes);

} // namespace tensorexpr
} // namespace jit
} // namespace torch
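
Because ExprHandle overloads the arithmetic operators, IR construction reads like ordinary C++ arithmetic over typed handles. A small hedged sketch (buffer and variable names are illustrative, and the load/store definitions live in ir.h):

#include <torch/csrc/jit/tensorexpr/expr.h>
#include <torch/csrc/jit/tensorexpr/ir.h>

// Hedged sketch: build (a * b + 2.f) and a load from a 16-element buffer.
void buildExprs() {
  using namespace torch::jit::tensorexpr;
  VarHandle a("a", kFloat);
  VarHandle b("b", kFloat);
  ExprHandle e = a * b + ExprHandle(2.f);   // Mul/Add nodes via operators

  BufHandle X("X", {ExprHandle(16)}, kFloat);
  ExprHandle first = X.load(ExprHandle(0)); // Load node at index 0
  (void)e;
  (void)first;
}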
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions.h ADDED
@@ -0,0 +1,115 @@
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/Config.h>
|
| 4 |
+
#include <ATen/Functions.h>
|
| 5 |
+
#include <c10/macros/Macros.h>
|
| 6 |
+
#include <torch/csrc/Export.h>
|
| 7 |
+
#include <cstdint>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
#define FOR_ALL_EXTERNAL_FUNCTIONS(_) \
|
| 11 |
+
_(nnc_aten_adaptive_avg_pool2d) \
|
| 12 |
+
_(nnc_aten_addmm) \
|
| 13 |
+
_(nnc_aten_conv2d) \
|
| 14 |
+
_(nnc_aten_conv1d) \
|
| 15 |
+
_(nnc_aten_conv1d_out) \
|
| 16 |
+
_(nnc_aten_dequantize) \
|
| 17 |
+
_(nnc_aten_dequantize_out) \
|
| 18 |
+
_(nnc_aten_embedding) \
|
| 19 |
+
_(nnc_aten_matmul) \
|
| 20 |
+
_(nnc_aten_mv) \
|
| 21 |
+
_(nnc_aten_mm) \
|
| 22 |
+
_(nnc_aten_mean) \
|
| 23 |
+
_(nnc_aten_max_red) \
|
| 24 |
+
_(nnc_aten_max_red_out) \
|
| 25 |
+
_(nnc_aten_quantized_conv1d) \
|
| 26 |
+
_(nnc_aten_quantized_conv1d_out) \
|
| 27 |
+
_(nnc_aten_quantized_conv2d) \
|
| 28 |
+
_(nnc_aten_quantized_conv2d_out) \
|
| 29 |
+
_(nnc_aten_quantized_conv2d_relu) \
|
| 30 |
+
_(nnc_aten_quantized_conv2d_relu_out) \
|
| 31 |
+
_(nnc_aten_quantized_linear) \
|
| 32 |
+
_(nnc_aten_quantized_linear_out) \
|
| 33 |
+
_(nnc_aten_quantized_linear_relu) \
|
| 34 |
+
_(nnc_aten_quantized_add) \
|
| 35 |
+
_(nnc_aten_quantized_cat) \
|
| 36 |
+
_(nnc_aten_quantized_mul) \
|
| 37 |
+
_(nnc_aten_quantized_mul_out) \
|
| 38 |
+
_(nnc_aten_quantized_mul_scalar) \
|
| 39 |
+
_(nnc_aten_quantized_mul_scalar_out) \
|
| 40 |
+
_(nnc_aten_quantized_relu) \
|
| 41 |
+
_(nnc_aten_quantized_sigmoid) \
|
| 42 |
+
_(nnc_aten_quantized_sigmoid_out) \
|
| 43 |
+
_(nnc_aten_quantize_per_tensor) \
|
| 44 |
+
_(nnc_aten_quantize_per_tensor_out) \
|
| 45 |
+
_(nnc_aten_triangular_solve) \
|
| 46 |
+
_(nnc_aten_upsample_nearest2d) \
|
| 47 |
+
_(nnc_aten_upsample_nearest2d_out) \
|
| 48 |
+
_(nnc_prepacked_conv2d_clamp_run) \
|
| 49 |
+
_(nnc_prepacked_linear_clamp_run)
|
| 50 |
+
|
| 51 |
+
#define DECLARE_EXTERNAL_FUNCTION(NAME) \
|
| 52 |
+
TORCH_API void NAME( \
|
| 53 |
+
int64_t bufs_num, \
|
| 54 |
+
void** buf_data, \
|
| 55 |
+
int64_t* buf_ranks, \
|
| 56 |
+
int64_t* buf_dims, \
|
| 57 |
+
int64_t* buf_strides, \
|
| 58 |
+
int8_t* buf_dtypes, \
|
| 59 |
+
int64_t args_num, \
|
| 60 |
+
int64_t* extra_args);
|
| 61 |
+
|
| 62 |
+
namespace torch {
|
| 63 |
+
namespace jit {
|
| 64 |
+
namespace tensorexpr {
|
| 65 |
+
struct QIData final {
|
| 66 |
+
double scale;
|
| 67 |
+
int64_t zero;
|
| 68 |
+
c10::ScalarType scalarType;
|
| 69 |
+
};
|
| 70 |
+
std::vector<at::Tensor> constructTensors(
|
| 71 |
+
int64_t bufs_num,
|
| 72 |
+
void** buf_data,
|
| 73 |
+
int64_t* buf_ranks,
|
| 74 |
+
int64_t* buf_dims,
|
| 75 |
+
int64_t* buf_strides,
|
| 76 |
+
int8_t* buf_dtypes,
|
| 77 |
+
c10::optional<std::vector<std::pair<size_t, QIData>>> qdataArg =
|
| 78 |
+
c10::nullopt);
|
| 79 |
+
|
| 80 |
+
std::vector<at::Tensor> constructTensors2(
|
| 81 |
+
int64_t bufs_in_num,
|
| 82 |
+
void** buf_data,
|
| 83 |
+
int64_t* buf_ranks,
|
| 84 |
+
int64_t* buf_dims,
|
| 85 |
+
int64_t* buf_strides,
|
| 86 |
+
int8_t* buf_dtypes,
|
| 87 |
+
c10::optional<std::vector<std::pair<size_t, QIData>>> qdataArg =
|
| 88 |
+
c10::nullopt,
|
| 89 |
+
size_t bufs_out_num = 0);
|
| 90 |
+
|
| 91 |
+
#ifdef C10_MOBILE
|
| 92 |
+
extern "C" {
|
| 93 |
+
#endif
|
| 94 |
+
void DispatchParallel(
|
| 95 |
+
int8_t* func,
|
| 96 |
+
int64_t start,
|
| 97 |
+
int64_t stop,
|
| 98 |
+
int8_t* packed_data) noexcept;
|
| 99 |
+
|
| 100 |
+
FOR_ALL_EXTERNAL_FUNCTIONS(DECLARE_EXTERNAL_FUNCTION)
|
| 101 |
+
#if AT_MKLDNN_ENABLED()
|
| 102 |
+
DECLARE_EXTERNAL_FUNCTION(nnc_mkldnn_prepacked_conv_run);
|
| 103 |
+
#endif
|
| 104 |
+
|
| 105 |
+
TORCH_API void nnc_aten_free(int64_t bufs_num, void** ptrs) noexcept;
|
| 106 |
+
|
| 107 |
+
#ifdef C10_MOBILE
|
| 108 |
+
} // extern "C"
|
| 109 |
+
#endif
|
| 110 |
+
|
| 111 |
+
} // namespace tensorexpr
|
| 112 |
+
} // namespace jit
|
| 113 |
+
} // namespace torch
|
| 114 |
+
|
| 115 |
+
#undef DECLARE_EXTERNAL_FUNCTION
|
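
Each entry of FOR_ALL_EXTERNAL_FUNCTIONS is stamped out through DECLARE_EXTERNAL_FUNCTION, so every bridge function shares one flat, C-compatible signature. Modulo formatting, the expansion for the nnc_aten_matmul entry looks like this:

// What DECLARE_EXTERNAL_FUNCTION(nnc_aten_matmul) expands to:
TORCH_API void nnc_aten_matmul(
    int64_t bufs_num,     // how many buffers buf_data holds
    void** buf_data,      // raw pointers; buf_data[0] is the output buffer
    int64_t* buf_ranks,   // rank of each buffer
    int64_t* buf_dims,    // dims of all buffers, concatenated
    int64_t* buf_strides, // strides of all buffers, concatenated
    int8_t* buf_dtypes,   // scalar type tag per buffer
    int64_t args_num,     // number of extra scalar arguments
    int64_t* extra_args); // opaque int64-encoded scalar arguments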

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_core.h
ADDED
@@ -0,0 +1,29 @@
+#pragma once
+
+#include <ATen/ATen.h>
+#include <ATen/Parallel.h>
+#include <torch/csrc/Export.h>
+#include <cstdint>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+#ifdef C10_MOBILE
+extern "C" {
+#endif
+void DispatchParallel(
+    int8_t* func,
+    int64_t start,
+    int64_t stop,
+    int8_t* packed_data) noexcept;
+
+TORCH_API void nnc_aten_free(int64_t bufs_num, void** ptrs) noexcept;
+
+#ifdef C10_MOBILE
+} // extern "C"
+#endif
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/external_functions_registry.h
ADDED
@@ -0,0 +1,61 @@
+#pragma once
+
+#include <torch/csrc/Export.h>
+#include <cstdint>
+#include <string>
+#include <unordered_map>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+// The external functions that could be called from NNC must have the same
+// signature defined by `NNCExternalFunction`.
+//
+// Why this signature?
+// It was picked for two reasons: 1) it should be generic enough to represent
+// most of the ops we might want to call, 2) it should be possible to generate
+// code for this call in LLVM codegen.
+// The first 6 parameters allow to pass any number of contiguous CPU tensors in
+// case we need to run aten ops (TODO: support different devices). The first
+// buffer in the array is assumed to be the output buffer. We couldn't use
+// `at::Tensor` (or `c10::IValue`) type there directly as it would mean that
+// we'd need to declare it in LLVM codegen in LLVM IR form, which would be very
+// cumbersome and hard to maintain. Note that the dimensions of all tensors are
+// concatenated into a single array buf_dims. We do not need to pass its length,
+// since it can be deduced from the total number of buffers and their ranks.
+//
+// The last 2 arguments allow to pass any non-tensor arguments encoded as an
+// array of int64_t values. The way they are encoded is not specified and could
+// be arbitrary - whatever is most convenient for the specific bridge function.
+//
+// The bridge functions must not throw exceptions - properly propagating them
+// from the generated code is too cumbersome, and thus all calls to functions
+// that could throw must be wrapped with try-catch blocks.
+using NNCExternalFunction = void (*)(
+    int64_t bufs_num,
+    void** buf_data,
+    int64_t* buf_ranks,
+    int64_t* buf_dims,
+    int64_t* buf_strides,
+    int8_t* buf_dtypes,
+    int64_t args_num,
+    int64_t* extra_args);
+
+// Return a global map "function-name" -> "function-pointer" for all external
+// functions registered in NNC.
+TORCH_API std::unordered_map<std::string, NNCExternalFunction>&
+getNNCFunctionRegistry();
+
+// To register a new external function in NNC one needs to create an instance
+// of this struct.
+struct RegisterNNCExternalFunction {
+  RegisterNNCExternalFunction(const std::string& name, NNCExternalFunction fn) {
+    getNNCFunctionRegistry()[name] = fn;
+  }
+};
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
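
Registration is a static-initialization trick: constructing a RegisterNNCExternalFunction at namespace scope inserts the function pointer into the global registry before codegen looks names up. A minimal sketch; the function name nnc_example_zero_out and its trivial body are hypothetical, and the float output dtype is assumed purely for illustration:

#include <cstring>
#include <torch/csrc/jit/tensorexpr/external_functions_registry.h>

using namespace torch::jit::tensorexpr;

// Hypothetical bridge matching NNCExternalFunction: zero out the output
// buffer (buf_data[0]) and ignore the extra scalar args. Must not throw.
static void nnc_example_zero_out(
    int64_t bufs_num,
    void** buf_data,
    int64_t* buf_ranks,
    int64_t* buf_dims,
    int64_t* buf_strides,
    int8_t* buf_dtypes,
    int64_t args_num,
    int64_t* extra_args) {
  // Buffer 0's dims are the first buf_ranks[0] entries of the concatenated
  // buf_dims array; a float dtype is assumed here for illustration.
  int64_t numel = 1;
  for (int64_t i = 0; i < buf_ranks[0]; ++i) {
    numel *= buf_dims[i];
  }
  std::memset(buf_data[0], 0, numel * sizeof(float));
}

// Static registration: the constructor runs at load time and adds the
// entry to getNNCFunctionRegistry().
static RegisterNNCExternalFunction reg_example_zero_out(
    "nnc_example_zero_out",
    nnc_example_zero_out);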

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/fwd_decls.h
ADDED
@@ -0,0 +1,129 @@
+#pragma once
+#include <c10/core/ScalarType.h>
+#include <memory>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+template <typename Node>
+using NodePtr = std::shared_ptr<Node>;
+
+template <typename To, typename From>
+NodePtr<To> to(NodePtr<From> x) {
+  return std::dynamic_pointer_cast<To>(x);
+}
+
+template <typename To, typename From>
+NodePtr<To> static_to(NodePtr<From> x) {
+  return std::static_pointer_cast<To>(x);
+}
+
+template <typename Node, typename... Args>
+NodePtr<Node> alloc(Args&&... args) {
+  return std::make_shared<Node>(std::forward<Args>(args)...);
+}
+
+class Buf;
+class Expr;
+class Stmt;
+class Var;
+
+using BufPtr = NodePtr<Buf>;
+using ExprPtr = NodePtr<Expr>;
+using StmtPtr = NodePtr<Stmt>;
+using VarPtr = NodePtr<Var>;
+
+class ExprHandle;
+class VarHandle;
+class BufHandle;
+
+class Add;
+class And;
+class BitCast;
+class Broadcast;
+class Cast;
+class CompareSelect;
+class Div;
+class IfThenElse;
+class Intrinsics;
+class Let;
+class Load;
+class Lshift;
+class Max;
+class MaxTerm;
+class Min;
+class MinTerm;
+class Mod;
+class Mul;
+class Or;
+class Polynomial;
+class Ramp;
+class ReduceOp;
+class RoundOff;
+class Rshift;
+class Store;
+class Sub;
+class Term;
+class Xor;
+using AddPtr = NodePtr<Add>;
+using AndPtr = NodePtr<And>;
+using BitCastPtr = NodePtr<BitCast>;
+using BroadcastPtr = NodePtr<Broadcast>;
+using CastPtr = NodePtr<Cast>;
+using CompareSelectPtr = NodePtr<CompareSelect>;
+using DivPtr = NodePtr<Div>;
+using IfThenElsePtr = NodePtr<IfThenElse>;
+using IntrinsicsPtr = NodePtr<Intrinsics>;
+using LetPtr = NodePtr<Let>;
+using LoadPtr = NodePtr<Load>;
+using LshiftPtr = NodePtr<Lshift>;
+using MaxPtr = NodePtr<Max>;
+using MaxTermPtr = NodePtr<MaxTerm>;
+using MinPtr = NodePtr<Min>;
+using MinTermPtr = NodePtr<MinTerm>;
+using ModPtr = NodePtr<Mod>;
+using MulPtr = NodePtr<Mul>;
+using OrPtr = NodePtr<Or>;
+using PolynomialPtr = NodePtr<Polynomial>;
+using RampPtr = NodePtr<Ramp>;
+using ReduceOpPtr = NodePtr<ReduceOp>;
+using RoundOffPtr = NodePtr<RoundOff>;
+using RshiftPtr = NodePtr<Rshift>;
+using StorePtr = NodePtr<Store>;
+using SubPtr = NodePtr<Sub>;
+using TermPtr = NodePtr<Term>;
+using XorPtr = NodePtr<Xor>;
+
+class Allocate;
+class AtomicAdd;
+class Block;
+class Cond;
+class ExternalCall;
+class ExternalCallWithAlloc;
+class For;
+class Free;
+class FreeExt;
+class PlacementAllocate;
+class SyncThreads;
+using AllocatePtr = NodePtr<Allocate>;
+using AtomicAddPtr = NodePtr<AtomicAdd>;
+using BlockPtr = NodePtr<Block>;
+using CondPtr = NodePtr<Cond>;
+using ExternalCallPtr = NodePtr<ExternalCall>;
+using ExternalCallWithAllocPtr = NodePtr<ExternalCallWithAlloc>;
+using ForPtr = NodePtr<For>;
+using FreePtr = NodePtr<Free>;
+using FreeExtPtr = NodePtr<FreeExt>;
+using PlacementAllocatePtr = NodePtr<PlacementAllocate>;
+using SyncThreadsPtr = NodePtr<SyncThreads>;
+
+#define IMM_DECLARE(Type, Name) \
+  class Name##Imm;              \
+  using Name##ImmPtr = NodePtr<Name##Imm>;
+AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
+#undef IMM_DECLARE
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
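
Since every node is a std::shared_ptr, the helpers above stand in for raw new/dynamic_cast. A short sketch of how alloc<>, to<>, and static_to<> combine, using node types from ir.h later in this diff:

#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/ir.h>

using namespace torch::jit::tensorexpr;

void node_ptr_demo() {
  // alloc<Node>(args...) is shorthand for std::make_shared<Node>(args...).
  ExprPtr lhs = alloc<IntImm>(1);
  ExprPtr rhs = alloc<IntImm>(2);
  ExprPtr sum = alloc<Add>(lhs, rhs);

  // to<Node>() is a checked downcast (dynamic_pointer_cast): null on mismatch.
  if (AddPtr add = to<Add>(sum)) {
    ExprPtr l = add->lhs();
    (void)l;
  }

  // static_to<Node>() skips the runtime check when the type is known.
  AddPtr add2 = static_to<Add>(sum);
  (void)add2;
}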

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/graph_opt.h
ADDED
@@ -0,0 +1,115 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+// Optimize aten::cat ops in the given subgraph.
+//
+// Moving users of cat to its inputs.
+//   Cat ops get lowered into multiple loops, one per input. When the result
+//   of cat is used by some other op, it results in a situation where inlining
+//   of cat does not happen. This in turn results in intermediate buffers
+//   being created for the result of cat, since it is not inlined.
+//
+// For example, consider the following graph:
+//   graph(%x : Float(10, strides=[1], device=cpu),
+//         %y : Float(20, strides=[1], device=cpu)):
+//     %dim : int = prim::Constant[value=0]()
+//     %xy_list : Tensor[] = prim::ListConstruct(%x, %y)
+//     %cat : Float(60, strides=[1], device=cpu) = aten::cat(%xy_list, %dim)
+//     %5 : Float(60, strides=[1], device=cpu) = aten::log(%cat)
+//     return (%5)
+//
+// This will get lowered into:
+//   Allocate(aten_cat);
+//   for (...)
+//     aten_cat[...] = x[...]
+//   for (...)
+//     aten_cat[...] = y[...]
+//   for (...)
+//     aten_log[...] = log(aten_cat[...])
+//   Free(aten_cat);
+// Note that aten_cat is not inlined into aten_log and it results in
+// an intermediate buffer allocation as well.
+//
+// Optimization:
+//   We move the ops that use the result of `cat` into its inputs whenever
+//   possible.
+//
+// The graph above will be transformed to:
+//   graph(%x : Float(10, strides=[1], device=cpu),
+//         %y : Float(20, strides=[1], device=cpu)):
+//     %3 : int = prim::Constant[value=0]()
+//     %7 : Float(10, strides=[1], device=cpu) = aten::log(%x)
+//     %8 : Float(20, strides=[1], device=cpu) = aten::log(%y)
+//     %9 : Tensor[] = prim::ListConstruct(%7, %8)
+//     %10 : Float(60, strides=[1], device=cpu) = aten::cat(%9, %3)
+//     return (%10)
+//
+// This will get lowered into:
+//   for (...)
+//     aten_cat[...] = log(x[...])
+//   for (...)
+//     aten_cat[...] = log(y[...])
+// aten_cat is the output buffer here.
+
+bool OptimizeCat(const std::shared_ptr<Graph>& graph);
+
+TORCH_API void annotateInputShapes(
+    const std::shared_ptr<Graph>& graph,
+    const std::vector<c10::optional<at::Tensor>>& example_inputs);
+TORCH_API std::shared_ptr<Graph> removeUnusedSelfArgument(
+    const std::shared_ptr<Graph>& graph);
+TORCH_API std::shared_ptr<Graph> removeGraphOutput(
+    const std::shared_ptr<Graph>& graph,
+    size_t idx);
+TORCH_API std::shared_ptr<Graph> replaceListOutputWithTuple(
+    const std::shared_ptr<Graph>& graph);
+
+// Perform \p ITERS rounds of "trimming" for the given \p GRAPH.
+//
+// Trimming means that we try to remove a small portion of the graph while
+// keeping it valid. This is useful for debugging when we try to find a minimal
+// example reproducing the issue at hand. When ITERS is 0, the graph remains
+// unchanged; when ITERS is a big number, the graph usually becomes empty.
+TORCH_API std::shared_ptr<Graph> trimGraph(
+    const std::shared_ptr<Graph>& graph,
+    int64_t iters);
+
+// Scan all values in the given graph and replace each dimension with a size Xi
+// present in \p SIZES with a symbolic shape Yi. Return a vector of symbol
+// values [Y0, Y1, .., Yn].
+//
+// For example:
+// Input:
+// graph(%x : Float(10, 20, 30, 40)):
+//   %y : Float(10, 20, 30, 40) = aten::relu(%x)
+//   return %y
+//
+// If we run makeShapesSymbolic(graph, {20, 40}), then we'll get:
+//
+// graph(%x : Float(10, SS(-3), 30, SS(-5))):
+//   %y : Float(10, SS(-3), 30, SS(-5)) = aten::relu(%x)
+//   return %y
+//
+// and get {-3, -5} as the return value.
+TORCH_API std::vector<int64_t> makeShapesSymbolic(
+    std::shared_ptr<Graph>& graph,
+    const std::vector<int64_t>& sizes);
+
+// Inspect the graph and report whether it can be converted to TE IR.
+// TODO: add error reporting for graphs that can't be converted.
+TORCH_API bool isGraphCompilable(const std::shared_ptr<Graph>& graph);
+
+// Examine the graph and (hackily) fill in missing tensor type info, such as
+// scalar type, device, and strides. Ideally, this should be done by proper
+// dtype/device/shape propagation passes, but until they are ready we can use
+// this, not always correct, workaround pass.
+TORCH_API void fixupMissingShapeInfo(const std::shared_ptr<Graph>& graph);
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
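
A rough sketch of how these passes compose on the example graph from the comment above, assuming the IR parser declared in torch/csrc/jit/ir/irparser.h:

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/tensorexpr/graph_opt.h>

using namespace torch::jit;

void graph_opt_demo() {
  auto graph = std::make_shared<Graph>();
  parseIR(
      R"IR(
graph(%x : Float(10, strides=[1], device=cpu),
      %y : Float(20, strides=[1], device=cpu)):
  %dim : int = prim::Constant[value=0]()
  %xy_list : Tensor[] = prim::ListConstruct(%x, %y)
  %cat : Float(60, strides=[1], device=cpu) = aten::cat(%xy_list, %dim)
  %5 : Float(60, strides=[1], device=cpu) = aten::log(%cat)
  return (%5))IR",
      graph.get());

  // Push aten::log into the inputs of aten::cat so cat's buffer can serve
  // as the kernel output directly.
  tensorexpr::OptimizeCat(graph);

  // Replace the concrete sizes 10 and 20 with symbolic shapes; the returned
  // vector holds the new symbols' ids.
  std::vector<int64_t> syms = tensorexpr::makeShapesSymbolic(graph, {10, 20});
  (void)syms;
}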

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/half_support.h
ADDED
@@ -0,0 +1,217 @@
+#pragma once
+
+#include <torch/csrc/jit/tensorexpr/codegen.h>
+#include <torch/csrc/jit/tensorexpr/ir.h>
+#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
+#include <torch/csrc/jit/tensorexpr/tensor.h>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+// Walk the Statement looking for Half size loads/stores.
+class HalfChecker : public IRVisitor {
+ public:
+  HalfChecker(const std::vector<CodeGen::BufferArg>& args) {
+    for (const auto& BA : args) {
+      hasHalf_ |= BA.dtype().scalar_type() == ScalarType::Half;
+    }
+  }
+
+  bool hasHalf() const {
+    return hasHalf_;
+  }
+
+  bool hasBFloat16() const {
+    return hasBFloat16_;
+  }
+
+  void visit(LoadPtr v) override {
+    hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half;
+    hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16;
+    IRVisitor::visit(v);
+  }
+
+  void visit(StorePtr v) override {
+    hasHalf_ |= v->buf()->dtype().scalar_type() == ScalarType::Half;
+    hasBFloat16_ |= v->buf()->dtype().scalar_type() == ScalarType::BFloat16;
+    IRVisitor::visit(v);
+  }
+
+  void visit(HalfImmPtr v) override {
+    hasHalf_ = true;
+  }
+
+  void visit(BFloat16ImmPtr v) override {
+    hasBFloat16_ = true;
+  }
+
+  void visit(CastPtr v) override {
+    hasHalf_ |= v->dtype().scalar_type() == ScalarType::Half;
+    hasBFloat16_ |= v->dtype().scalar_type() == ScalarType::BFloat16;
+    IRVisitor::visit(v);
+  }
+
+ private:
+  bool hasHalf_{false};
+  bool hasBFloat16_{false};
+};
+
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+class HalfRewriter : public IRMutator {
+  ExprPtr mutate(LoadPtr v) override {
+    ExprPtr child = IRMutator::mutate(v);
+    if (!isHalf(child)) {
+      return child;
+    }
+
+    ExprPtr ret = alloc<Cast>(
+        child->dtype().cloneWithScalarType(ScalarType::Float), child);
+
+    inserted_half_casts_.insert(ret);
+    return ret;
+  }
+
+  StmtPtr mutate(StorePtr v) override {
+    // Since mutation changes the `value()` expression in-place, we need to
+    // get the dtype of the `value()` before that is mutated.
+    auto newType = v->value()->dtype();
+    ExprPtr new_val = v->value()->accept_mutator(this);
+    auto bufType = v->buf()->dtype();
+
+    if (isHalf(newType.scalar_type())) {
+      new_val = alloc<Cast>(newType, new_val);
+      inserted_half_casts_.insert(new_val);
+    }
+
+    // The scalar_type of value is not Half while the buf is Half.
+    if (!isHalf(newType.scalar_type()) && isHalf(bufType.scalar_type())) {
+      new_val = alloc<Cast>(
+          newType.cloneWithScalarType(bufType.scalar_type()), new_val);
+      inserted_half_casts_.insert(new_val);
+    }
+
+    v->set_value(new_val);
+    return v;
+  }
+
+  ExprPtr mutate(HalfImmPtr v) override {
+    return alloc<Cast>(kFloat, v);
+  }
+
+  ExprPtr mutate(BFloat16ImmPtr v) override {
+    return alloc<Cast>(kFloat, v);
+  }
+
+  ExprPtr mutate(CastPtr v) override {
+    ExprPtr child = v->src_value()->accept_mutator(this);
+
+    // Just don't allow half casts we didn't insert.
+    if (isHalf(v)) {
+      if (inserted_half_casts_.count(v) < 1) {
+        v->set_src_value(child);
+        v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat));
+        return v;
+      }
+    }
+
+    // Remove Half(Float()) and friends.
+    CastPtr cast_child = to<Cast>(child);
+    if (cast_child) {
+      auto cast_to_double = v->dtype().scalar_type() == ScalarType::Double;
+      auto from_half = isHalf(cast_child->src_value());
+      // Cannot simplify the double(float(half)) to double(half) as NNC does
+      // not support casting BF16 to double directly.
+      auto not_cast_half_to_double = !(cast_to_double && from_half);
+      if (v->dtype().is_floating_point() &&
+          cast_child->dtype().is_floating_point() && not_cast_half_to_double) {
+        return alloc<Cast>(v->dtype(), cast_child->src_value());
+      }
+    }
+
+    if (child == v->src_value()) {
+      return v;
+    }
+
+    return alloc<Cast>(v->dtype(), child);
+  }
+
+  StmtPtr mutate(LetPtr v) override {
+    if (isHalf(v->var()->dtype().scalar_type())) {
+      VarPtr load_new_var = alloc<Var>(v->var()->name_hint(), kFloat);
+      ExprPtr new_value = alloc<Cast>(
+          v->var()->dtype().cloneWithScalarType(ScalarType::Float),
+          v->value()->accept_mutator(this));
+      var_map[v->var()] = load_new_var;
+
+      return alloc<Let>(load_new_var, new_value);
+    }
+
+    return IRMutator::mutate(v);
+  }
+
+  ExprPtr mutate(VarPtr v) override {
+    auto it = var_map.find(v);
+    if (it != var_map.end()) {
+      return it->second;
+    }
+
+    return v;
+  }
+
+  template <typename T>
+  ExprPtr mutateArithmetic(T v) {
+    IRMutator::mutate(v);
+    if (isHalf(v)) {
+      v->set_dtype(v->dtype().cloneWithScalarType(c10::kFloat));
+    }
+    return v;
+  }
+
+  ExprPtr mutate(AddPtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(SubPtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(MulPtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(DivPtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(MaxPtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(MinPtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(CompareSelectPtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(BroadcastPtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(IfThenElsePtr v) override {
+    return mutateArithmetic(v);
+  }
+  ExprPtr mutate(IntrinsicsPtr v) override {
+    return mutateArithmetic(v);
+  }
+
+ private:
+  static bool isHalf(ScalarType st) {
+    return st == ScalarType::Half || st == ScalarType::BFloat16;
+  }
+
+  static bool isHalf(ExprPtr v) {
+    return isHalf(v->dtype().scalar_type());
+  }
+
+  std::unordered_set<ExprPtr> inserted_half_casts_;
+  std::unordered_map<VarPtr, VarPtr> var_map;
+};
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
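
The checker and rewriter are typically run back to back before codegen: if any Half or BFloat16 values are present, loads are widened to float and narrowing casts are reinserted only at stores. A minimal sketch, assuming stmt and args come from an existing lowering:

#include <vector>
#include <torch/csrc/jit/tensorexpr/half_support.h>

using namespace torch::jit::tensorexpr;

// Widen fp16/bf16 compute to float when any is present; codegen then only
// sees float arithmetic plus explicit casts at loads and stores.
StmtPtr widenHalfCompute(
    StmtPtr stmt,
    const std::vector<CodeGen::BufferArg>& args) {
  HalfChecker checker(args);
  stmt->accept(&checker);
  if (checker.hasHalf() || checker.hasBFloat16()) {
    HalfRewriter rewriter;
    stmt = stmt->accept_mutator(&rewriter);
  }
  return stmt;
}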

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/hash_provider.h
ADDED
@@ -0,0 +1,304 @@
+#pragma once
+
+#include <torch/csrc/jit/tensorexpr/ir.h>
+#include <torch/csrc/jit/tensorexpr/ir_printer.h>
+#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
+#include <torch/csrc/jit/tensorexpr/tensor.h>
+
+#include <utility>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+struct TORCH_API SimplifierHashType {
+  SimplifierHashType() = default;
+  explicit SimplifierHashType(size_t s) : _h(s) {}
+
+  bool operator==(const SimplifierHashType& other) const;
+  bool operator!=(const SimplifierHashType& other) const;
+  bool operator<(const SimplifierHashType& other) const;
+  bool operator==(const size_t other) const;
+  bool operator!=(const size_t other) const;
+
+  size_t _h{0};
+};
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
+
+namespace std {
+template <>
+struct hash<torch::jit::tensorexpr::SimplifierHashType> {
+  size_t operator()(const torch::jit::tensorexpr::SimplifierHashType& k) const {
+    return k._h;
+  }
+};
+
+} // namespace std
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+#define CACHE_GUARD()  \
+  if (cachedHash(v)) { \
+    return;            \
+  }
+
+class Term;
+class Polynomial;
+
+/* Expression hasher providing comparable values representing sub-exprs.
+ * Uses memoization to avoid excessive recursion. */
+class TORCH_API HashProvider : public IRVisitor {
+ public:
+  template <class T>
+  SimplifierHashType hash(T e) {
+    // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
+    e->accept(this);
+    return hashOf(e);
+  }
+
+  bool cachedHash(ExprPtr e) {
+    return exprToHash_.find(e) != exprToHash_.end();
+  }
+  bool cachedHash(StmtPtr s) {
+    return stmtToHash_.find(s) != stmtToHash_.end();
+  }
+
+  void clearCache() {
+    exprToHash_.clear();
+    stmtToHash_.clear();
+  }
+
+  void visit(AddPtr v) override;
+  void visit(SubPtr v) override;
+  void visit(MulPtr v) override;
+  void visit(DivPtr v) override;
+  void visit(ModPtr v) override;
+  void visit(RoundOffPtr v) override;
+  void visit(MaxPtr v) override;
+  void visit(MinPtr v) override;
+  void visit(AndPtr v) override;
+  void visit(OrPtr v) override;
+  void visit(XorPtr v) override;
+  void visit(LshiftPtr v) override;
+  void visit(RshiftPtr v) override;
+  void visit(CompareSelectPtr v) override;
+
+// NOLINTNEXTLINE
+#define IMM_VISIT(Type, Name)                    \
+  void visit(Name##ImmPtr v) override {          \
+    CACHE_GUARD();                               \
+    putHash(v, hash_combine(#Name, v->value())); \
+  }
+  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_VISIT);
+#undef IMM_VISIT
+
+  void visit(CastPtr v) override;
+  void visit(VarPtr v) override;
+  void visit(RampPtr v) override;
+  void visit(LoadPtr v) override;
+  void visit(StorePtr v) override;
+  void visit(BlockPtr v) override;
+  void visit(ForPtr v) override;
+  void visit(BroadcastPtr v) override;
+  void visit(IfThenElsePtr v) override;
+  void visit(IntrinsicsPtr v) override;
+  void visit(AllocatePtr v) override;
+  void visit(FreePtr v) override;
+  void visit(CondPtr v) override;
+  void visit(TermPtr v) override;
+  void visit(PolynomialPtr v) override;
+  void visit(MaxTermPtr v) override;
+  void visit(MinTermPtr v) override;
+
+  template <typename... Types>
+  SimplifierHashType hash_combine(const Types&... args) {
+    SimplifierHashType seed;
+    _hash_combine(seed, args...);
+    return seed;
+  }
+
+ private:
+  SimplifierHashType hashOf(ExprPtr e) {
+    auto it = exprToHash_.find(e);
+    if (it != exprToHash_.end()) {
+      return it->second;
+    }
+
+    // As a failsafe, fall back to IRPrinter.
+    std::stringstream ss;
+    IRPrinter printer(ss);
+    e->accept(&printer);
+    SimplifierHashType hash = SimplifierHashType(te_hash(ss.str()));
+    putHash(std::move(e), hash);
+
+    return hash;
+  }
+
+  SimplifierHashType hashOf(StmtPtr s) {
+    auto it = stmtToHash_.find(s);
+    if (it != stmtToHash_.end()) {
+      return it->second;
+    }
+
+    // As a failsafe, fall back to IRPrinter.
+    std::stringstream ss;
+    IRPrinter printer(ss);
+    s->accept(&printer);
+    SimplifierHashType hash = SimplifierHashType(te_hash(ss.str()));
+    putHash(std::move(s), hash);
+
+    return hash;
+  }
+
+  // Hash funcs for various types; numbers are random.
+  template <typename T>
+  void _hash_combine(SimplifierHashType& seed, const T& val) {
+    seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
+  }
+
+  void _hash_combine(SimplifierHashType& seed, const char* val) {
+    seed._h ^= te_hash(val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
+  }
+
+  // at::Half doesn't have a prime_number_hash, so cast to short.
+  void _hash_combine(SimplifierHashType& seed, const at::Half& val) {
+    seed._h ^=
+        te_hash((uint16_t)val) + 0x1f752c19 + (seed._h << 7) + (seed._h >> 4);
+  }
+
+  void _hash_combine(SimplifierHashType& seed, const Dtype& val) {
+    seed._h ^= te_hash(val.ToCppString()) + 0x1f752c19 + (seed._h << 7) +
+        (seed._h >> 4);
+  }
+
+  void _hash_combine(SimplifierHashType& seed, ExprPtr e) {
+    _hash_combine(seed, hash(std::move(e)));
+  }
+
+  template <typename T, typename... Types>
+  void _hash_combine(
+      SimplifierHashType& seed,
+      const T& val,
+      const Types&... args) {
+    _hash_combine(seed, val);
+    _hash_combine(seed, args...);
+  }
+
+  void putHash(ExprPtr e, SimplifierHashType h) {
+    auto res = exprToHash_.emplace(e, h);
+    if (res.second == false) {
+      // This is always a logic bug since we should check the cache first.
+      throw std::runtime_error("hash collision");
+    }
+  }
+  void putHash(StmtPtr s, SimplifierHashType h) {
+    auto res = stmtToHash_.emplace(s, h);
+    if (res.second == false) {
+      // This is always a logic bug since we should check the cache first.
+      throw std::runtime_error("hash collision");
+    }
+  }
+
+  std::unordered_map<ExprPtr, SimplifierHashType> exprToHash_;
+  std::unordered_map<StmtPtr, SimplifierHashType> stmtToHash_;
+  UniqueNameManager name_manager_;
+
+  size_t te_hash(SimplifierHashType val) {
+    return val._h;
+  }
+
+  size_t te_hash(int64_t val) {
+    // put the thing down.
+    size_t h = val ^ 0x647AA4D20C0B;
+    // bit flip it.
+    size_t h2 = ~h;
+    // and reverse byte order.
+    size_t h3 = 0;
+    for (unsigned int i = 0; i < 64; i += 8) {
+      h3 |= ((h2 >> i) & 0xFF) << (64 - i - 8);
+    }
+    return h3;
+  }
+
+  size_t te_hash(int32_t val) {
+    int64_t v2 = val;
+    return te_hash(v2);
+  }
+
+  size_t te_hash(uint32_t val) {
+    int64_t v2 = val;
+    return te_hash(v2);
+  }
+
+  size_t te_hash(uint64_t val) {
+    int64_t v2 = val;
+    return te_hash(v2);
+  }
+
+  size_t te_hash(int16_t val) {
+    int64_t v2 = val;
+    return te_hash(v2);
+  }
+
+  size_t te_hash(std::string val) {
+    size_t hash{0};
+    int64_t intval{0};
+    int64_t s = val.size() - 1;
+    while (s >= 0) {
+      for (unsigned int i = 0; i < 8; ++i) {
+        if (s < 0)
+          break;
+        // NOLINTNEXTLINE(bugprone-signed-char-misuse)
+        int64_t c = val.data()[s];
+        intval |= (c << (i * 8));
+
+        s--;
+      }
+      hash ^= te_hash(intval);
+      intval = 0;
+    }
+
+    return hash;
+  }
+
+  size_t te_hash(double d) {
+    // memcpy as type punning. Should be optimized out.
+    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+    int64_t n;
+    std::memcpy(&n, &d, sizeof d);
+    return te_hash(n);
+  }
+
+  size_t te_hash(float d) {
+    // memcpy as type punning. Should be optimized out.
+    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+    int32_t n;
+    std::memcpy(&n, &d, sizeof d);
+    return te_hash(n);
+  }
+
+  size_t te_hash(at::Half d) {
+    // memcpy as type punning. Should be optimized out.
+    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+    int16_t n;
+    std::memcpy(&n, &d, sizeof d);
+    return te_hash(n);
+  }
+
+  size_t te_hash(at::BFloat16 d) {
+    // memcpy as type punning. Should be optimized out.
+    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+    int16_t n;
+    std::memcpy(&n, &d, sizeof d);
+    return te_hash(n);
+  }
+};
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
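
HashProvider is how the simplifier asks whether two sub-expressions are structurally identical without deep tree comparisons. A minimal sketch:

#include <torch/csrc/jit/tensorexpr/hash_provider.h>

using namespace torch::jit::tensorexpr;

// True when the two expression trees hash to the same SimplifierHashType.
// hash() visits each sub-tree once and memoizes per-node results, so
// repeated queries over shared sub-expressions stay cheap.
bool sameStructure(const ExprPtr& a, const ExprPtr& b) {
  HashProvider hasher;
  return hasher.hash(a) == hasher.hash(b);
}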

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/intrinsic_symbols.h
ADDED
@@ -0,0 +1,22 @@
+#pragma once
+
+#ifdef TORCH_ENABLE_LLVM
+#include <c10/util/ArrayRef.h>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+struct SymbolAddress {
+  const char* symbol;
+  void* address;
+
+  SymbolAddress(const char* sym, void* addr) : symbol(sym), address(addr) {}
+};
+
+c10::ArrayRef<SymbolAddress> getIntrinsicSymbols();
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
+#endif // TORCH_ENABLE_LLVM
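
getIntrinsicSymbols() is a plain name-to-address table the LLVM JIT can resolve calls against. A hypothetical linear lookup over it (LLVM builds only):

#ifdef TORCH_ENABLE_LLVM
#include <cstring>
#include <torch/csrc/jit/tensorexpr/intrinsic_symbols.h>

// Hypothetical helper: scan the table for a symbol name and return its
// address, or nullptr if it is not a known intrinsic.
void* findIntrinsic(const char* name) {
  for (const auto& s : torch::jit::tensorexpr::getIntrinsicSymbols()) {
    if (std::strcmp(s.symbol, name) == 0) {
      return s.address;
    }
  }
  return nullptr;
}
#endif // TORCH_ENABLE_LLVM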
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir.h
ADDED
|
@@ -0,0 +1,934 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <string>
|
| 4 |
+
#include <utility>
|
| 5 |
+
#include <vector>
|
| 6 |
+
|
| 7 |
+
#include <c10/util/string_utils.h>
|
| 8 |
+
#include <torch/csrc/jit/tensorexpr/exceptions.h>
|
| 9 |
+
#include <torch/csrc/jit/tensorexpr/expr.h>
|
| 10 |
+
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
|
| 11 |
+
#include <torch/csrc/jit/tensorexpr/stmt.h>
|
| 12 |
+
|
| 13 |
+
#include <ATen/core/ivalue.h>
|
| 14 |
+
|
| 15 |
+
namespace torch {
|
| 16 |
+
namespace jit {
|
| 17 |
+
namespace tensorexpr {
|
| 18 |
+
|
| 19 |
+
enum CompareSelectOperation {
|
| 20 |
+
kEQ = 0,
|
| 21 |
+
kGT,
|
| 22 |
+
kGE,
|
| 23 |
+
kLT,
|
| 24 |
+
kLE,
|
| 25 |
+
kNE,
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
enum CompareSelectBias {
|
| 29 |
+
kUnbiased,
|
| 30 |
+
kLikely,
|
| 31 |
+
kUnlikely,
|
| 32 |
+
};
|
| 33 |
+
|
| 34 |
+
inline int getPrecedence(IRNodeType ty) {
|
| 35 |
+
// Match C++ operator precedence rules, since some pretty-print expressions to
|
| 36 |
+
// C++. SEE: https://en.cppreference.com/w/cpp/language/operator_precedence
|
| 37 |
+
switch (ty) {
|
| 38 |
+
case kPrimitive:
|
| 39 |
+
return 0;
|
| 40 |
+
case kCast:
|
| 41 |
+
case kBitCast:
|
| 42 |
+
return 2;
|
| 43 |
+
case kAdd:
|
| 44 |
+
case kSub:
|
| 45 |
+
return 6;
|
| 46 |
+
case kMul:
|
| 47 |
+
case kDiv:
|
| 48 |
+
case kMod:
|
| 49 |
+
return 5;
|
| 50 |
+
case kMax:
|
| 51 |
+
case kMin:
|
| 52 |
+
return 99;
|
| 53 |
+
case kAnd:
|
| 54 |
+
return 11;
|
| 55 |
+
case kOr:
|
| 56 |
+
return 13;
|
| 57 |
+
case kLshift:
|
| 58 |
+
case kRshift:
|
| 59 |
+
return 7;
|
| 60 |
+
case kXor:
|
| 61 |
+
return 12;
|
| 62 |
+
case kCompareSelect:
|
| 63 |
+
return 16;
|
| 64 |
+
default:
|
| 65 |
+
return 99;
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
class TORCH_API Cast : public ExprNode<Cast> {
|
| 70 |
+
public:
|
| 71 |
+
ExprPtr src_value() const {
|
| 72 |
+
return src_value_;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
void set_src_value(ExprPtr src_value) {
|
| 76 |
+
src_value_ = std::move(src_value);
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
|
| 80 |
+
return ExprHandle(alloc<Cast>(dtype, src_value.node()));
|
| 81 |
+
}
|
| 82 |
+
Cast(Dtype dtype, ExprPtr src_value)
|
| 83 |
+
: ExprNodeBase(dtype, kCast), src_value_(std::move(src_value)) {}
|
| 84 |
+
|
| 85 |
+
bool isConstant() const override {
|
| 86 |
+
return src_value_->isConstant();
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
private:
|
| 90 |
+
ExprPtr src_value_;
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
template <typename T>
|
| 94 |
+
ExprHandle cast(const ExprHandle& src_value) {
|
| 95 |
+
return Cast::make(Dtype(ToDtype<T>(), src_value.dtype().lanes()), src_value);
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
// This is a bitwise cast, akin to bitcast in LLVM
|
| 99 |
+
class TORCH_API BitCast : public ExprNode<BitCast> {
|
| 100 |
+
public:
|
| 101 |
+
ExprPtr src_value() const {
|
| 102 |
+
return src_value_;
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
void set_src_value(ExprPtr src_value) {
|
| 106 |
+
src_value_ = std::move(src_value);
|
| 107 |
+
}
|
| 108 |
+
|
| 109 |
+
static ExprHandle make(Dtype dtype, const ExprHandle& src_value) {
|
| 110 |
+
return ExprHandle(alloc<BitCast>(dtype, src_value.node()));
|
| 111 |
+
}
|
| 112 |
+
BitCast(Dtype dtype, ExprPtr src_value)
|
| 113 |
+
: ExprNodeBase(dtype, kBitCast), src_value_(std::move(src_value)) {
|
| 114 |
+
TORCH_CHECK(src_value_->dtype().byte_size() == dtype.byte_size());
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
bool isConstant() const override {
|
| 118 |
+
return src_value_->isConstant();
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
private:
|
| 122 |
+
ExprPtr src_value_;
|
| 123 |
+
};
|
| 124 |
+
|
| 125 |
+
template <typename T>
|
| 126 |
+
ExprHandle bitcast(const ExprHandle& src_value) {
|
| 127 |
+
return BitCast::make(
|
| 128 |
+
Dtype(ToDtype<T>(), src_value.dtype().lanes()), src_value);
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
// Represent the expression node for binary operators.
|
| 132 |
+
// A CRTP pattern to share common code among the operators.
|
| 133 |
+
template <typename Op>
|
| 134 |
+
class BinaryOpNode : public ExprNode<Op> {
|
| 135 |
+
public:
|
| 136 |
+
ExprPtr lhs() const {
|
| 137 |
+
return this->lhs_;
|
| 138 |
+
}
|
| 139 |
+
ExprPtr rhs() const {
|
| 140 |
+
return this->rhs_;
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
void set_lhs(ExprPtr lhs) {
|
| 144 |
+
lhs_ = std::move(lhs);
|
| 145 |
+
}
|
| 146 |
+
|
| 147 |
+
void set_rhs(ExprPtr rhs) {
|
| 148 |
+
rhs_ = std::move(rhs);
|
| 149 |
+
}
|
| 150 |
+
|
| 151 |
+
static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
|
| 152 |
+
return ExprHandle(alloc<Op>(lhs.node(), rhs.node()));
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
|
| 156 |
+
BinaryOpNode(
|
| 157 |
+
ExprPtr lhs_v,
|
| 158 |
+
ExprPtr rhs_v,
|
| 159 |
+
IRNodeType expr_type,
|
| 160 |
+
ScalarType ret_type = ScalarType::Undefined)
|
| 161 |
+
: ExprNode<Op>(
|
| 162 |
+
// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage)
|
| 163 |
+
BinaryOpDtype(lhs_v->dtype(), rhs_v->dtype(), ret_type),
|
| 164 |
+
expr_type),
|
| 165 |
+
lhs_(CastIfNeeded(std::move(lhs_v), ExprNode<Op>::dtype())),
|
| 166 |
+
rhs_(CastIfNeeded(std::move(rhs_v), ExprNode<Op>::dtype())) {}
|
| 167 |
+
|
| 168 |
+
private:
|
| 169 |
+
static ExprPtr CastIfNeeded(ExprPtr expr, Dtype dst_dtype) {
|
| 170 |
+
if (expr->dtype() == dst_dtype) {
|
| 171 |
+
return expr;
|
| 172 |
+
}
|
| 173 |
+
return Cast::make(dst_dtype, ExprHandle(std::move(expr))).node();
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
ExprPtr lhs_;
|
| 177 |
+
ExprPtr rhs_;
|
| 178 |
+
};
|
| 179 |
+
|
| 180 |
+
namespace detail {
|
| 181 |
+
template <typename T>
|
| 182 |
+
void bin_op_deducer(BinaryOpNode<T>);
|
| 183 |
+
bool bin_op_deducer(...);
|
| 184 |
+
} // namespace detail
|
| 185 |
+
|
| 186 |
+
class TORCH_API Add : public BinaryOpNode<Add> {
|
| 187 |
+
public:
|
| 188 |
+
Add(ExprPtr lhs, ExprPtr rhs)
|
| 189 |
+
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAdd) {}
|
| 190 |
+
};
|
| 191 |
+
|
| 192 |
+
class TORCH_API Sub : public BinaryOpNode<Sub> {
|
| 193 |
+
public:
|
| 194 |
+
Sub(ExprPtr lhs, ExprPtr rhs)
|
| 195 |
+
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kSub) {}
|
| 196 |
+
};
|
| 197 |
+
|
| 198 |
+
class TORCH_API Mul : public BinaryOpNode<Mul> {
|
| 199 |
+
public:
|
| 200 |
+
Mul(ExprPtr lhs, ExprPtr rhs)
|
| 201 |
+
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMul) {}
|
| 202 |
+
};
|
| 203 |
+
|
| 204 |
+
class TORCH_API Div : public BinaryOpNode<Div> {
|
| 205 |
+
public:
|
| 206 |
+
Div(ExprPtr lhs, ExprPtr rhs)
|
| 207 |
+
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kDiv) {}
|
| 208 |
+
};
|
| 209 |
+
|
| 210 |
+
class TORCH_API Mod : public BinaryOpNode<Mod> {
|
| 211 |
+
public:
|
| 212 |
+
Mod(ExprPtr lhs, ExprPtr rhs)
|
| 213 |
+
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMod) {}
|
| 214 |
+
};
|
| 215 |
+
|
| 216 |
+
template <typename Op>
|
| 217 |
+
class BitwiseOpNode : public BinaryOpNode<Op> {
|
| 218 |
+
public:
|
| 219 |
+
BitwiseOpNode(ExprPtr lhs, ExprPtr rhs, IRNodeType type)
|
| 220 |
+
: BinaryOpNode<Op>(std::move(lhs), std::move(rhs), type) {}
|
| 221 |
+
|
| 222 |
+
static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) {
|
| 223 |
+
if (!lhs.dtype().is_integral()) {
|
| 224 |
+
throw unsupported_dtype();
|
| 225 |
+
}
|
| 226 |
+
if (lhs.dtype() != rhs.dtype()) {
|
| 227 |
+
throw malformed_input("lhs/rhs dtype mismatch");
|
| 228 |
+
}
|
| 229 |
+
return BinaryOpNode<Op>::make(lhs, rhs);
|
| 230 |
+
}
|
| 231 |
+
};
|
| 232 |
+
|
| 233 |
+
class TORCH_API And : public BitwiseOpNode<And> {
|
| 234 |
+
public:
|
| 235 |
+
And(ExprPtr lhs, ExprPtr rhs)
|
| 236 |
+
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kAnd) {}
|
| 237 |
+
};
|
| 238 |
+
|
| 239 |
+
class TORCH_API Or : public BitwiseOpNode<Or> {
|
| 240 |
+
public:
|
| 241 |
+
Or(ExprPtr lhs, ExprPtr rhs)
|
| 242 |
+
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kOr) {}
|
| 243 |
+
};
|
| 244 |
+
|
| 245 |
+
class TORCH_API Xor : public BitwiseOpNode<Xor> {
|
| 246 |
+
public:
|
| 247 |
+
Xor(ExprPtr lhs, ExprPtr rhs)
|
| 248 |
+
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kXor) {}
|
| 249 |
+
};
|
| 250 |
+
|
| 251 |
+
class TORCH_API Lshift : public BitwiseOpNode<Lshift> {
|
| 252 |
+
public:
|
| 253 |
+
Lshift(ExprPtr lhs, ExprPtr rhs)
|
| 254 |
+
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kLshift) {}
|
| 255 |
+
};
|
| 256 |
+
|
| 257 |
+
class TORCH_API Rshift : public BitwiseOpNode<Rshift> {
|
| 258 |
+
public:
|
| 259 |
+
Rshift(ExprPtr lhs, ExprPtr rhs)
|
| 260 |
+
: BitwiseOpNode(std::move(lhs), std::move(rhs), IRNodeType::kRshift) {}
|
| 261 |
+
};
|
| 262 |
+
|
| 263 |
+
// TODO: add TORCH_API
|
| 264 |
+
// Currently adding it results in a compilation error on Windows
|
| 265 |
+
class Max : public BinaryOpNode<Max> {
|
| 266 |
+
private:
|
| 267 |
+
bool propagate_nans_;
|
| 268 |
+
|
| 269 |
+
public:
|
| 270 |
+
Max(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
|
| 271 |
+
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMax),
|
| 272 |
+
propagate_nans_(propagate_nans) {}
|
| 273 |
+
|
| 274 |
+
bool propagate_nans() const {
|
| 275 |
+
return propagate_nans_;
|
| 276 |
+
}
|
| 277 |
+
|
| 278 |
+
static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete;
|
| 279 |
+
static ExprHandle make(
|
| 280 |
+
const ExprHandle& lhs,
|
| 281 |
+
const ExprHandle& rhs,
|
| 282 |
+
bool propagate_nans) {
|
| 283 |
+
return ExprHandle(alloc<Max>(lhs.node(), rhs.node(), propagate_nans));
|
| 284 |
+
}
|
| 285 |
+
};
|
| 286 |
+
|
| 287 |
+
// TODO: add TORCH_API
|
| 288 |
+
// Currently adding it results in a compilation error on Windows
|
| 289 |
+
class Min : public BinaryOpNode<Min> {
|
| 290 |
+
private:
|
| 291 |
+
bool propagate_nans_;
|
| 292 |
+
|
| 293 |
+
public:
|
| 294 |
+
Min(ExprPtr lhs, ExprPtr rhs, bool propagate_nans)
|
| 295 |
+
: BinaryOpNode(std::move(lhs), std::move(rhs), IRNodeType::kMin),
|
| 296 |
+
propagate_nans_(propagate_nans) {}
|
| 297 |
+
|
| 298 |
+
bool propagate_nans() const {
|
| 299 |
+
return propagate_nans_;
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
static ExprHandle make(const ExprHandle& lhs, const ExprHandle& rhs) = delete;
|
| 303 |
+
static ExprHandle make(
|
| 304 |
+
const ExprHandle& lhs,
|
| 305 |
+
const ExprHandle& rhs,
|
| 306 |
+
bool propagate_nans) {
|
| 307 |
+
return ExprHandle(alloc<Min>(lhs.node(), rhs.node(), propagate_nans));
|
| 308 |
+
}
|
| 309 |
+
};
|

// Encode typed immediate values e.g. IntImm, FloatImm.
#define IMM_DECLARE(Type, Name)                               \
  class TORCH_API Name##Imm : public ExprNode<Name##Imm> {    \
   public:                                                    \
    Name##Imm(Type value)                                     \
        : ExprNodeBase(k##Name, kPrimitive), value_(value) {} \
    bool isConstant() const override {                        \
      return true;                                            \
    }                                                         \
    Type value() const {                                      \
      return value_;                                          \
    }                                                         \
    static ExprHandle make(Type value) {                      \
      return ExprHandle(alloc<Name##Imm>(value));             \
    }                                                         \
                                                              \
   private:                                                   \
    Type value_;                                              \
  };
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_DECLARE);
#undef IMM_DECLARE

// Get immediate by ScalarType.
template <typename T>
ExprPtr getImmediateByType(ScalarType immType, T initialVal) {
  switch (immType) {
#define TYPE_CASE(Type, Name) \
  case ScalarType::Name:      \
    return alloc<Name##Imm>(Type(initialVal));
    // NOLINTNEXTLINE(bugprone-branch-clone)
    AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
#undef TYPE_CASE
    default:
      throw unsupported_dtype();
  }
  return nullptr;
}

template <typename T>
ExprPtr getImmediateByType(Dtype dtype, T initialVal) {
  return getImmediateByType<T>(dtype.scalar_type(), initialVal);
}

template <typename T>
ExprPtr immLike(const ExprPtr& e, T v) {
  return getImmediateByType<T>(e->dtype(), v);
}

template <typename T>
ExprPtr immLike(const ExprHandle& e, T v) {
  return immLike(e.node(), v);
}

inline c10::optional<int64_t> intValue(const ExprPtr& e) {
#define TYPE_CASE(Type, Name)      \
  if (auto v = to<Name##Imm>(e)) { \
    return v->value();             \
  }
  AT_FORALL_INT_TYPES(TYPE_CASE);
#undef TYPE_CASE
  return c10::nullopt;
}

inline c10::optional<int64_t> intValue(const ExprHandle& e) {
  return intValue(e.node());
}

template <typename T>
T immediateAs(const ExprPtr& e) {
#define TYPE_CASE(Type, Name)                \
  if (Name##ImmPtr imm = to<Name##Imm>(e)) { \
    return imm->value();                     \
  }
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
#undef TYPE_CASE
  throw unsupported_dtype();
  return 0;
}

template <typename T>
T immediateAs(const ExprHandle& e) {
  return immediateAs<T>(e.node());
}

template <typename T>
bool immediateEquals(const ExprPtr& e, T val) {
#define TYPE_CASE(Type, Name)                \
  if (Name##ImmPtr imm = to<Name##Imm>(e)) { \
    return imm->value() == val;              \
  }
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
#undef TYPE_CASE
  throw unsupported_dtype();
  return false;
}

TORCH_API bool immediateIsNegative(const ExprPtr& e);

TORCH_API bool immediateIsPositive(const ExprPtr& e);

TORCH_API bool immediateIsZero(const ExprPtr& e);
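Usage sketch (assumed calling code, not from the header): the `Imm` classes generated by `IMM_DECLARE` pair with these helpers for creating and inspecting constants.

  ExprHandle c = FloatImm::make(2.5f);              // FloatImm is generated by IMM_DECLARE above
  ExprPtr one = immLike(c, 1);                      // a FloatImm(1.f), matching c's dtype
  float v = immediateAs<float>(c);                  // 2.5f
  bool isZero = immediateEquals(c.node(), 0);       // false
  c10::optional<int64_t> iv = intValue(c.node());   // c10::nullopt: c is not an integer immediate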

// Represents a ramp vector node:
// [base, base + 1 * stride, ... , base + (lanes - 1) * stride]
class TORCH_API Ramp : public ExprNode<Ramp> {
 public:
  ExprPtr base() const {
    return base_;
  }
  ExprPtr stride() const {
    return stride_;
  }

  void set_base(ExprPtr base) {
    base_ = std::move(base);
  }

  void set_stride(ExprPtr stride) {
    stride_ = std::move(stride);
  }

  static ExprHandle make(
      const ExprHandle& base,
      const ExprHandle& stride,
      int lanes) {
    if (stride.dtype() != base.dtype()) {
      throw malformed_input("Bad stride in Ramp");
    }
    return ExprHandle(alloc<Ramp>(base.node(), stride.node(), lanes));
  }
  int lanes() const {
    return lanes_;
  }

  Ramp(ExprPtr base, ExprPtr stride, int lanes)
      : ExprNodeBase(Dtype(base->dtype(), lanes)),
        base_(std::move(base)),
        stride_(std::move(stride)),
        lanes_(lanes) {}

 private:
  ExprPtr base_;
  ExprPtr stride_;
  int lanes_;
};

class TORCH_API Load : public ExprNode<Load> {
 public:
  VarPtr base_handle() const {
    return buf_->base_handle();
  }
  std::vector<ExprPtr> indices() const {
    return indices_;
  }
  ExprPtr flat_index() const {
    TORCH_CHECK(indices_.size() == 1, "Indices haven't been flattened.");
    return indices_[0];
  }
  BufPtr buf() const {
    return buf_;
  }

  void set_buf(BufPtr buf) {
    buf_ = std::move(buf);
  }

  void set_indices(std::vector<ExprPtr> indices) {
    indices_ = std::move(indices);
  }

  static ExprHandle make(
      Dtype dtype,
      const BufHandle& buf,
      const std::vector<ExprHandle>& indices);
  static ExprHandle make(
      const BufHandle& buf,
      const std::vector<ExprHandle>& indices);

  Load(Dtype dtype, BufPtr base_handle, std::vector<ExprPtr> indices);
  Load(BufPtr base_handle, const std::vector<ExprPtr>& indices);

 private:
  BufPtr buf_;
  std::vector<ExprPtr> indices_;
};

class TORCH_API Broadcast : public ExprNode<Broadcast> {
 public:
  ExprPtr value() const {
    return value_;
  }

  void set_value(ExprPtr value) {
    value_ = std::move(value);
  }

  int lanes() const {
    return lanes_;
  }
  static ExprHandle make(const ExprHandle& value, int lanes) {
    return ExprHandle(alloc<Broadcast>(value.node(), lanes));
  }
  Broadcast(ExprPtr value, int lanes)
      : ExprNodeBase(Dtype(value->dtype(), lanes)),
        value_(std::move(value)),
        lanes_(lanes) {}

 private:
  ExprPtr value_;
  int lanes_;
};
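Usage sketch (hypothetical buffer and index variable): `Ramp`, `Load`, and `Broadcast` together express a vectorized read, with the lane count flowing into the result dtype.

  // Assumes BufHandle a("a", {1024}, kFloat) and VarHandle i("i", kInt) are in scope.
  ExprHandle idx = Ramp::make(i * IntImm::make(8), IntImm::make(1), 8); // [8i, 8i+1, ..., 8i+7]
  ExprHandle vec = Load::make(a, {idx});                                // an 8-lane float load
  ExprHandle twos = Broadcast::make(FloatImm::make(2.f), 8);            // scalar splat to 8 lanes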

class TORCH_API IfThenElse : public ExprNode<IfThenElse> {
 public:
  ExprPtr condition() const {
    return condition_;
  }

  // Lazily evaluated only if condition is true
  ExprPtr true_value() const {
    return true_;
  }

  // Lazily evaluated only if condition is false
  ExprPtr false_value() const {
    return false_;
  }

  void set_condition(ExprPtr condition) {
    condition_ = std::move(condition);
  }

  void set_true_value(ExprPtr true_value) {
    true_ = std::move(true_value);
  }

  void set_false_value(ExprPtr false_value) {
    false_ = std::move(false_value);
  }

  static ExprHandle make(
      const ExprHandle& c,
      const ExprHandle& t,
      const ExprHandle& f) {
    if (!c.dtype().is_integral()) {
      throw unsupported_dtype();
    }
    if (c.dtype().lanes() != 1) {
      throw unsupported_dtype();
    }
    if (t.dtype() != f.dtype()) {
      throw malformed_input("Bad dtype in IfThenElse");
    }
    return ExprHandle(alloc<IfThenElse>(c.node(), t.node(), f.node()));
  }

  IfThenElse(ExprPtr c, ExprPtr t, ExprPtr f)
      : ExprNodeBase(t->dtype()),
        condition_(std::move(c)),
        true_(std::move(t)),
        false_(std::move(f)) {}

 private:
  ExprPtr condition_;
  ExprPtr true_;
  ExprPtr false_;
};
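Usage sketch (hypothetical condition variable): per `make` above, the condition must be a scalar integral expression, while the two branch values only need to agree with each other; a nonzero condition is assumed to select the true branch.

  // Assumes VarHandle flag("flag", kInt) is in scope.
  ExprHandle sel = IfThenElse::make(flag, FloatImm::make(1.f), FloatImm::make(0.f));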

class TORCH_API CompareSelect : public ExprNode<CompareSelect> {
 public:
  CompareSelectOperation compare_select_op() const {
    return compare_op_;
  }
  ExprPtr lhs() const {
    return this->lhs_;
  }
  ExprPtr rhs() const {
    return this->rhs_;
  }
  ExprPtr ret_val1() const {
    return this->ret_val1_;
  }
  ExprPtr ret_val2() const {
    return this->ret_val2_;
  }

  void set_lhs(ExprPtr lhs) {
    lhs_ = std::move(lhs);
  }

  void set_rhs(ExprPtr rhs) {
    rhs_ = std::move(rhs);
  }

  void set_ret_val1(ExprPtr ret_val1) {
    ret_val1_ = std::move(ret_val1);
  }

  void set_ret_val2(ExprPtr ret_val2) {
    ret_val2_ = std::move(ret_val2);
  }

  CompareSelectBias bias() const {
    return bias_;
  }

  static ExprHandle make(
      const ExprHandle& lhs,
      const ExprHandle& rhs,
      CompareSelectOperation cmp_op,
      CompareSelectBias bias = kUnbiased) {
    if (lhs.dtype() != rhs.dtype()) {
      throw malformed_input("bad dtype in CompareSelect");
    }
    return ExprHandle(alloc<CompareSelect>(
        lhs.node(),
        rhs.node(),
        IntImm::make(1).node(),
        IntImm::make(0).node(),
        cmp_op,
        bias));
  }

  static ExprHandle make(
      const ExprHandle& lhs,
      const ExprHandle& rhs,
      const ExprHandle& ret_val1,
      const ExprHandle& ret_val2,
      CompareSelectOperation cmp_op,
      CompareSelectBias bias = kUnbiased) {
    if (lhs.dtype() != rhs.dtype() || ret_val1.dtype() != ret_val2.dtype()) {
      throw malformed_input("bad dtype in CompareSelect");
    }
    return ExprHandle(alloc<CompareSelect>(
        lhs.node(),
        rhs.node(),
        ret_val1.node(),
        ret_val2.node(),
        cmp_op,
        bias));
  }

  CompareSelect(
      ExprPtr lhs,
      ExprPtr rhs,
      ExprPtr ret_val1,
      ExprPtr ret_val2,
      CompareSelectOperation cmp_op,
      CompareSelectBias bias = kUnbiased)
      : ExprNodeBase(ret_val1->dtype()),
        lhs_(std::move(lhs)),
        rhs_(std::move(rhs)),
        ret_val1_(std::move(ret_val1)),
        ret_val2_(std::move(ret_val2)),
        compare_op_(cmp_op),
        bias_(bias) {}

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  CompareSelect(
      ExprPtr lhs,
      ExprPtr rhs,
      CompareSelectOperation cmp_op,
      CompareSelectBias bias = kUnbiased)
      : ExprNodeBase(kInt),
        lhs_(std::move(lhs)),
        rhs_(std::move(rhs)),
        ret_val1_(alloc<IntImm>(1)),
        ret_val2_(alloc<IntImm>(0)),
        compare_op_(cmp_op),
        bias_(bias) {}

 private:
  ExprPtr lhs_;
  ExprPtr rhs_;
  ExprPtr ret_val1_;
  ExprPtr ret_val2_;
  CompareSelectOperation compare_op_;
  CompareSelectBias bias_;
};
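Usage sketch (hypothetical input): a ReLU can be phrased as a four-operand CompareSelect; `kGT` is assumed to be one of the `CompareSelectOperation` values declared earlier in this header.

  // Assumes VarHandle x("x", kFloat) is in scope: x > 0 ? x : 0.
  ExprHandle relu = CompareSelect::make(
      x, FloatImm::make(0.f), x, FloatImm::make(0.f), kGT);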

enum IntrinsicsOp {
  kSin,
  kCos,
  kTan,
  kAsin,
  kAcos,
  kAtan,
  kAtan2,
  kSinh,
  kCosh,
  kTanh,
  kSigmoid,
  kExp,
  kExpm1,
  kAbs,
  kLog,
  kLog2,
  kLog10,
  kLog1p,
  kErf,
  kErfc,
  kSqrt,
  kRsqrt,
  kPow,
  kCeil,
  kFloor,
  kRound,
  kTrunc,
  kFmod,
  kRemainder,
  kLgamma,
  kFrac,
  kIsNan,
  kRand, // We need more discussions on this. Should we consider stateful?
  kMaxIntrinsicsOp,
};

class TORCH_API Intrinsics : public ExprNode<Intrinsics> {
 public:
  static ExprHandle make(IntrinsicsOp op_type, const ExprHandle& v1) {
    return ExprHandle(alloc<Intrinsics>(op_type, v1.node()));
  }

  static ExprHandle make(
      IntrinsicsOp op_type,
      const ExprHandle& v1,
      const ExprHandle& v2) {
    return ExprHandle(alloc<Intrinsics>(op_type, v1.node(), v2.node()));
  }

  static ExprHandle make(
      IntrinsicsOp op_type,
      const std::vector<ExprHandle>& params) {
    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
    std::vector<ExprPtr> params_nodes(params.size());
    for (size_t i = 0; i < params.size(); i++) {
      params_nodes[i] = params[i].node();
    }
    return ExprHandle(alloc<Intrinsics>(op_type, params_nodes));
  }

  static ExprHandle make(IntrinsicsOp op_type, Dtype dtype) {
    return ExprHandle(alloc<Intrinsics>(op_type, dtype));
  }

  IntrinsicsOp op_type() const {
    return op_type_;
  }

  std::string func_name() const {
    switch (op_type()) {
      case kSin:
        return "sin";
      case kCos:
        return "cos";
      case kTan:
        return "tan";
      case kAsin:
        return "asin";
      case kAcos:
        return "acos";
      case kAtan:
        return "atan";
      case kAtan2:
        return "atan2";
      case kSinh:
        return "sinh";
      case kCosh:
        return "cosh";
      case kTanh:
        return "tanh";
      case kSigmoid:
        return "sigmoid";
      case kExp:
        return "exp";
      case kAbs:
        return "abs";
      case kLog:
        return "log";
      case kLog2:
        return "log2";
      case kLog10:
        return "log10";
      case kLog1p:
        return "log1p";
      case kErf:
        return "erf";
      case kSqrt:
        return "sqrt";
      case kRsqrt:
        return "rsqrt";
      case kPow:
        return "pow";
      case kCeil:
        return "ceil";
      case kFloor:
        return "floor";
      case kRound:
        return "round";
      case kTrunc:
        return "trunc";
      case kRand:
        return "rand";
      case kFmod:
        return "fmod";
      case kRemainder:
        return "remainder";
      case kLgamma:
        return "lgamma";
      case kExpm1:
        return "expm1";
      case kErfc:
        return "erfc";
      case kFrac:
        return "frac";
      case kIsNan:
        return "isnan";
      default:
        throw std::runtime_error(
            "invalid op_type: " + c10::to_string(op_type()));
    }
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Intrinsics(IntrinsicsOp op_type, Dtype dtype)
      : ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
        params_({}),
        op_type_(op_type) {
    if (OpArgCount(op_type) != 0) {
      throw malformed_input("bad arg count in Intrinsics");
    }
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Intrinsics(IntrinsicsOp op_type, ExprPtr v1)
      : ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype())),
        params_({std::move(v1)}),
        op_type_(op_type) {
    if (OpArgCount(op_type) != 1) {
      throw malformed_input("bad arg count in Intrinsics");
    }
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Intrinsics(IntrinsicsOp op_type, ExprPtr v1, ExprPtr v2)
      : ExprNodeBase(IntrinsicsDtype(op_type, v1->dtype(), v2->dtype())),
        params_({std::move(v1), std::move(v2)}),
        op_type_(op_type) {
    if (OpArgCount(op_type) != 2) {
      throw malformed_input("bad arg count in Intrinsics");
    }
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Intrinsics(IntrinsicsOp op_type, const std::vector<ExprPtr>& params)
      : ExprNodeBase(IntrinsicsDtype(op_type, params)),
        params_(params),
        op_type_(op_type) {
    if (OpArgCount(op_type) != nparams()) {
      throw malformed_input("bad arg count in Intrinsics");
    }
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Intrinsics(
      IntrinsicsOp op_type,
      Dtype dtype,
      const std::vector<ExprPtr>& params)
      : ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
        params_(params),
        op_type_(op_type) {
    if (OpArgCount(op_type) != nparams()) {
      throw malformed_input("bad arg count in Intrinsics");
    }
  }

  bool isPure() const {
    return op_type_ != kRand;
  }

  int nparams() const {
    return params_.size();
  }

  ExprPtr param(int index) const {
    return params_[index];
  }
  const std::vector<ExprPtr>& params() const {
    return params_;
  }

  void set_params(std::vector<ExprPtr> params) {
    params_ = std::move(params);
  }

  static int OpArgCount(IntrinsicsOp op_type);

 private:
  static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1);
  static Dtype IntrinsicsDtype(IntrinsicsOp op_type, Dtype dt1, Dtype dt2);
  static Dtype IntrinsicsDtype(
      IntrinsicsOp op_type,
      const std::vector<ExprPtr>& params);

  std::vector<ExprPtr> params_;
  IntrinsicsOp op_type_;
};

TORCH_API std::vector<ExprPtr> ExprHandleVectorToExprVector(
    const std::vector<ExprHandle>&);
TORCH_API std::vector<ExprHandle> ExprVectorToExprHandleVector(
    const std::vector<ExprPtr>&);
TORCH_API std::vector<VarPtr> VarHandleVectorToVarVector(
    const std::vector<VarHandle>&);
TORCH_API std::vector<VarHandle> VarVectorToVarHandleVector(
    const std::vector<VarPtr>&);
TORCH_API ExprPtr flatten_index(
    const std::vector<ExprPtr>& dims,
    const std::vector<ExprPtr>& indices,
    const std::vector<ExprPtr>& strides);

} // namespace tensorexpr
} // namespace jit
} // namespace torch
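Usage sketch (hypothetical operands): intrinsics take one, two, or N parameters, and the constructors cross-check the count against `OpArgCount`.

  // Assumes VarHandle x("x", kFloat) and VarHandle y("y", kFloat) are in scope.
  ExprHandle s = Intrinsics::make(kSigmoid, x);  // unary form
  ExprHandle p = Intrinsics::make(kPow, x, y);   // binary form; a wrong arity throws malformed_input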
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_cloner.h
ADDED
@@ -0,0 +1,65 @@
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <vector>

#include <torch/csrc/jit/tensorexpr/ir_mutator.h>

namespace torch {
namespace jit {
namespace tensorexpr {

class TORCH_API IRCloner : public IRMutator {
 public:
  ~IRCloner() override = default;
  ExprPtr mutate(AddPtr v) override;
  ExprPtr mutate(SubPtr v) override;
  ExprPtr mutate(MulPtr v) override;
  ExprPtr mutate(DivPtr v) override;
  ExprPtr mutate(ModPtr v) override;
  ExprPtr mutate(MaxPtr v) override;
  ExprPtr mutate(MinPtr v) override;
  ExprPtr mutate(AndPtr v) override;
  ExprPtr mutate(OrPtr v) override;
  ExprPtr mutate(XorPtr v) override;
  ExprPtr mutate(LshiftPtr v) override;
  ExprPtr mutate(RshiftPtr v) override;
  ExprPtr mutate(CompareSelectPtr v) override;
#define IMM_MUTATE_DECLARE(Type, Name) ExprPtr mutate(Name##ImmPtr v) override;
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE);
#undef IMM_MUTATE_DECLARE
  ExprPtr mutate(CastPtr v) override;
  ExprPtr mutate(BitCastPtr v) override;
  ExprPtr mutate(VarPtr v) override;
  ExprPtr mutate(BufPtr v) override;
  ExprPtr mutate(RampPtr v) override;
  ExprPtr mutate(LoadPtr v) override;
  ExprPtr mutate(BroadcastPtr v) override;
  ExprPtr mutate(IfThenElsePtr v) override;
  ExprPtr mutate(IntrinsicsPtr v) override;

  ExprPtr mutate(TermPtr v) override;
  ExprPtr mutate(PolynomialPtr v) override;
  ExprPtr mutate(RoundOffPtr v) override;
  ExprPtr mutate(MaxTermPtr v) override;
  ExprPtr mutate(MinTermPtr v) override;

  ExprPtr mutate(ReduceOpPtr v) override;

  StmtPtr mutate(ForPtr v) override;
  StmtPtr mutate(BlockPtr v) override;
  StmtPtr mutate(StorePtr v) override;
  StmtPtr mutate(AtomicAddPtr v) override;
  StmtPtr mutate(SyncThreadsPtr v) override;
  StmtPtr mutate(ExternalCallPtr v) override;
  StmtPtr mutate(ExternalCallWithAllocPtr v) override;

  StmtPtr mutate(AllocatePtr v) override;
  StmtPtr mutate(FreePtr v) override;
  StmtPtr mutate(LetPtr v) override;
  StmtPtr mutate(CondPtr v) override;
};

} // namespace tensorexpr
} // namespace jit
} // namespace torch
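Usage sketch (assumes `accept_mutator` is the standard dispatch hook on `Expr`, as elsewhere in tensorexpr): cloning produces a structurally identical tree that can be rewritten without disturbing the original.

  IRCloner cloner;
  ExprPtr copy = original->accept_mutator(&cloner); // original: some ExprPtr in scope
  // 'copy' shares no nodes with 'original', so set_* mutations stay local to one tree.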
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_mutator.h
ADDED
@@ -0,0 +1,66 @@
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <vector>

namespace torch {
namespace jit {
namespace tensorexpr {

class TORCH_API IRMutator {
 public:
  virtual ~IRMutator() = default;
  virtual ExprPtr mutate(AddPtr v);
  virtual ExprPtr mutate(SubPtr v);
  virtual ExprPtr mutate(MulPtr v);
  virtual ExprPtr mutate(DivPtr v);
  virtual ExprPtr mutate(ModPtr v);
  virtual ExprPtr mutate(MaxPtr v);
  virtual ExprPtr mutate(MinPtr v);
  virtual ExprPtr mutate(AndPtr v);
  virtual ExprPtr mutate(OrPtr v);
  virtual ExprPtr mutate(XorPtr v);
  virtual ExprPtr mutate(LshiftPtr v);
  virtual ExprPtr mutate(RshiftPtr v);
  virtual ExprPtr mutate(CompareSelectPtr v);
#define IMM_MUTATE_DECLARE(Type, Name) virtual ExprPtr mutate(Name##ImmPtr v);
  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_MUTATE_DECLARE);
#undef IMM_MUTATE_DECLARE
  virtual ExprPtr mutate(CastPtr v);
  virtual ExprPtr mutate(BitCastPtr v);
  virtual ExprPtr mutate(VarPtr v);
  virtual ExprPtr mutate(BufPtr v);
  virtual ExprPtr mutate(RampPtr v);
  virtual ExprPtr mutate(LoadPtr v);
  virtual ExprPtr mutate(BroadcastPtr v);
  virtual ExprPtr mutate(IfThenElsePtr v);
  virtual ExprPtr mutate(IntrinsicsPtr v);

  virtual ExprPtr mutate(TermPtr v);
  virtual ExprPtr mutate(PolynomialPtr v);
  virtual ExprPtr mutate(RoundOffPtr v);
  virtual ExprPtr mutate(MaxTermPtr v);
  virtual ExprPtr mutate(MinTermPtr v);

  virtual ExprPtr mutate(ReduceOpPtr v);

  virtual StmtPtr mutate(ForPtr v);
  virtual StmtPtr mutate(BlockPtr v);
  virtual StmtPtr mutate(StorePtr v);
  virtual StmtPtr mutate(AtomicAddPtr v);
  virtual StmtPtr mutate(SyncThreadsPtr v);
  virtual StmtPtr mutate(ExternalCallPtr v);
  virtual StmtPtr mutate(ExternalCallWithAllocPtr v);

  virtual StmtPtr mutate(AllocatePtr v);
  virtual StmtPtr mutate(FreePtr v);
  virtual StmtPtr mutate(FreeExtPtr v);
  virtual StmtPtr mutate(PlacementAllocatePtr v);
  virtual StmtPtr mutate(LetPtr v);
  virtual StmtPtr mutate(CondPtr v);
};

} // namespace tensorexpr
} // namespace jit
} // namespace torch
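A minimal extension sketch (hypothetical subclass, not part of this header): override only the node kinds a pass cares about and let the default `mutate` overloads rebuild the rest.

  // Illustrative pass that rewrites every Add into a Sub.
  class AddToSub : public IRMutator {
   public:
    ExprPtr mutate(AddPtr v) override {
      ExprPtr lhs = v->lhs()->accept_mutator(this); // recurse into children first
      ExprPtr rhs = v->rhs()->accept_mutator(this);
      return alloc<Sub>(lhs, rhs);                  // alloc<> and lhs()/rhs() come from ir.h
    }
  };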
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_simplifier.h
ADDED
@@ -0,0 +1,554 @@
#pragma once

#include <torch/csrc/jit/tensorexpr/bounds_overlap.h>
#include <torch/csrc/jit/tensorexpr/eval.h>
#include <torch/csrc/jit/tensorexpr/hash_provider.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/types.h>

#include <utility>

/* IR Simplification
 *
 * Simplifies expressions in two stages:
 *  1. Recursively traverse the map combining similar operations into Terms
 * (interacted via Multiplication) and Polynomials (interacted via Addition). We
 * reorder the components of each Term or Polynomial into a consistent order to
 * allow combination or cancelling of like terms.
 *  2. Once the format of the tree is minimal, expand each Term into a sequence
 * of Muls, and each Polynomial into a sequence of Adds.
 */

namespace torch {
namespace jit {
namespace tensorexpr {

// A bunch of helpers for determining the Dtype of the output of a multi
// argument Term or Polynomial.
template <class ExprType>
Dtype promoteTypesVec(ExprPtr s, std::vector<ExprType>& v) {
  Dtype t = s->dtype();
  bool first = true;

  for (const auto& e : v) {
    if (first) {
      t = Dtype(t.scalar_type(), e->dtype().lanes());
      first = false;
    }
    t = promoteTypes(t, e->dtype());
  }
  return t;
}

template <class ExprType>
Dtype promoteTypesVec(std::vector<ExprType>& v) {
  if (v.empty()) {
    throw malformed_input("empty list of types");
  }

  Dtype t = v[0]->dtype();
  for (const auto& e : v) {
    t = promoteTypes(t, e->dtype());
  }
  return t;
}

template <class ExprType>
Dtype promoteTypesMap(
    ExprPtr s,
    std::unordered_map<SimplifierHashType, ExprType>& m) {
  Dtype t = s->dtype();
  bool first = true;
  for (auto& e : m) {
    if (first) {
      t = Dtype(t.scalar_type(), e.second->dtype().lanes());
      first = false;
    }
    t = promoteTypes(t, e.second->dtype());
  }
  return t;
}

template <class ExprType>
Dtype promoteTypesVar(ExprType e) {
  return e->dtype();
}

template <class ExprType, class... Args>
Dtype promoteTypesVar(ExprType e, Args... es) {
  Dtype lhs = e->dtype();
  Dtype rhs = promoteTypesVar(es...);
  if (e->isConstant()) {
    lhs = Dtype(lhs.scalar_type(), rhs.lanes());
  }

  return promoteTypes(lhs, rhs);
}

// Uses the evaluator to fold an Expression with constant terms.
// E.g. evaluateOp(Add(3, 4)) => 7.
// Expr v must not have any unbound Vars.
inline ExprPtr evaluateOp(ExprPtr v) {
  ExprHandle handle(v);
  ExprEval<SimpleIREvaluator> eval(handle);

  switch (v->dtype().scalar_type()) {
#define TYPE_CASE(Type, Name)                                 \
  case ScalarType::Name: {                                    \
    Type val = eval.value<Type>();                            \
    return getImmediateByType(v->dtype().scalar_type(), val); \
  }
    AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TYPE_CASE);
#undef TYPE_CASE
    default:
      LOG(FATAL) << "Unsupported datatype: " << v->dtype();
      return nullptr;
  }
  return nullptr;
}

// A Term represents a grouping of Exprs through multiplication.
// E.g. product(scalar, *variables).
class Term : public ExprNode<Term> {
 public:
  template <class... Args>
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Term(HashProvider& hasher, ExprPtr s, Args... ts)
      : ExprNodeBase(promoteTypesVar(s, ts...)), scalar_(s), hasher_(hasher) {
    CHECK(s->isConstant());
    addComponent(ts...);
    sort();
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Term(HashProvider& hasher, ExprPtr s, std::vector<ExprPtr> v)
      : ExprNodeBase(promoteTypesVec(s, v)),
        variables_(std::move(v)),
        scalar_(s),
        hasher_(hasher) {
    sort();
  }

  // Convenience constructor from a map of hash -> var, used when merging Terms.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Term(
      HashProvider& hasher,
      ExprPtr s,
      std::unordered_map<SimplifierHashType, ExprPtr> varmap)
      : ExprNodeBase(promoteTypesMap(s, varmap)), scalar_(s), hasher_(hasher) {
    for (auto& p : varmap) {
      addComponent(p.second);
    }
    sort();
  }

  ExprPtr scalar() const {
    return scalar_;
  }
  const std::vector<ExprPtr>& variables() const {
    return variables_;
  }
  HashProvider& hasher() const {
    return hasher_;
  }

  // Produce a hash of just the variable components of this term, to determine
  // if it can be combined with another term.
  SimplifierHashType hashVars() const;

 private:
  std::vector<ExprPtr> variables_;
  ExprPtr scalar_;
  HashProvider& hasher_;

  void addComponent() {}
  void addComponent(ExprPtr e) {
    variables_.push_back(std::move(e));
  }
  template <class... Es>
  void addComponent(ExprPtr e, Es&&... es) {
    addComponent(std::move(e));
    addComponent(std::forward<Es>(es)...);
  }

  // Sort by hash to normalize order of components.
  void sort();
};

// Polynomial represents a grouping of Exprs by addition.
// E.g. sum(*variables, scalar).
// This would better be called Expression, but, naming conflict...
class Polynomial : public ExprNode<Polynomial> {
 public:
  template <class... Args>
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Polynomial(HashProvider& hasher, ExprPtr s, Args... ts)
      : ExprNodeBase(promoteTypesVar(s, ts...)), scalar_(s), hasher_(hasher) {
    CHECK(s->isConstant());
    addTerm(ts...);
    sort();
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Polynomial(HashProvider& hasher, ExprPtr s, std::vector<TermPtr> v)
      : ExprNodeBase(promoteTypesVec(s, v)),
        variables_(std::move(v)),
        scalar_(s),
        hasher_(hasher) {
    sort();
  }

  // Helper constructor for list of terms with no scalar component.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Polynomial(HashProvider& hasher, std::vector<TermPtr> terms)
      : ExprNodeBase(promoteTypesVec(terms)),
        variables_(std::move(terms)),
        scalar_(getImmediateByType(dtype(), 0)),
        hasher_(hasher) {
    sort();
  }

  // Convenience constructor for map of hash -> var, used when merging
  // Polynomials.
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  Polynomial(
      HashProvider& hasher,
      ExprPtr s,
      std::unordered_map<SimplifierHashType, TermPtr> varmap)
      : ExprNodeBase(promoteTypesMap(s, varmap)), scalar_(s), hasher_(hasher) {
    for (auto& p : varmap) {
      addTerm(p.second);
    }
    sort();
  }

  ExprPtr scalar() const {
    return scalar_;
  }
  const std::vector<TermPtr>& variables() const {
    return variables_;
  }
  HashProvider& hasher() const {
    return hasher_;
  }

  SimplifierHashType hashVars() const;

 private:
  std::vector<TermPtr> variables_;
  ExprPtr scalar_;
  HashProvider& hasher_;

  void addTerm(TermPtr t) {
    variables_.push_back(std::move(t));
  }
  template <class... Ts>
  void addTerm(TermPtr t, Ts&&... ts) {
    addTerm(std::move(t));
    addTerm(std::forward<Ts>(ts)...);
  }

  // Sort by hash to normalize order of terms.
  void sort();
};

class RoundOff : public BinaryOpNode<RoundOff> {
 public:
  RoundOff(ExprPtr lhs, ExprPtr rhs)
      : BinaryOpNode(lhs, rhs, IRNodeType::kOther) {}
};

class MaxTerm : public ExprNode<MaxTerm> {
 public:
  template <class... Args>
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  MaxTerm(HashProvider& hasher, ExprPtr s, bool p, Args... ts)
      : ExprNodeBase(s ? promoteTypesVar(s, ts...) : promoteTypesVar(ts...)),
        scalar_(s),
        hasher_(hasher),
        propagate_nans_(p) {
    addComponent(ts...);
    uniquefy();
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  MaxTerm(HashProvider& hasher, ExprPtr s, bool p, std::vector<ExprPtr> v)
      : ExprNodeBase(s ? promoteTypesVec(s, v) : promoteTypesVec(v)),
        variables_(std::move(v)),
        scalar_(s),
        hasher_(hasher),
        propagate_nans_(p) {
    uniquefy();
  }

  bool propagate_nans() const {
    return propagate_nans_;
  }

  ExprPtr scalar() const {
    return scalar_;
  }
  const std::vector<ExprPtr>& variables() const {
    return variables_;
  }
  HashProvider& hasher() const {
    return hasher_;
  }

 private:
  std::vector<ExprPtr> variables_;
  ExprPtr scalar_;
  HashProvider& hasher_;
  bool propagate_nans_;

  void addComponent() {}
  void addComponent(ExprPtr e) {
    variables_.push_back(std::move(e));
  }
  template <class... Es>
  void addComponent(ExprPtr e, Es&&... es) {
    addComponent(std::move(e));
    addComponent(std::forward<Es>(es)...);
  }

  // Uniquefy the terms using their hash.
  void uniquefy();
};

class MinTerm : public ExprNode<MinTerm> {
 public:
  template <class... Args>
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  MinTerm(HashProvider& hasher, ExprPtr s, bool p, Args... ts)
      : ExprNodeBase(s ? promoteTypesVar(s, ts...) : promoteTypesVar(ts...)),
        scalar_(s),
        hasher_(hasher),
        propagate_nans_(p) {
    addComponent(ts...);
    uniquefy();
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  MinTerm(HashProvider& hasher, ExprPtr s, bool p, std::vector<ExprPtr> v)
      : ExprNodeBase(s ? promoteTypesVec(s, v) : promoteTypesVec(v)),
        variables_(std::move(v)),
        scalar_(s),
        hasher_(hasher),
        propagate_nans_(p) {
    uniquefy();
  }

  bool propagate_nans() const {
    return propagate_nans_;
  }

  ExprPtr scalar() const {
    return scalar_;
  }
  const std::vector<ExprPtr>& variables() const {
    return variables_;
  }
  HashProvider& hasher() const {
    return hasher_;
  }

 private:
  std::vector<ExprPtr> variables_;
  ExprPtr scalar_;
  HashProvider& hasher_;
  bool propagate_nans_;

  void addComponent() {}
  void addComponent(ExprPtr e) {
    variables_.push_back(std::move(e));
  }
  template <class... Es>
  void addComponent(ExprPtr e, Es&&... es) {
    addComponent(std::move(e));
    addComponent(std::forward<Es>(es)...);
  }

  // Uniquefy the terms using their hash.
  void uniquefy();
};

// Context-sensitive IR simplification
using VarBoundInfo = std::unordered_map<VarPtr, analysis::Bound>;

class TORCH_API SimplifierUnderContext : public IRMutator {
 public:
  ~SimplifierUnderContext() override = default;
  // Add boundary info for index variables in for-loops
  StmtPtr mutate(ForPtr v) override;

  ExprPtr mutate(DivPtr v) override;
  ExprPtr mutate(ModPtr v) override;
  ExprPtr mutate(CompareSelectPtr v) override;
  ExprPtr mutate(IfThenElsePtr v) override;

 protected:
  bool getLoopBoundInfo(const ExprPtr& expr, analysis::Bound* loop_bound_info);

 protected:
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  HashProvider hasher_;
  VarBoundInfo var_bound_info_;
};

// Stmt simplification should occur in both modes.
class TORCH_API PolynomialBase : public IRMutator {
 public:
  ~PolynomialBase() override = default;

  StmtPtr mutate(BlockPtr v) override;

  StmtPtr mutate(CondPtr v) override;

  StmtPtr mutate(ForPtr v) override;

  // Trivially factorize terms by GCD of scalar components.
  TermPtr factorizePolynomial(PolynomialPtr poly);

  HashProvider& hasher() {
    return hasher_;
  }

 protected:
  // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
  HashProvider hasher_;
};

// Simplify the IR by combining arithmetic expressions over common terms.
class TORCH_API PolynomialTransformer : public PolynomialBase {
 public:
  using PolynomialBase::mutate;
  // Inserts term into the provided map, in the case of a hash collision
  // combines the term with the existing and updates the map.
  void addOrUpdateTerm(
      std::unordered_map<SimplifierHashType, TermPtr>& varmap,
      TermPtr term);

  // Add Polynomial expressions, combining Terms representing the same
  // variables.
  ExprPtr addPolynomials(PolynomialPtr lhs, PolynomialPtr rhs);

  // Insert a new Term into the provided polynomial. If the new term has
  // common variables to an existing term it is combined.
  ExprPtr insertTerm(PolynomialPtr poly, TermPtr term);

  // Merge and simplify addition.
  ExprPtr mutate(AddPtr v) override;

  // Subtract one term from another, cancelling if necessary.
  ExprPtr subTerms(TermPtr lhs, TermPtr rhs, bool negated);

  // Subtract the RHS Polynomial from the LHS Polynomial, cancelling out where
  // possible.
  ExprPtr subPolynomials(PolynomialPtr lhs, PolynomialPtr rhs);

  // Merge and simplify subtraction.
  ExprPtr mutate(SubPtr v) override;

  // Multiply two terms together, usually creating a new term with the variable
  // lists concatenated.
  TermPtr mulTerms(TermPtr lhs, TermPtr rhs);

  // Multiply a Polynomial by a Term.
  ExprPtr polyByTerm(PolynomialPtr poly, TermPtr term);

  // Match a rounding pattern and create a RoundOff if found.
  ExprPtr isRoundOff(ExprPtr lhs, ExprPtr rhs);

  // Inserts a new component into a term, simplifying if possible.
  ExprPtr insertIntoTerm(TermPtr term, ExprPtr expr);

  // Merge and simplify multiplication.
  ExprPtr mutate(MulPtr v) override;

  ExprPtr mutate(DivPtr v) override;

  ExprPtr mutate(ModPtr v) override;

  ExprPtr mutate(AndPtr v) override;

  ExprPtr mutate(XorPtr v) override;

  ExprPtr mutate(LshiftPtr v) override;

  ExprPtr mutate(RshiftPtr v) override;

  ExprPtr mutate(MaxPtr v) override;

  ExprPtr mutate(MinPtr v) override;

  ExprPtr mutate(CompareSelectPtr v) override;

  ExprPtr mutate(IntrinsicsPtr v) override;

  ExprPtr mutate(CastPtr v) override;

  ExprPtr mutate(IfThenElsePtr v) override;

  static ExprPtr simplify(ExprPtr e);
  static ExprHandle simplify(const ExprHandle& e);
  static StmtPtr simplify(StmtPtr e);
};

// Expands Terms and Polynomial expressions into primitive operations.
// Does some simple factorization and reordering.
class TORCH_API TermExpander : public PolynomialBase {
  PolynomialTransformer* simplifier_;
  std::set<VarPtr> eliminated_allocations_;

 public:
  using PolynomialBase::mutate;
  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  TermExpander(PolynomialTransformer* simplifier) : simplifier_(simplifier) {}
  bool check_safe() {
    return eliminated_allocations_.empty();
  }

  // Expand Terms out to a series of Muls.
  ExprPtr mutate(TermPtr v) override;

  // Expand Polynomials out to a series of Adds.
  ExprPtr mutate(PolynomialPtr v) override;

  // Expand MaxTerms to a series of Max ops.
  ExprPtr mutate(MaxTermPtr v) override;

  // Expand MinTerms to a series of Min ops.
  ExprPtr mutate(MinTermPtr v) override;

  // Expand RoundOff to its component: Mul(Div(lhs, rhs), rhs).
  ExprPtr mutate(RoundOffPtr v) override;

  // Eliminate zero length allocations.
  StmtPtr mutate(AllocatePtr v) override;
  StmtPtr mutate(FreePtr v) override;

  // Override to enable condition fusing.
  BlockPtr fuseConditions(BlockPtr v);
  StmtPtr fuseSyncThreads(BlockPtr block);
  StmtPtr mutate(BlockPtr v) override;
};

class TORCH_API IRSimplifier {
 public:
  static StmtPtr simplify(StmtPtr s);
  static ExprPtr simplify(ExprPtr e);
  static ExprHandle simplify(const ExprHandle& e) {
    return ExprHandle(simplify(e.node()));
  }
};

// Flattens the buf and performs the simplifier on the flattened dims.
ExprPtr buf_flat_size(BufPtr v);
// Returns true if expressions A and B can be simplified to an equal expression.
TORCH_API bool exprEquals(ExprPtr A, ExprPtr B);

} // namespace tensorexpr
} // namespace jit
} // namespace torch
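Usage sketch (hypothetical expression): the two-stage pipeline described in the header comment is driven through `IRSimplifier::simplify`.

  // Assumes VarHandle x("x", kInt) is in scope.
  ExprHandle e = x * IntImm::make(2) + x * IntImm::make(3) + IntImm::make(4);
  ExprHandle s = IRSimplifier::simplify(e); // like terms combine; expected to end up as x * 5 + 4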
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_verifier.h
ADDED
@@ -0,0 +1,58 @@
#pragma once

#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>

namespace torch {
namespace jit {
namespace tensorexpr {

class Expr;
class ExprHandle;
class Mod;
class And;
class Or;
class Xor;
class Lshift;
class Rshift;
class CompareSelect;
class Ramp;
class Load;
class IfThenElse;
class Intrinsics;

class Stmt;
class ExternalCall;
class Store;
class For;
class Block;

class TORCH_API IRVerifier : public IRVisitor {
 public:
  IRVerifier() = default;

  void visit(ModPtr v) override;
  void visit(AndPtr v) override;
  void visit(OrPtr v) override;
  void visit(XorPtr v) override;
  void visit(LshiftPtr v) override;
  void visit(RshiftPtr v) override;
  void visit(CompareSelectPtr v) override;
  void visit(RampPtr v) override;
  void visit(LoadPtr v) override;
  void visit(IfThenElsePtr v) override;
  void visit(IntrinsicsPtr v) override;

  void visit(ExternalCallPtr v) override;
  void visit(StorePtr v) override;
  void visit(ForPtr v) override;
  void visit(BlockPtr v) override;
};

TORCH_API void verify(StmtPtr);
TORCH_API void verify(ExprPtr);
TORCH_API void verify(ExprHandle);

} // namespace tensorexpr
} // namespace jit
} // namespace torch
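Usage sketch (assumed behavior: verification failures surface as exceptions): running the verifier before codegen catches ill-formed nodes early.

  // Assumes StmtPtr root is a lowered statement in scope.
  verify(root); // traverses the tree; an ill-formed node is reported by throwing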
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/ir_visitor.h
ADDED
@@ -0,0 +1,64 @@
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>

namespace torch {
namespace jit {
namespace tensorexpr {

class TORCH_API IRVisitor {
 public:
  virtual ~IRVisitor() = default;
  virtual void visit(AddPtr v);
  virtual void visit(SubPtr v);
  virtual void visit(MulPtr v);
  virtual void visit(DivPtr v);
  virtual void visit(ModPtr v);
  virtual void visit(MaxPtr v);
  virtual void visit(MinPtr v);
  virtual void visit(AndPtr v);
  virtual void visit(OrPtr v);
  virtual void visit(XorPtr v);
  virtual void visit(LshiftPtr v);
  virtual void visit(RshiftPtr v);
  virtual void visit(CompareSelectPtr v);

#define IMM_PRINT_VISIT(Type, Name) virtual void visit(Name##ImmPtr v);

  AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, IMM_PRINT_VISIT)
#undef IMM_PRINT_VISIT

  virtual void visit(CastPtr v);
  virtual void visit(BitCastPtr v);
  virtual void visit(VarPtr v);
  virtual void visit(BufPtr v);
  virtual void visit(RampPtr v);
  virtual void visit(LoadPtr v);
  virtual void visit(ForPtr v);
  virtual void visit(BlockPtr v);
  virtual void visit(StorePtr v);
  virtual void visit(BroadcastPtr v);
  virtual void visit(IfThenElsePtr v);
  virtual void visit(IntrinsicsPtr v);
  virtual void visit(AllocatePtr v);
  virtual void visit(FreePtr v);
  virtual void visit(FreeExtPtr v);
  virtual void visit(PlacementAllocatePtr v);
  virtual void visit(LetPtr v);
  virtual void visit(CondPtr v);
  virtual void visit(TermPtr v);
  virtual void visit(PolynomialPtr v);
  virtual void visit(RoundOffPtr v);
  virtual void visit(MaxTermPtr v);
  virtual void visit(MinTermPtr v);
  virtual void visit(ReduceOpPtr v);
  virtual void visit(AtomicAddPtr v);
  virtual void visit(SyncThreadsPtr v);
  virtual void visit(ExternalCallPtr v);
  virtual void visit(ExternalCallWithAllocPtr v);
};

} // namespace tensorexpr
} // namespace jit
} // namespace torch
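A minimal read-only pass sketch (hypothetical subclass; assumes `accept` is the visitor entry point on IR nodes, as elsewhere in tensorexpr):

  // Counts Load nodes; the base-class visit overloads provide the traversal.
  class LoadCounter : public IRVisitor {
   public:
    int count = 0;
    void visit(LoadPtr v) override {
      count++;
      IRVisitor::visit(v); // keep descending into the load's indices
    }
  };
  // Usage: LoadCounter lc; stmt->accept(&lc); then read lc.count.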
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/kernel.h
ADDED
@@ -0,0 +1,382 @@
+#pragma once
+
+#include <torch/csrc/jit/ir/ir.h>
+#include <torch/csrc/jit/passes/symbolic_shape_runtime_fusion.h>
+#include <torch/csrc/jit/passes/utils/subgraph_utils.h>
+#include <torch/csrc/jit/runtime/interpreter.h>
+#include <torch/csrc/jit/tensorexpr/analysis.h>
+#include <torch/csrc/jit/tensorexpr/codegen.h>
+#include <torch/csrc/jit/tensorexpr/lowerings.h>
+#include <torch/csrc/jit/tensorexpr/tensor.h>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+struct SmallSizeTPairHash {
+ public:
+  std::size_t operator()(const std::pair<size_t, size_t>& x) const {
+    // hashing input index and then dim index
+    return x.first * 128 + x.second;
+  }
+};
+
+// Returns true if the TE fuser supports this conv2d.
+bool conv2dIsSupportedJit(const Node* node);
+// Returns true if the TE fuser supports this conv2d with mkldnn prepacked conv.
+bool mkldnnPrepackedConvIsSupportedJit(const Node* node);
+// Returns true if the TE _convolution node is Conv2d.
+bool isConv2d(const Node* node);
+// Returns true if the TE fuser supports this matmul.
+bool matmulIsSupported(const Node* node);
+template <typename T>
+inline std::vector<int64_t> bufferSizes(const T& t) {
+  std::vector<int64_t> sizes;
+  for (size_t i = 0; i < t->ndim(); i++) {
+    sizes.push_back(*intValue(t->dim(i)));
+  }
+  return sizes;
+}
+
+// Get the dimensions of a value.
+std::vector<ExprHandle> valueShape(const ArgValue& v);
+
+// If v is a tensor, broadcast it to match the shape of axes, or return
+// directly if v is a constant.
+ExprHandle tensorOrConstant(
+    const ArgValue& v,
+    const std::vector<ExprHandle>& axes);
+
+int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size);
+
+ExprHandle broadcast(BufHandle b, const std::vector<ExprHandle>& axes);
+
+ExprHandle constant(const ArgValue& v);
+
+std::vector<ExprHandle> computeIndicesToBroadcast(
+    const std::vector<ExprHandle>& outputAxes,
+    const std::vector<ExprHandle>& inputSizes);
+
+inline std::string getArgValueName(const ArgValue& a) {
+  if (std::holds_alternative<tensorexpr::BufHandle>(a)) {
+    return "BufHandle";
+  } else if (std::holds_alternative<tensorexpr::VarHandle>(a)) {
+    return "VarHandle";
+  } else if (std::holds_alternative<double>(a)) {
+    return "double";
+  } else if (std::holds_alternative<int64_t>(a)) {
+    return "int64_t";
+  } else if (std::holds_alternative<bool>(a)) {
+    return "bool";
+  } else if (std::holds_alternative<BufList>(a)) {
+    return "BufList";
+  } else if (std::holds_alternative<DoubleList>(a)) {
+    return "DoubleList";
+  } else if (std::holds_alternative<IntList>(a)) {
+    return "IntList";
+  } else if (std::holds_alternative<ArgNone>(a)) {
+    return "None";
+  } else {
+    throw std::runtime_error("ArgValue type not handled in string conversion");
+  }
+}
+
+template <class T>
+std::vector<T> convertVecArgValue(const std::vector<ArgValue>& v) {
+  std::vector<T> res;
+  for (auto& x : v) {
+    auto val = std::get_if<T>(&x);
+    if (val) {
+      res.push_back(*val);
+    } else {
+      throw std::runtime_error(
+          "vector type not homogeneous - found " + getArgValueName(x) +
+          ", expected " + getArgValueName(v[0]));
+    }
+  }
+  return res;
+}
+
+class TORCH_API TensorExprKernel {
+  struct ConstantDescr {
+    BufPtr buf;
+    // Only one of ptr and node is used at a time
+    // 1) ptr for the constant tensors
+    // 2) node for the constant custom class objects
+    void* ptr = nullptr;
+    Node* node = nullptr;
+  };
+
+ public:
+  // Constructor Params:
+  //  * subgraph
+  //      - the graph that needs to be compiled.
+  //  * kernel_func_name
+  //      - the name that should be used for the generated kernel.
+  //  * custom_lowerings
+  //      - map that represents custom lowering definitions for a set of ops.
+  //  * symbolic_shape_inputs
+  //      - a list of symbolic graph inputs that represent the symbolic dims
+  //        of the input tensors.
+  //  * pre_alloc
+  //      - a flag to control pre-allocation of buffers.
+  explicit TensorExprKernel(
+      const std::shared_ptr<Graph>& subgraph,
+      const std::string& kernel_func_name,
+      std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings =
+          {},
+      std::vector<int64_t> symbolic_shape_inputs = {},
+      bool pre_alloc = false,
+      std::unordered_map<
+          const torch::jit::Value*,
+          std::vector<torch::jit::StrideInput>> symbolic_strides = {});
+
+  explicit TensorExprKernel(
+      const std::shared_ptr<Graph>& subgraph,
+      std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings =
+          {},
+      std::vector<int64_t> symbolic_shape_inputs = {},
+      bool pre_alloc = false,
+      std::unordered_map<
+          const torch::jit::Value*,
+          std::vector<torch::jit::StrideInput>> symbolic_strides = {})
+      : TensorExprKernel(
+            subgraph,
+            SubgraphUtils::generateNameForGraph(subgraph),
+            custom_lowerings,
+            symbolic_shape_inputs,
+            pre_alloc,
+            symbolic_strides) {}
+
+  void run(Stack& stack) const;
+  void runFast(
+      const std::vector<void*>& inputs,
+      const std::vector<void*>& outputs) const;
+  // Expected format of stack:
+  //   ... <outputs> <inputs>
+  // i.e., output IValues must be below the input IValues in the stack.
+  void runWithAllocatedOutputs(Stack& stack) const;
+
+  void fallback(Stack& stack) const {
+    InterpreterState(code_).run(stack);
+  }
+  void recompile();
+
+  StmtPtr getCodeGenStmt();
+
+  std::string getCodeText(const std::string& attr = "") {
+    return codegen_->getCodeText(attr);
+  }
+
+  const std::shared_ptr<Graph> graph() {
+    return graph_;
+  }
+
+  const std::vector<ConstantDescr>& getConstantDescriptors() const {
+    return constants_;
+  }
+
+  const std::vector<CodeGen::BufferArg>& getBufferArgs() const {
+    return bufferArgs_;
+  }
+
+  const std::string& getKernelName() const {
+    return codegen_->kernel_func_name();
+  }
+
+  const std::vector<int64_t>& getSymbolicShapeInputs() const {
+    return symbolic_shape_inputs_;
+  }
+
+ private:
+  enum BackendType {
+    kUninitialized,
+    kSimpleIREval,
+    kLLVMCodeGen,
+    kCudaCodeGen,
+    kBlockCodeGen,
+  };
+
+  enum MemoryLayoutPolicy {
+    kContiguous,
+    kChannelsLastNdContiguous,
+  };
+
+  void compile();
+  void genInputDebugNames();
+  void runKernel(Stack& stack) const;
+
+  std::vector<ExprHandle> sizesForValue(const torch::jit::Value* v);
+
+  // These functions broadcast shape and also store a `hasBroadcast_` variable.
+  std::vector<ExprHandle> broadcastShapesMut(
+      const std::vector<ExprHandle>& a,
+      const std::vector<ExprHandle>& b);
+  std::vector<ExprHandle> broadcastShapesMut(
+      std::vector<std::vector<ExprHandle>> shapes);
+
+  ArgValue toArg(const torch::jit::Value* v) const;
+  ExprHandle constant(const torch::jit::Value* v);
+
+  Tensor computeValue(const torch::jit::Value* v);
+
+  void bindConstant(const torch::jit::Value* v);
+
+  StmtPtr transformLoops(BackendType backendType, StmtPtr st);
+
+  std::string getCodeGenName(BackendType backendType);
+
+  void getStaticOutputSizesAndStrides(
+      const at::ArrayRef<IValue>& inputs,
+      std::vector<std::vector<int64_t>>* static_sizes,
+      std::vector<std::vector<int64_t>>* static_strides) const;
+
+  std::vector<CodeGen::CallArg> prepareRunArgs(
+      const at::ArrayRef<IValue>& inputs,
+      std::vector<at::Tensor>& outputs) const;
+  BackendType inferBackendTypeFromDevice(at::Device device);
+
+  Tensor bindInput(const torch::jit::Value* input);
+  BlockPtr bindAllInputs();
+
+  // Deduce the memory layout policy to be propagated within the
+  // NNC fusion group. The memory layout policy could be `kContiguous`
+  // or `kChannelsLastNdContiguous`.
+  //    `kContiguous`: Always convert the non-contiguous input tensors and
+  //        internal buffers to contiguous.
+  //    `kChannelsLastNdContiguous`: Always convert the input tensors and
+  //        internal buffers to channels-last contiguous.
+  // Currently, the rule is simple.
+  //    If all the input and output tensors of the NNC fusion group are
+  //    channels-last contiguous, the policy is `kChannelsLastNdContiguous`.
+  //    Otherwise, it is always `kContiguous`.
+  void deduceMemoryLayoutPolicy();
+
+  Tensor convertSymbolicOutputToCorrectStrides(torch::jit::Value* v);
+  Tensor convertStaticShapeOutputToCorrectStrides(torch::jit::Value* v);
+  Tensor convertSymbolicOutputToCorrectStrides(
+      const std::vector<ExprHandle>& sizes,
+      const std::vector<size_t>& sorted_stride_indices_descending,
+      const std::vector<ExprPtr>& strides,
+      BufPtr& buf);
+
+  NNCLoweringFunction getCustomLoweringFor(c10::Symbol op) const;
+  std::unordered_map<c10::Symbol, NNCLoweringFunction> getCustomLowerings()
+      const {
+    return custom_lowerings_;
+  }
+
+  // Allocate memory for intermediate buffers at compile time.
+  // Specifically, we pre-allocate memory for intermediate buffers with static
+  // size and manage these buffers in the way we manage JIT constant tensors:
+  // push the buf args into the stack so NNC IR can access them at runtime.
+  std::vector<BufPtr> preAllocIntermediateBufs(
+      const std::vector<BufPtr>& interm_bufs);
+
+  struct UnpackedTensorOptions {
+    c10::optional<c10::ScalarType> dtype;
+    c10::optional<c10::Layout> layout;
+    c10::optional<c10::Device> device;
+    c10::optional<bool> pinned_memory;
+
+    UnpackedTensorOptions(const c10::TensorOptions& opts)
+        : dtype(c10::optTypeMetaToScalarType(opts.dtype_opt())),
+          layout(opts.layout_opt()),
+          device(opts.device_opt()),
+          pinned_memory(opts.pinned_memory_opt()) {}
+  };
+
+  ExprHandle getVarForShape(const c10::ShapeSymbol& ss);
+  std::vector<ExprHandle> computeInputTensorDims(
+      const torch::jit::Value* input);
+  ExprHandle getStrideArg(size_t tensor_input, size_t stride_index);
+  std::vector<ExprHandle> sizesFromSymbolicShape(
+      const c10::SymbolicShape& shape);
+  std::vector<ExprHandle> getInputStrides(
+      const torch::jit::Value* input,
+      const std::vector<ExprHandle>& inputTensorDims);
+  std::vector<torch::jit::StrideInput>& getSymbolicStrideDesc(
+      const torch::jit::Value* value);
+
+  // Apply the optimizations to the graph owned by the current fusion group,
+  // like concatenation optimization, post-op fusion, and some other
+  // graph-level optimizations.
+  void optimizeOwningGraph();
+
+  int64_t nInputs_ = 0;
+  int64_t nOutputs_ = 0;
+  std::vector<CodeGen::BufferArg> bufferArgs_;
+  std::vector<std::vector<int64_t>> tensorOutputSizes_;
+  std::vector<std::vector<int64_t>> tensorOutputStrides_;
+  std::vector<torch::jit::StrideInput> tensorOutputStrideDesc_;
+  std::vector<bool> isOutputScalar_;
+  std::vector<UnpackedTensorOptions> tensorOutputTensorOptions_;
+  std::unordered_set<BufPtr> bufOutputs_;
+  std::unordered_set<BufPtr> bufsToBeParallelized_;
+  std::unordered_map<const torch::jit::Value*, BufPtr> bufs_;
+  std::unordered_map<const torch::jit::Value*, VarHandle> scalars_;
+  std::unordered_map<const torch::jit::Value*, std::string> input_name_map_;
+  std::unique_ptr<CodeGen> codegen_;
+  at::Device device_ = at::kCPU;
+  std::shared_ptr<Graph> graph_;
+  Code code_;
+  bool allow_fallback_{false};
+  bool use_fallback_{false};
+  bool hasRandom_{false};
+  bool hasBroadcast_{false};
+  std::unordered_map<const torch::jit::Value*, std::vector<ExprHandle>>
+      known_sizes_;
+
+  std::vector<std::vector<ExprHandle>> tensorOutputSymbolicSizes_;
+  // A map from ShapeSymbol.value() to the corresponding Var.
+  std::unordered_map<int64_t, VarHandle> shapeSymbolToVar_;
+  std::unordered_map<ExprPtr, size_t> shapeSymbolInputPos_;
+  // List of values corresponding to the ShapeSymbols that are inputs to the
+  // kernel being compiled. The order of these values corresponds to the order
+  // of the symbolic inputs at the end of the list of inputs to the kernel.
+  std::vector<int64_t> symbolic_shape_inputs_;
+  bool has_symbolic_shapes_{false};
+
+  std::vector<at::Tensor> unpacked_constant_tensors_;
+  std::vector<ConstantDescr> constants_;
+
+  std::unordered_map<c10::Symbol, NNCLoweringFunction> custom_lowerings_;
+  StmtPtr stmt_ = nullptr;
+  bool pre_alloc_{false};
+  std::string kernel_func_name_;
+
+  // index of stack, stride index of tensor that will be appended as a codegen
+  // arg
+  std::vector<std::pair<size_t, size_t>> input_stride_args_;
+  // map from <input index, tensor dimension> to stride as arg VarHandle
+  std::unordered_map<std::pair<size_t, size_t>, VarHandle, SmallSizeTPairHash>
+      strideArgToVar_;
+  std::unordered_map<
+      const torch::jit::Value*,
+      std::vector<torch::jit::StrideInput>>
+      symbolic_strides_;
+
+  // Memory layout to be propagated with fusion group
+  MemoryLayoutPolicy memory_layout_policy_ = MemoryLayoutPolicy::kContiguous;
+};
+
+TORCH_API int& getTECudaPointwiseLoopLevels();
+TORCH_API int& getTECudaPointwiseBlockCount();
+TORCH_API int& getTECudaPointwiseBlockSize();
+TORCH_API bool& getTEGenerateBlockCode();
+TORCH_API bool& getTEMustUseLLVMOnCPU();
+TORCH_API bool fallbackAllowed();
+TORCH_API bool setFallbackAllowed(bool value);
+TORCH_API bool& getCatWoConditionals();
+TORCH_API bool& getOptConditionals();
+
+TORCH_API c10::optional<at::Device> pickDeviceType(
+    const at::ArrayRef<torch::jit::Value*>& inputs);
+
+bool isContiguous(
+    const torch::jit::Value* v,
+    at::MemoryFormat memory_format = at::MemoryFormat::Contiguous);
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
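
For orientation, here is a minimal usage sketch of `TensorExprKernel`, mirroring how the NNC tests drive this class; the IR snippet, tensor shapes, and function name are our own example, not part of the header:

```cpp
// Sketch: compiling and running a small fusion group with TensorExprKernel.
// The IR string and shapes are illustrative; error handling is omitted.
#include <torch/csrc/jit/ir/irparser.h>
#include <torch/csrc/jit/tensorexpr/kernel.h>

using namespace torch::jit;

at::Tensor runFusedMul(const at::Tensor& a, const at::Tensor& b) {
  const auto ir = R"IR(
    graph(%a : Float(4, strides=[1], device=cpu),
          %b : Float(4, strides=[1], device=cpu)):
      %c : Float(4, strides=[1], device=cpu) = aten::mul(%a, %b)
      return (%c))IR";
  auto graph = std::make_shared<Graph>();
  parseIR(ir, graph.get());

  tensorexpr::TensorExprKernel kernel(graph);  // compiles on construction
  Stack stack;
  stack.emplace_back(a);
  stack.emplace_back(b);
  kernel.run(stack);  // pops the inputs and pushes the output IValue
  return stack[0].toTensor();
}
```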
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_codegen.h
ADDED
@@ -0,0 +1,143 @@
+#pragma once
+
+#ifdef TORCH_ENABLE_LLVM
+#include <torch/csrc/Export.h>
+
+#include <torch/csrc/jit/tensorexpr/codegen.h>
+#include <torch/csrc/jit/tensorexpr/ir.h>
+#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
+
+#include <c10/util/Optional.h>
+
+#include <unordered_map>
+#include <vector>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+class LLVMCodeGenImpl;
+class LLVMCodeGenCallee;
+
+class TORCH_API LLVMCodeGen : public CodeGen {
+ public:
+  explicit LLVMCodeGen(
+      StmtPtr stmt,
+      const std::vector<BufferArg>& args,
+      at::Device device = at::kCPU,
+      const std::string& kernel_func_name = "func",
+      Dtype dtype = kInt,
+      c10::optional<std::string> triple = c10::nullopt,
+      c10::optional<std::string> cpu = c10::nullopt,
+      c10::optional<std::string> attrs = c10::nullopt);
+  explicit LLVMCodeGen(StmtPtr stmt);
+
+  LLVMCodeGen() = delete;
+  ~LLVMCodeGen() override;
+
+  // Cleans up all the memory used during the LLVM code generation pass,
+  // except the generated kernel. After calling this method, users should not
+  // call methods like `getCodeText` that require the LLVMCodeGenImpl data.
+  // However, users can continue to call this kernel using `call` and
+  // `call_raw`.
+  void cleanup_memory();
+
+  TORCH_API void call(const std::vector<CallArg>& args) override;
+  TORCH_API void call_raw(const std::vector<void*>& args) override;
+  TORCH_API void call_with_numel(void** args, int64_t numel) override;
+
+  at::Tensor empty_strided(
+      c10::IntArrayRef size,
+      c10::IntArrayRef stride,
+      c10::optional<c10::ScalarType> dtype_opt,
+      c10::optional<c10::Layout> layout_opt,
+      c10::optional<c10::Device> device_opt,
+      c10::optional<bool> pin_memory_opt) override;
+
+  template <typename T>
+  T value() {
+    return value<T>(nullptr);
+  }
+
+  template <typename T>
+  T value(std::vector<void*>& args) {
+    return value<T>(args.data());
+  }
+
+  template <typename T>
+  T value(void** args) {
+    T (*fp)(void**) = (T(*)(void**))getKernelAddress(callee_.get());
+    T rv = fp(args);
+    return rv;
+  }
+
+  std::string getCodeText(const std::string& attr = "") override;
+
+ private:
+  void* getKernelAddress(LLVMCodeGenCallee* callee);
+
+  std::unique_ptr<LLVMCodeGenCallee> callee_;
+  std::unique_ptr<LLVMCodeGenImpl> impl_;
+};
+
+struct TORCH_API LLVMCodeGenBuilder {
+  using BufferArg = CodeGen::BufferArg;
+
+  LLVMCodeGenBuilder(StmtPtr stmt, std::vector<BufferArg> args)
+      : stmt_(stmt), args_(std::move(args)) {}
+
+  LLVMCodeGenBuilder& device(at::Device device) {
+    device_ = device;
+    return *this;
+  }
+
+  LLVMCodeGenBuilder& kernelFuncName(std::string name) {
+    kernelFuncName_ = std::move(name);
+    return *this;
+  }
+
+  LLVMCodeGenBuilder& dtype(Dtype d) {
+    dtype_ = d;
+    return *this;
+  }
+
+  LLVMCodeGenBuilder& triple(std::string triple) {
+    triple_ = std::move(triple);
+    return *this;
+  }
+
+  LLVMCodeGenBuilder& cpu(std::string cpu) {
+    cpu_ = std::move(cpu);
+    return *this;
+  }
+
+  LLVMCodeGenBuilder& attrs(std::string attrs) {
+    attrs_ = std::move(attrs);
+    return *this;
+  }
+
+  std::unique_ptr<LLVMCodeGen> build() {
+    return std::make_unique<LLVMCodeGen>(
+        stmt_, args_, device_, kernelFuncName_, dtype_, triple_, cpu_, attrs_);
+  }
+
+ private:
+  StmtPtr stmt_;
+  std::vector<BufferArg> args_;
+  at::Device device_ = at::kCPU;
+  std::string kernelFuncName_ = "func";
+  Dtype dtype_ = kInt;
+  c10::optional<std::string> triple_ = c10::nullopt;
+  c10::optional<std::string> cpu_ = c10::nullopt;
+  c10::optional<std::string> attrs_ = c10::nullopt;
+};
+
+TORCH_API c10::optional<std::string>& LLVMTargetTriple();
+TORCH_API c10::optional<std::string>& LLVMTargetCPU();
+TORCH_API c10::optional<std::string>& LLVMTargetAttrs();
+TORCH_API bool& LLVMAOTWorkflow();
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
+
+#endif // TORCH_ENABLE_LLVM
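
A rough end-to-end sketch of the builder (only meaningful in builds with `TORCH_ENABLE_LLVM`; the buffer names, kernel name, and sizes below are our own):

```cpp
// Sketch: lowering a pointwise add through LoopNest and the LLVM backend.
// Assumes a build with TORCH_ENABLE_LLVM; names and sizes are illustrative.
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
#include <torch/csrc/jit/tensorexpr/loopnest.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>

using namespace torch::jit::tensorexpr;

void runVecAdd() {
  constexpr int kN = 1024;
  BufHandle a("A", {kN}, kFloat);
  BufHandle b("B", {kN}, kFloat);
  Tensor c = Compute("C", {kN}, [&](const VarHandle& i) {
    return a.load(i) + b.load(i);
  });

  LoopNest nest({c});
  nest.prepareForCodegen();
  StmtPtr stmt = IRSimplifier::simplify(nest.root_stmt());

  auto cg = LLVMCodeGenBuilder(stmt, {a, b, c})
                .kernelFuncName("vec_add")
                .build();

  std::vector<float> av(kN, 1.f), bv(kN, 2.f), cv(kN, 0.f);
  cg->call({av, bv, cv});  // CallArg converts from std::vector<T>
}
```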
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/llvm_jit.h
ADDED
@@ -0,0 +1,77 @@
+#pragma once
+
+#ifdef TORCH_ENABLE_LLVM
+#include <c10/macros/Macros.h>
+#include <c10/util/Exception.h>
+#include <c10/util/Optional.h>
+#include <torch/csrc/Export.h>
+
+C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wsuggest-override")
+#include <llvm/ExecutionEngine/JITSymbol.h>
+C10_DIAGNOSTIC_POP()
+#include <llvm/ExecutionEngine/Orc/Core.h>
+#include <llvm/ExecutionEngine/Orc/ThreadSafeModule.h>
+#include <llvm/Target/TargetMachine.h>
+
+#include <memory>
+#include <string>
+
+namespace torch {
+namespace jit {
+namespace tensorexpr {
+
+inline std::string formatError(llvm::Error&& err, const char* msg) {
+  static constexpr const char* defaultErrorMsg =
+      "Unexpected failure in LLVM JIT";
+  std::string errorMsg(msg ? msg : defaultErrorMsg);
+  llvm::raw_string_ostream ss(errorMsg);
+  ss << ": " << err;
+  return ss.str();
+}
+
+template <typename T>
+T assertSuccess(llvm::Expected<T> valOrErr, const char* msg = nullptr) {
+  TORCH_INTERNAL_ASSERT(valOrErr, formatError(valOrErr.takeError(), msg));
+  return std::move(*valOrErr);
+}
+
+inline void assertSuccess(llvm::Error err, const char* msg = nullptr) {
+  TORCH_INTERNAL_ASSERT(!err, formatError(std::move(err), msg));
+}
+
+} // namespace tensorexpr
+} // namespace jit
+} // namespace torch
+
+namespace llvm {
+namespace orc {
+
+class PytorchLLVMJITImpl;
+
+class TORCH_API PytorchLLVMJIT {
+ public:
+  PytorchLLVMJIT(
+      c10::optional<std::string> triple,
+      c10::optional<std::string> cpu,
+      c10::optional<std::string> attrs);
+  ~PytorchLLVMJIT();
+
+  void addModule(std::unique_ptr<Module> M, std::unique_ptr<LLVMContext> C);
+
+  JITSymbol findSymbol(const std::string Name);
+
+  bool hasSymbol(const std::string& Name);
+
+  TargetMachine& getTargetMachine();
+
+  const DataLayout& getDataLayout();
+
+ private:
+  // Use the PImpl idiom here to hide the no-rtti parts of the JIT structure.
+  std::unique_ptr<PytorchLLVMJITImpl> impl_;
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+#endif // ENABLE LLVM
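
The two `assertSuccess` overloads turn LLVM's `Expected`/`Error` plumbing into hard asserts with a formatted message. A tiny hedged sketch of the intended call pattern (`computeAnswer` is a hypothetical `Expected`-returning function, not part of the header):

```cpp
// Sketch: unwrapping llvm::Expected with the helper above.
// `computeAnswer` is hypothetical; assumes TORCH_ENABLE_LLVM.
#include <llvm/Support/Error.h>
#include <torch/csrc/jit/tensorexpr/llvm_jit.h>

llvm::Expected<int> computeAnswer();  // defined elsewhere in this sketch

void demo() {
  // Asserts (including the formatted LLVM error text) on failure,
  // otherwise moves the value out of the Expected wrapper.
  int answer = torch::jit::tensorexpr::assertSuccess(
      computeAnswer(), "computeAnswer failed");
  (void)answer;
}
```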
videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest.h
ADDED
|
@@ -0,0 +1,606 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <string>
|
| 4 |
+
#include <unordered_map>
|
| 5 |
+
#include <unordered_set>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
#include <torch/csrc/Export.h>
|
| 9 |
+
#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
|
| 10 |
+
|
| 11 |
+
namespace torch {
|
| 12 |
+
namespace jit {
|
| 13 |
+
namespace tensorexpr {
|
| 14 |
+
|
| 15 |
+
class Expr;
|
| 16 |
+
class Var;
|
| 17 |
+
class Buf;
|
| 18 |
+
class Tensor;
|
| 19 |
+
class Function;
|
| 20 |
+
class Stmt;
|
| 21 |
+
class For;
|
| 22 |
+
class Block;
|
| 23 |
+
class Store;
|
| 24 |
+
class Dtype;
|
| 25 |
+
|
| 26 |
+
class TORCH_API LoopNest {
|
| 27 |
+
public:
|
| 28 |
+
// A constructor for building a LoopNest from a list of Tensors
|
| 29 |
+
LoopNest(
|
| 30 |
+
const std::vector<Tensor>& output_tensors,
|
| 31 |
+
const std::vector<Tensor>& tensors_to_compute);
|
| 32 |
+
|
| 33 |
+
// A convenience constructor for the case when all tensors are output tensors
|
| 34 |
+
LoopNest(const std::vector<Tensor>& output_tensors);
|
| 35 |
+
|
| 36 |
+
// A constructor for building a LoopNest from an Stmt and a list of output
|
| 37 |
+
// buffers.
|
| 38 |
+
LoopNest(StmtPtr stmt, std::unordered_set<BufPtr> output_bufs);
|
| 39 |
+
|
| 40 |
+
// A constructor for building a LoopNest from another loopnest. It clones the
|
| 41 |
+
// other loopnest's stmt.
|
| 42 |
+
LoopNest(const LoopNest& other);
|
| 43 |
+
|
| 44 |
+
StmtPtr root_stmt() const {
|
| 45 |
+
return root_stmt_;
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
std::vector<ForPtr> getLoopStmtsFor(Tensor) const;
|
| 49 |
+
std::vector<ForPtr> getLoopStmtsFor(BufPtr) const;
|
| 50 |
+
std::vector<ForPtr> getLoopStmtsFor(StmtPtr) const;
|
| 51 |
+
StmtPtr getLoopBodyFor(Tensor) const;
|
| 52 |
+
StmtPtr getLoopBodyFor(BufPtr) const;
|
| 53 |
+
|
| 54 |
+
// Returns the For stmt indexed by 'indices' in the 'root' For stmt.
|
| 55 |
+
//'indices' indicates the path to the returned loop from 'root' in AST, e.g.,
|
| 56 |
+
//
|
| 57 |
+
// root: for(int i...){
|
| 58 |
+
// j_loop: for (int j...){
|
| 59 |
+
// k1_loop: for (int k1...){
|
| 60 |
+
// A[i, j, k1] = ....
|
| 61 |
+
// }
|
| 62 |
+
// B[i, j] = ...
|
| 63 |
+
// k2_loop: for (int k2...){
|
| 64 |
+
// A[i, j, k2] = ...
|
| 65 |
+
// }
|
| 66 |
+
// }
|
| 67 |
+
// }
|
| 68 |
+
//
|
| 69 |
+
// the path from 'root' to 'j_loop' is [0]
|
| 70 |
+
// the path from 'root' to 'k1_loop' is [0, 0]
|
| 71 |
+
// the path from 'root' to 'k2_loop' is [0, 2]
|
| 72 |
+
ForPtr getLoopAt(ForPtr root, const std::vector<int>& indices) const;
|
| 73 |
+
|
| 74 |
+
// Returns the For stmt that is immediately enclosing the given stmt.
|
| 75 |
+
static ForPtr getParentLoop(StmtPtr st);
|
| 76 |
+
|
| 77 |
+
// Returns the list of For stmts corresponding to the loopnest that is
|
| 78 |
+
// enclosing the given stmt.
|
| 79 |
+
static std::vector<ForPtr> getEnclosingLoopNest(StmtPtr st);
|
| 80 |
+
|
| 81 |
+
// Returns a list of all Stmts that write to the given buf.
|
| 82 |
+
std::vector<StmtPtr> getAllWritesToBuf(BufPtr) const;
|
| 83 |
+
|
| 84 |
+
// The following methods return the For loops that contain writes to
|
| 85 |
+
// the given buf.
|
| 86 |
+
//
|
| 87 |
+
// For example, consider the following code:
|
| 88 |
+
// for i1
|
| 89 |
+
// for j1
|
| 90 |
+
// a[i1,j1] =
|
| 91 |
+
// for i2
|
| 92 |
+
// for j2
|
| 93 |
+
// for k2
|
| 94 |
+
// a[i2,j2] =
|
| 95 |
+
// for j3
|
| 96 |
+
// a[i2,j3] =
|
| 97 |
+
|
| 98 |
+
// Returns a list of For loops which directly contain a Stmt that writes
|
| 99 |
+
// to buf.
|
| 100 |
+
// For the above example:
|
| 101 |
+
// getAllInnermostLoopsWritingToBuf(a) => {j1, k2, j3}
|
| 102 |
+
std::vector<ForPtr> getAllInnermostLoopsWritingToBuf(BufPtr) const;
|
| 103 |
+
|
| 104 |
+
// Returns a list of For loopnests which contain a Stmt that writes to
|
| 105 |
+
// the given buf. Each loopnest here is a vector For loops.
|
| 106 |
+
// For the above example:
|
| 107 |
+
// getAllLoopNestsWritingToBuf(a) => {{i1,j1}, {i2,j2,k2}, {i2,j3}}
|
| 108 |
+
std::vector<std::vector<ForPtr>> getAllLoopNestsWritingToBuf(BufPtr) const;
|
| 109 |
+
|
| 110 |
+
StmtPtr simplify();
|
| 111 |
+
|
| 112 |
+
// Sanitize variables and buffer names.
|
| 113 |
+
// The pass assigns predefined names for loop index variables
|
| 114 |
+
// (i,j,k,l,m,n,o,p,i1,j1,k1,...) and ensures these names are not conflicting
|
| 115 |
+
// anywhere. It also removes duplicates from other Buf nad Var names as well
|
| 116 |
+
// as replaces illegal characters in them with underscores.
|
| 117 |
+
//
|
| 118 |
+
// Note: since it's currently technically possible to use the same variable
|
| 119 |
+
// as index in two different loops, this transformation finds such cases and
|
| 120 |
+
// introduces new variables to avoid duplication.
|
| 121 |
+
static StmtPtr sanitizeNames(StmtPtr s);
|
| 122 |
+
|
| 123 |
+
bool computeInline(StmtPtr s);
|
| 124 |
+
bool computeInline(BufPtr b);
|
| 125 |
+
void inlineIntermediateBufs(bool allow_duplicated_work);
|
| 126 |
+
|
| 127 |
+
// Optimizes conditionals.
|
| 128 |
+
//
|
| 129 |
+
// Currently, only the following pattern of conditionals is optimized.
|
| 130 |
+
// This corresponds to the conditional format that is generated to handle
|
| 131 |
+
// `aten::cat` op.
|
| 132 |
+
//
|
| 133 |
+
// for (int i = 0; i < 20; i++) {
|
| 134 |
+
// A[i] = IfThenElse(i<5 ? 1 : 0, B[i], C[i-5])
|
| 135 |
+
// }
|
| 136 |
+
//
|
| 137 |
+
// Constraints that must be satisfied for this optimization:
|
| 138 |
+
// * All conditions should be of the form "var < expr".
|
| 139 |
+
// * All conditions should have the same variable, say v.
|
| 140 |
+
// * The condition variable found should be the same as the inner-most
|
| 141 |
+
// loop variable. TODO: Remove this constraint.
|
| 142 |
+
// * If there are multiple stores that contain conditionals using the same
|
| 143 |
+
// loop variable, only the first conditional will be optimized.
|
| 144 |
+
// TODO: Remove this constraint.
|
| 145 |
+
bool optimizeConditionals();
|
| 146 |
+
|
| 147 |
+
// Splits the given loop into 2 nested loops with the given factor as the
|
| 148 |
+
// inner loop bound. If the factor does not evenly divide the loop bound,
|
| 149 |
+
// then the remaining iterations are extracted into a tail loop that is
|
| 150 |
+
// added after the given loop.
|
| 151 |
+
//
|
| 152 |
+
// For example, consider the following code:
|
| 153 |
+
// for (int i = 0; i < 100; ++i) {
|
| 154 |
+
// A[i] =
|
| 155 |
+
// }
|
| 156 |
+
//
|
| 157 |
+
// splitWithTail(i, 8, ...) will result in:
|
| 158 |
+
// for (int i_outer = 0; i_outer < 12; ++i_outer) {
|
| 159 |
+
// for (int i_inner = 0; i_inner < 8; ++i_inner) {
|
| 160 |
+
// A[i_outer * 8 + i_inner] =
|
| 161 |
+
// }
|
| 162 |
+
// }
|
| 163 |
+
// for (int i_tail = 0; i_tail < 4; ++i_tail) {
|
| 164 |
+
// A[i_tail + 96] =
|
| 165 |
+
// }
|
| 166 |
+
//
|
| 167 |
+
// The given loop will be transformed to the outer loop after splitting.
|
| 168 |
+
// So, the pointer to the input loop should be valid after splitting and
|
| 169 |
+
// will point to the outer loop. The `inner` and `tail` parameters will be
|
| 170 |
+
// set to point to the inner and tail loops that are generated.
|
| 171 |
+
static void splitWithTail(ForPtr f, int factor, ForPtr* inner, ForPtr* tail);
|
| 172 |
+
// A convenience wrapper when the caller does not need to access the
|
| 173 |
+
// split loops.
|
| 174 |
+
static void splitWithTail(ForPtr f, int factor);
|
| 175 |
+
|
| 176 |
+
// Splits the given loop into 2 nested loops with the given factor as the
|
| 177 |
+
// inner loop bound. If the factor does not evenly divide the loop bound,
|
| 178 |
+
// then a conditional is inserted into the body to handle the remaining
|
| 179 |
+
// iterations appropriately.
|
| 180 |
+
//
|
| 181 |
+
// For example, consider the following code:
|
| 182 |
+
// for (int i = 0; i < 100; ++i) {
|
| 183 |
+
// A[i] =
|
| 184 |
+
// }
|
| 185 |
+
//
|
| 186 |
+
// splitWithMask(i, 8, ...) will result in:
|
| 187 |
+
// for (int i_outer = 0; i_outer < 13; ++i_outer) {
|
| 188 |
+
// for (int i_inner = 0; i_inner < 8; ++i_inner) {
|
| 189 |
+
// if (i_outer * 8 + i_inner < 100) {
|
| 190 |
+
// A[i_outer * 8 + i_inner] =
|
| 191 |
+
// }
|
| 192 |
+
// }
|
| 193 |
+
// }
|
| 194 |
+
//
|
| 195 |
+
// The given loop will be transformed to the outer loop after splitting.
|
| 196 |
+
// So, the pointer to the input loop should be valid after splitting and
|
| 197 |
+
// will point to the outer loop. The `inner` parameter will be set to point
|
| 198 |
+
// to the inner loop that is generated.
|
| 199 |
+
static void splitWithMask(ForPtr f, int factor, ForPtr* inner);
|
| 200 |
+
// A convenience wrapper when the caller does not need to access the
|
| 201 |
+
// split loops.
|
| 202 |
+
static void splitWithMask(ForPtr f, int factor);
|
| 203 |
+
|
| 204 |
+
// The following methods support loop distribution.
|
| 205 |
+
// For example, consider the following code. This will be used to
|
| 206 |
+
// demonstrate the methods below.
|
| 207 |
+
//
|
| 208 |
+
// S0: for m
|
| 209 |
+
// S1: for i
|
| 210 |
+
// S2: A[i] = 0
|
| 211 |
+
// S3: for j
|
| 212 |
+
// S4: A[i] = A[i] +
|
| 213 |
+
// S5: B[i] = A[i]
|
| 214 |
+
// S6: for k
|
| 215 |
+
// S7: B[i] = B[i] +
|
| 216 |
+
|
| 217 |
+
// This method distributes the given loop over its body by splitting
|
| 218 |
+
// after every given pivot stmt.
|
| 219 |
+
//
|
| 220 |
+
// NOTE: Pivot stmts that are not in the given loop's body will be ignored.
|
| 221 |
+
//
|
| 222 |
+
// For the above example:
|
| 223 |
+
// distributeLoop(S1, {S3, S5})
|
| 224 |
+
// will result in:
|
| 225 |
+
// S0: for m
|
| 226 |
+
// S1: for i
|
| 227 |
+
// S2: A[i] = 0
|
| 228 |
+
// S3: for j
|
| 229 |
+
// S4: A[i] = A[i] +
|
| 230 |
+
// : for i
|
| 231 |
+
// S5: B[i] = A[i]
|
| 232 |
+
// : for i
|
| 233 |
+
// S6: for k
|
| 234 |
+
// S7: B[i] = B[i] +
|
| 235 |
+
static std::vector<ForPtr> distributeLoop(
|
| 236 |
+
ForPtr loop,
|
| 237 |
+
const std::unordered_set<StmtPtr>& pivots);
|
| 238 |
+
|
| 239 |
+
// This method distributes the given loop over every stmt in its body.
|
| 240 |
+
//
|
| 241 |
+
// For the above example:
|
| 242 |
+
// distributeLoop(S1)
|
| 243 |
+
// will result in:
|
| 244 |
+
// S0: for m
|
| 245 |
+
// S1: for i
|
| 246 |
+
// S2: A[i] = 0
|
| 247 |
+
// : for i
|
| 248 |
+
// S3: for j
|
| 249 |
+
// S4: A[i] = A[i] +
|
| 250 |
+
// : for i
|
| 251 |
+
// S5: B[i] = A[i]
|
| 252 |
+
// : for i
|
| 253 |
+
// S6: for k
|
| 254 |
+
// S7: B[i] = B[i] +
|
| 255 |
+
static std::vector<ForPtr> distributeLoop(ForPtr loop);
|
| 256 |
+
// Same as above, but also distribute parent loops.
|
| 257 |
+
// Returns the result of distributing the outermost loop.
|
| 258 |
+
//
|
| 259 |
+
// For the above example:
|
| 260 |
+
// distributeLoopAndParents(S1) will result in:
|
| 261 |
+
// S0: for m
|
| 262 |
+
// S1: for i
|
| 263 |
+
// S2: A[i] = 0
|
| 264 |
+
// : for m
|
| 265 |
+
// : for i
|
| 266 |
+
// S3: for j
|
| 267 |
+
// S4: A[i] = A[i] +
|
| 268 |
+
// : for m
|
| 269 |
+
// : for i
|
| 270 |
+
// S5: B[i] = A[i]
|
| 271 |
+
// : for m
|
| 272 |
+
// : for i
|
| 273 |
+
// S6: for k
|
| 274 |
+
// S7: B[i] = B[i] +
|
| 275 |
+
static std::vector<ForPtr> distributeLoopAndParents(ForPtr loop);
|
| 276 |
+
|
| 277 |
+
// This method distributes the given loop over its body by splitting
|
| 278 |
+
// after every For stmt in its body.
|
| 279 |
+
//
|
| 280 |
+
// For the above example:
|
| 281 |
+
// distributeLoopOverInnerLoops(S1)
|
| 282 |
+
// will result in:
|
| 283 |
+
// S0: for m
|
| 284 |
+
// S1: for i
|
| 285 |
+
// S2: A[i] = 0
|
| 286 |
+
// S3: for j
|
| 287 |
+
// S4: A[i] = A[i] +
|
| 288 |
+
// : for i
|
| 289 |
+
// S5: B[i] = A[i]
|
| 290 |
+
// S6: for k
|
| 291 |
+
// S7: B[i] = B[i] +
|
| 292 |
+
static std::vector<ForPtr> distributeLoopOverInnerLoops(ForPtr loop);
|
| 293 |
+
// Same as above, but also distribute parent loops.
|
| 294 |
+
// Returns the result of distributing the outermost loop.
|
| 295 |
+
//
|
| 296 |
+
// For the above example:
|
| 297 |
+
// distributeLoopAndParentsOverInnerLoops(S1)
|
| 298 |
+
// will result in:
|
| 299 |
+
// S0: for m
|
| 300 |
+
// S1: for i
|
| 301 |
+
// S2: A[i] = 0
|
| 302 |
+
// S3: for j
|
| 303 |
+
// S4: A[i] = A[i] +
|
| 304 |
+
// : for m
|
| 305 |
+
// : for i
|
| 306 |
+
// S5: B[i] = A[i]
|
| 307 |
+
// S6: for k
|
| 308 |
+
// S7: B[i] = B[i] +
|
| 309 |
+
static std::vector<ForPtr> distributeLoopAndParentsOverInnerLoops(
|
| 310 |
+
ForPtr loop);
|
| 311 |
+
|
| 312 |
+
// This method performs loop fusion.
|
| 313 |
+
// For example, consider the following code.
|
| 314 |
+
//
|
| 315 |
+
// S1: for m
|
| 316 |
+
// S2: A[m] = 0
|
| 317 |
+
// S3: for j
|
| 318 |
+
// S4: A[m] = A[m] +
|
| 319 |
+
// S5: for n
|
| 320 |
+
// S5: B[n] = A[n]
|
| 321 |
+
// S6: for k
|
| 322 |
+
// S7: B[n] = B[n] +
|
| 323 |
+
//
|
| 324 |
+
// fuseLoops({S1, S5}), will return the following loop:
|
| 325 |
+
// S1: for m
|
| 326 |
+
// S2: A[m] = 0
|
| 327 |
+
// S3: for j
|
| 328 |
+
// S4: A[m] = A[m] +
|
| 329 |
+
// S5: B[m] = A[m]
|
| 330 |
+
// S6: for k
|
| 331 |
+
// S7: B[m] = B[m] +
|
| 332 |
+
//
|
| 333 |
+
// This transformation is unsafe as it simply add all loops into the body of
|
| 334 |
+
// the first loop for fusion without correctness checks.
|
| 335 |
+
//
|
| 336 |
+
// Below are the two requirements to apply unsafeFuseLoops:
|
| 337 |
+
// * All the loops have the same parent.
|
| 338 |
+
// * There are no statements between these loops in their parent body.
|
| 339 |
+
static bool unsafeFuseLoops(const std::vector<ForPtr>& loops, ForPtr* fused);
|
| 340 |
+
|
| 341 |
+
// Loop fusion is done only when all the conditions below are satisfied.
|
| 342 |
+
// * All the loops have the same parent.
|
| 343 |
+
// * There are no statements between these loops in their parent body.
|
| 344 |
+
// * The start bounds are the same for all loops.
|
| 345 |
+
// * The stop bounds are the same for all loops.
|
| 346 |
+
// * Fusing the loops does not violate or add any dependencies.
|
| 347 |
+
static bool fuseLoops(const std::vector<ForPtr>& loops, ForPtr* fused);
|
| 348 |
+
|
| 349 |
+
static void reorderAxis(ForPtr a, ForPtr b);
|
| 350 |
+
|
| 351 |
+
// Reorder the given list of loops according to the permutation specified.
|
| 352 |
+
// Here `permutation[i]` represents the position of the loop in the input
|
| 353 |
+
// which will end up at position `i` after the reorder.
|
| 354 |
+
//
|
| 355 |
+
// For example, consider the following code:
|
| 356 |
+
// for p
|
| 357 |
+
// for q
|
| 358 |
+
// for r
|
| 359 |
+
// for s
|
| 360 |
+
// A[p,q,r,s] =
|
| 361 |
+
//
|
| 362 |
+
// reorder({p, q, r, s}, {2, 3, 0, 1}) will return the list of loops in the
|
| 363 |
+
// following form:
|
| 364 |
+
// for r
|
| 365 |
+
// for s
|
| 366 |
+
// for p
|
| 367 |
+
// for q
|
| 368 |
+
// A[p,q,r,s] =
|
| 369 |
+
static std::vector<ForPtr> reorder(
|
| 370 |
+
const std::vector<ForPtr>& loops,
|
| 371 |
+
const std::vector<size_t>& permutation);
|
| 372 |
+
|
| 373 |
+
// Tile takes a 2d domain (x, y) and splits it into small rectangular blocks
|
| 374 |
+
// each with shape (x_factor, y_factor). The traversal over the domain turns
|
| 375 |
+
// into an outer iteration over the blocks and an inner traversal over all
|
| 376 |
+
// points in the block.
|
| 377 |
+
// Note that if x dim % x_factor or y dim % y_factor does not equal to 0, the
|
| 378 |
+
// loop body will generate corresponding tailing loops.
|
| 379 |
+
// The transformation is in-place and returns 'xtail'.
|
| 380 |
+
//
|
| 381 |
+
// For example, consider the following code:
|
| 382 |
+
// for i: [0, 64)
|
| 383 |
+
// for j: [0, 64)
|
| 384 |
+
// for k: [0, 32)
|
| 385 |
+
// A[i, j] = B[i, k] + C[j, k]
|
| 386 |
+
//
|
| 387 |
+
// tile(i, j, 4, 8) will transform "i" for-stmt into the following nested
|
| 388 |
+
// loop:
|
| 389 |
+
// for i_outer: [0, 16)
|
| 390 |
+
// for j_outer: [0, 8)
|
| 391 |
+
// for i_inner: [0, 4)
|
| 392 |
+
// for j_inner: [0, 8)
|
| 393 |
+
// for k: [0, 32)
|
| 394 |
+
// A[i_outer * 4 + i_inner, j_outer * 8 + j_inner] =
|
| 395 |
+
// B[i_outer * 4 + i_inner, k] + C[j_outer * 8 + j_inner, k]
|
| 396 |
+
//
|
| 397 |
+
// tile(i, j, 4, 9) will transform "i" for-stmt into the following nested
|
| 398 |
+
// loop:
|
| 399 |
+
// for i_outer: [0, 16)
|
| 400 |
+
// for j_outer: [0, 7)
|
| 401 |
+
// for i_inner: [0, 4)
|
| 402 |
+
// for j_inner: [0, 9)
|
| 403 |
+
// for k: (0, 32)
|
| 404 |
+
// A[i_outer * 4 + i_inner, j_outer * 9 + j_inner] =
|
| 405 |
+
// B[i_outer * 4 + i_inner, k] + C[j_outer * 9 + j_inner, k]
|
| 406 |
+
// for j_tail: [0, 1)
|
| 407 |
+
// for i_inner: [0, 4)
|
| 408 |
+
// for k: (0, 32)
|
| 409 |
+
// A[i_outer * 4 + i_inner, 7 * 9 + j_tail] =
|
| 410 |
+
// B[i_outer * 4 + i_inner, k] + C[7 * 9 + j_tail, k]
|
| 411 |
+
ForPtr tile(ForPtr x, ForPtr y, int x_factor, int y_factor);
|
| 412 |
+
|
| 413 |
+
// Returns true if the given loops are perfectly nested, i.e., every loop
|
| 414 |
+
// (except the innermost) should have exactly one statement in its body
|
| 415 |
+
// and that statement must be the next inner loop.
|
| 416 |
+
static bool areLoopsPerfectlyNested(const std::vector<ForPtr>& loops);
|
| 417 |
+
|
| 418 |
+
// Returns true if the given loop has a loop-carried dependence.
|
| 419 |
+
static bool hasLoopCarriedDependence(ForPtr loop);
|
| 420 |
+
|
| 421 |
+
// Unrolls all the iterations of the given loop.
|
| 422 |
+
// Requires that the loop bounds are constant.
|
| 423 |
+
static void fullUnroll(ForPtr f, StmtPtr* unrolled);
|
| 424 |
+
static void fullUnroll(ForPtr f);
|
| 425 |
+
|
| 426 |
+
// Unrolls the given loop for the specified factor.
|
| 427 |
+
// This does not require constant bounds for the loop being unrolled.
|
| 428 |
+
static void unroll(ForPtr f, int factor, ForPtr* tail);
|
| 429 |
+
static void unroll(ForPtr f, int factor);
|
| 430 |
+
|
| 431 |
+
static bool normalize(ForPtr f);
|
| 432 |
+
static bool isNormalized(ForPtr f);
|
| 433 |
+
|
| 434 |
+
static bool flatten(const std::vector<ForPtr>& f, ForPtr* flattened);
|
| 435 |
+
static bool flatten(const std::vector<ForPtr>& f);
|
| 436 |
+
|
| 437 |
+
// Compresses the given buffer based on its use in the given Stmts.
|
| 438 |
+
//
|
| 439 |
+
// NOTE: This API assumes that there are no accesses to the given buffer
|
| 440 |
+
// outside the given statement. So, this should be called with the entire
|
| 441 |
+
// kernel statement to avoid incorrect buffer compressions.
|
| 442 |
+
//
|
| 443 |
+
// For example, given the input:
|
| 444 |
+
//
|
| 445 |
+
// for (int i = 0; i < 100; ++i) {
|
| 446 |
+
// for (int j = 0; j < 200; ++j) {
|
| 447 |
+
// A[i,j] = sin(i*j)
|
| 448 |
+
// }
|
| 449 |
+
// for (int j = 0; j < 199; ++j) {
|
| 450 |
+
// B[i,j] = A[i,j] + A[i, j+1]
|
| 451 |
+
// }
|
| 452 |
+
// }
|
| 453 |
+
//
|
| 454 |
+
// compressBuffer(A, ...) will compress buffer A from
|
| 455 |
+
// [100, 200] to [1, 200] and modify the code as follows:
|
| 456 |
+
//
|
| 457 |
+
// for (int i = 0; i < 100; ++i) {
|
| 458 |
+
// for (int j = 0; j < 200; ++j) {
|
| 459 |
+
// A[0,j] = sin(i*j)
|
| 460 |
+
// }
|
| 461 |
+
// for (int j = 0; j < 199; ++j) {
|
| 462 |
+
// B[i,j] = A[0,j] + A[0, j+1]
|
| 463 |
+
// }
|
| 464 |
+
// }
|
| 465 |
+
static void compressBuffer(BufPtr buf, StmtPtr stmt);
|
| 466 |
+
|
| 467 |
+
// Compresses all buffers in the given statement.
|
| 468 |
+
//
|
| 469 |
+
// NOTE: This API assumes that there are no accesses to buffers outside
|
| 470 |
+
// the given statement. So, this should be called with the entire
|
| 471 |
+
// kernel statement to avoid incorrect buffer compressions.
|
| 472 |
+
//
|
| 473 |
+
// TODO: Add an IR verifier check to detect invalidly compressed buffers.
|
| 474 |
+
static void compressAllBuffers(StmtPtr stmt);
|
| 475 |
+
|
| 476 |
+
// Get 'num' loops from the loopnest starting at 'f'.
|
| 477 |
+
static std::vector<ForPtr> getLoopStmtsInLoopNest(ForPtr f, size_t num);
|
| 478 |
+
|
| 479 |
+
// LoopOptions are propagated to tail.
|
| 480 |
+
static void sliceHead(ForPtr f, int factor, ForPtr* head, ForPtr* tail);
|
| 481 |
+
static void sliceHead(ForPtr f, int factor);
|
| 482 |
+
// LoopOptions are propagated to head.
|
| 483 |
+
static void sliceTail(ForPtr f, int factor, ForPtr* head, ForPtr* tail);
|
| 484 |
+
static void sliceTail(ForPtr f, int factor);
|
| 485 |
+
|
| 486 |
+
using AccessResult = std::pair<BufPtr, StmtPtr>;
|
| 487 |
+
// Insert a cache for the consumer's usages of the buffer produced in
|
| 488 |
+
// consumer, and redirect reads and writes in the consumer to that cache.
|
| 489 |
+
// Returns a pair of the new cache buffer, and the new rewritten consumer.
|
| 490 |
+
static AccessResult cacheAccesses(
|
| 491 |
+
BufPtr producer,
|
| 492 |
+
const std::string& name,
|
| 493 |
+
StmtPtr consumer);
|
| 494 |
+
|
| 495 |
+
// Insert a temporary computation of statement S in the scope of loop AT.
|
| 496 |
+
// S is assumed to be a Store or a Block containing a Store. Along with the
|
| 497 |
+
// computation itself, this transformation inserts Alloc/Free statements for
|
| 498 |
+
// the temporary buffer used in the computation.
|
| 499 |
+
static void computeAt(StmtPtr s, ForPtr at);
|
| 500 |
+
|
| 501 |
+
// Rfactor a reduction axis into a normal axis.
|
| 502 |
+
//
|
| 503 |
+
// Requirements:
|
| 504 |
+
// * S is the reduction store
|
| 505 |
+
// * S is the only statement in the innermost loop
|
| 506 |
+
// * There is at least two reduction arguments in S
|
| 507 |
+
// * OUTER_REDUCTION_FOR loop corresponds to the outermost reduction variable
|
| 508 |
+
// used in the store and all other reduction variables are index variables of
|
| 509 |
+
// children loops of OUTER_REDUCTION_FOR
|
| 510 |
+
// * OUTER_REDUCTION_FOR is a perfect loop nest, i.e. it has only loops
|
| 511 |
+
// corresponding to the other reduction variables and the store, nested into
|
| 512 |
+
// each other
|
| 513 |
+
//
|
| 514 |
+
// What it does:
|
| 515 |
+
// * Introduce a new buffer with an extra dimension of a size equal to the
|
| 516 |
+
// span of the loop OUTER_REDUCTION_FOR (the new buffer is returned via
|
| 517 |
+
// RFAC_BUF_PTR)
|
| 518 |
+
// * Insert an initialization store for the new buffer in
|
| 519 |
+
// OUTER_REDUCTION_FOR before its nested loop
|
| 520 |
+
// * Replace the reduction store to the original buffer with the reduction
|
| 521 |
+
// store to the temp buffer, removing the index var of OUTER_REDUCTION_FOR
|
| 522 |
+
// from reduction arguments
|
| 523 |
+
// * Insert a final reduction store over the extra dimension of the new
|
| 524 |
+
// buffer to the original buffer
|
| 525 |
+
// * Returns TRUE if the transformation succeeded and FALSE otherwise
|
| 526 |
+
//
|
| 527 |
+
// Example:
|
| 528 |
+
// Original IR:
|
| 529 |
+
// S1: for i # normal axis
|
| 530 |
+
// S2: X[i] = 0
|
| 531 |
+
// S3: for j # reduction axis
|
| 532 |
+
// S4: for k # reduction axis
|
| 533 |
+
// S5: X[i] = ReduceOp(X[i] + Y[i,j,k], reduce_axis={j,k})
|
| 534 |
+
//
|
| 535 |
+
// After RFACTOR(S5, S3)
|
| 536 |
+
// S1: for i # normal axis
|
| 537 |
+
// S2: X[i] = 0
|
| 538 |
+
// S3: for j # reduction axis for X, normal axis for X_rfac
|
| 539 |
+
// X_rfac[i,j] = 0
|
| 540 |
+
// S4: for k # reduction axis
|
| 541 |
+
// X_rfac[i,j] = ReduceOp(X_rfac[i,j] + Y[i,j,k], reduce_axis={k})
|
| 542 |
+
// X[i] = ReduceOp(X[i] + X_rfac[i,j], reduce_axis={j})
|
| 543 |
+
static bool rfactor(StmtPtr s, ForPtr outer_reduction_for);
|
| 544 |
+
static bool rfactor(
|
| 545 |
+
StmtPtr s,
|
| 546 |
+
ForPtr outer_reduction_for,
|
| 547 |
+
BufPtr* rfac_buf_ptr);
|
| 548 |
+
|
| 549 |
+
// Vectorize the given loop. This method requires that the given loop
|
| 550 |
+
// does not perform a reduction.
|
| 551 |
+
// It returns true if vectorization is successful and false otherwise.
|
| 552 |
+
static bool vectorize(ForPtr);
|
| 553 |
+
|
| 554 |
+
// Find the inner-most loops and vectorize them. Currently, this only works
|
| 555 |
+
// for the LLVM backend, when no reductions are involved.
|
| 556 |
+
void vectorizeInnerLoops();
|
| 557 |
+
|
| 558 |
+
void eliminateDeadStores();
|
| 559 |
+
|
| 560 |
+
void prepareForCodegen();

  const std::unordered_set<BufPtr> getInputBufs() const;
  const std::unordered_set<BufPtr> getOutputBufs() const {
    return output_bufs_;
  }
  std::vector<BufPtr> getIntermediateBufs() const;

  // Finds which of the two For loops, a or b, is the outer one. If neither
  // loop is an ancestor of the other, it returns nullptr.
  static ForPtr findOuterFor(ForPtr a, ForPtr b);

 private:
  void initialize(
      const std::vector<Tensor>& output_tensors,
      const std::vector<Tensor>& tensors_to_compute);

  StmtPtr root_stmt_;

  std::unordered_set<BufPtr> output_bufs_;
};

TORCH_API StmtPtr FlattenIndexes(StmtPtr s);

// TODO: Revisit this once we decide how dependency analysis should look.
// Maybe we would choose a different API and BufLoadOrStoreUse would be
// removed; if we decide to keep it, we need to properly document its API.
struct BufLoadOrStoreUse {
  StmtPtr s;
  bool isStore;
};

/*
 * Returns a map (Buf -> uses of this Buf). Uses are represented as vectors of
 * BufLoadOrStoreUse elements, each of which holds a StmtPtr and a bool
 * isStore flag. The order of uses in the vectors reflects the order in which
 * the uses appear in the given statement.
 */
std::unordered_map<BufPtr, std::vector<BufLoadOrStoreUse>> findLoadOrStoreUses(
    StmtPtr s);
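// Illustrative sketch (not part of this header): walking the map returned by
// findLoadOrStoreUses for a statement `s`.
//
//   auto uses = findLoadOrStoreUses(s);
//   for (const auto& kv : uses) {
//     BufPtr buf = kv.first;
//     for (const BufLoadOrStoreUse& use : kv.second) {
//       // use.s is the statement containing the access;
//       // use.isStore is true for writes, false for reads.
//     }
//   }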

// Replaces all invalid characters with underscores.
TORCH_API std::string sanitizeName(const std::string& input_name);

} // namespace tensorexpr
} // namespace jit
} // namespace torch

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/loopnest_randomization.h
ADDED
@@ -0,0 +1,13 @@
#pragma once

namespace torch {
namespace jit {
namespace tensorexpr {

// Applies a series of loop optimizations chosen randomly. This is only for
// testing purposes: it allows automatic stress testing of NNC loop
// transformations.
void loopnestRandomization(int64_t seed, LoopNest& l);
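// Illustrative sketch (not part of this header): stress-testing with many
// random seeds. `buildNest()` and the verification step are hypothetical.
//
//   for (int64_t seed = 0; seed < 1000; ++seed) {
//     LoopNest l = buildNest();      // fresh nest per iteration
//     loopnestRandomization(seed, l);
//     // codegen and compare results against an unrandomized reference
//   }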
} // namespace tensorexpr
} // namespace jit
} // namespace torch

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/lowerings.h
ADDED
@@ -0,0 +1,49 @@
// This file defines classes for registering standard lowerings from JIT to TE
// IR.
#pragma once

#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/interpreter.h>
#include <torch/csrc/jit/tensorexpr/analysis.h>
#include <torch/csrc/jit/tensorexpr/codegen.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>

namespace torch {
namespace jit {
namespace tensorexpr {

using ArgNone = std::monostate;
using BufList = std::vector<tensorexpr::BufHandle>;
using DoubleList = std::vector<double>;
using IntList = std::vector<int64_t>;
using ArgValue = std::variant<
    tensorexpr::BufHandle,
    tensorexpr::VarHandle,
    double,
    int64_t,
    bool,
    BufList,
    DoubleList,
    IntList,
    std::string,
    ArgNone>;

using NNCLoweringFunction = std::function<Tensor(
    const std::vector<ArgValue>&,
    const std::vector<ExprHandle>&,
    const std::vector<ExprHandle>&,
    const c10::optional<ScalarType>&,
    at::Device)>;

TORCH_API FunctionSchemaMap<NNCLoweringFunction>& getNNCLoweringRegistry();
TORCH_API NNCLoweringFunction getStandardLoweringFor(const std::string& op);

struct RegisterNNCLoweringsFunction {
  RegisterNNCLoweringsFunction(
      const std::vector<std::string>& schemas,
      NNCLoweringFunction fn);
};
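// Illustrative sketch (not part of this header): registering a hypothetical
// lowering at static-initialization time. The schema string, lambda body, and
// use of Compute/Max::make are assumptions, not code from this diff.
//
//   static RegisterNNCLoweringsFunction register_my_relu(
//       {"aten::relu(Tensor self) -> (Tensor)"},
//       [](const std::vector<ArgValue>& inputs,
//          const std::vector<ExprHandle>& outputShape,
//          const std::vector<ExprHandle>& outputStrides,
//          const c10::optional<ScalarType>& outputType,
//          at::Device device) -> Tensor {
//         BufHandle in = std::get<BufHandle>(inputs[0]);
//         return Compute(
//             "my_relu", outputShape, [&](const std::vector<VarHandle>& axes) {
//               ExprHandle x = in.load(axes);
//               return Max::make(x, ExprHandle(0.0f), /*propagate_nans=*/false);
//             });
//       });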

} // namespace tensorexpr
} // namespace jit
} // namespace torch

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/mem_dependency_checker.h
ADDED
@@ -0,0 +1,415 @@
#pragma once
#include <c10/core/ScalarType.h>
#include <torch/csrc/Export.h>
#include <utility>
#include <vector>

#include <torch/csrc/jit/tensorexpr/bounds_overlap.h>
#include <torch/csrc/jit/tensorexpr/ir_mutator.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
#include <torch/csrc/jit/tensorexpr/ir_visitor.h>
#include <torch/csrc/jit/tensorexpr/stmt.h>

namespace torch {
namespace jit {
namespace tensorexpr {
namespace analysis {

enum class AccessType {
  Input,
  Output,
  Load,
  Store,
  Call,
  AtomicAdd,
  Alloc,
  Free
};
const char* AccessToString(AccessType a);

class AccessInfo;
using DependencySet = std::unordered_set<std::shared_ptr<AccessInfo>>;

/* AccessInfo
 *
 * Represents a single bounded memory access to a buffer, for instance a Load
 * or a Store. Holds information relating to the specific access and links to
 * connected accesses in the dependency graph.
 */
class TORCH_API AccessInfo {
 public:
  AccessInfo(
      size_t id,
      AccessType type,
      StmtPtr stmt,
      VarPtr var,
      IndexBounds bounds)
      : id_(id),
        type_(type),
        stmt_(std::move(stmt)),
        expr_(nullptr),
        var_(std::move(var)),
        bounds_(std::move(bounds)) {}

  AccessInfo(
      size_t id,
      AccessType type,
      ExprPtr expr,
      StmtPtr stmt,
      VarPtr var,
      IndexBounds bounds)
      : id_(id),
        type_(type),
        stmt_(std::move(stmt)),
        expr_(std::move(expr)),
        var_(std::move(var)),
        bounds_(std::move(bounds)) {}

  // Id is a unique int representing the order in which this access occurred
  // in the graph.
  size_t id() const {
    return id_;
  }

  // The type of the access (Load, Store, etc).
  AccessType type() const {
    return type_;
  }

  // The enclosing Stmt this access represents. E.g. if this is a Store then
  // Stmt is the Store itself, while if the access is caused by an Expr, this
  // is the most immediate parent Stmt.
  StmtPtr stmt() const {
    return stmt_;
  }

  // If the access is represented by an Expr (such as Load or Call) then this
  // is it, otherwise it's nullptr.
  ExprPtr expr() const {
    return expr_;
  }

  // The Var representing the underlying Buffer.
  VarPtr var() const {
    return var_;
  }

  // A vector of Bounds representing the start and end expression for each
  // dimension.
  IndexBounds& bounds() {
    return bounds_;
  }

  // Each access that this one depends upon. E.g. if this is a Load, it
  // contains every Store that immediately contributes to a load of the
  // bounds; if this is a Store, it contains all reads on the RHS of the
  // Store.
  const std::map<size_t, std::shared_ptr<AccessInfo>>& dependencies() const {
    return dependencies_;
  }

  // Each access that depends on this one, i.e. this access is present in the
  // dependencies map of all accesses that are dependent.
  std::map<size_t, std::shared_ptr<AccessInfo>> dependents() const {
    std::map<size_t, std::shared_ptr<AccessInfo>> res;
    for (const auto& kv : dependents_) {
      res.emplace(kv.first, kv.second.lock());
    }
    return res;
  }

  // Returns the symbolic expressions of the indices of this access.
  std::vector<ExprPtr> getIndices() const;

  // Establishes a dependency or dependent relationship with another access.
  void addDependency(const std::shared_ptr<AccessInfo>& write);
  void addDependent(const std::shared_ptr<AccessInfo>& read);

  // Helper for checking dependencies.
  bool hasDependency(const std::shared_ptr<AccessInfo>& info) const;

  // Returns the set of all nodes that are direct (immediate) dependencies of
  // this access.
  DependencySet getDirectDependencies();
  // Likewise, returns all nodes that directly depend on this one.
  DependencySet getDirectDependents();

  // Returns the full list of all nodes in the graph that this access depends
  // on, and all nodes they depend on, and so forth, back to the inputs.
  DependencySet getIndirectDependencies();
  // Likewise, returns the full list of all nodes that depend on this node,
  // and all nodes that depend on those nodes, and so on down to the outputs.
  DependencySet getIndirectDependents();

  // Does this access represent a read of memory (Load, ReduceOp, Call, etc)?
  bool isRead() const;
  // Does this access represent a write of memory (Store, etc)?
  bool isWrite() const;

  // Helpers for dumping accesses in various formats.
  void print() const;
  void dumpDOT(std::ostream& os) const;
  const char* AccessTypeColour() const;

 private:
  size_t id_;
  AccessType type_;
  StmtPtr stmt_;
  ExprPtr expr_;
  VarPtr var_;
  IndexBounds bounds_;

  // Yes, these should be sorted.
  std::map<size_t, std::shared_ptr<AccessInfo>> dependencies_;
  std::map<size_t, std::weak_ptr<AccessInfo>> dependents_;
};

using VarBoundMap = std::unordered_map<VarPtr, Bound>;

/* MemDependencyChecker analyses an IR fragment and builds a dependency graph
 * of the accesses contained within it.
 *
 * It's possible to retrieve the entire graph in node-object form, or it can
 * be used as an oracle for answering dependency questions, e.g.:
 *
 *   analyzer.dependsIndirectly(BufA, BufB); or,
 *   analyzer.dependsDirectly(LoadA, StoreB);
 */
class TORCH_API MemDependencyChecker : public IRVisitor {
  struct Scope;

 public:
  MemDependencyChecker();
  MemDependencyChecker(
      const std::unordered_set<BufPtr>& inputs,
      const std::unordered_set<BufPtr>& outputs);
  MemDependencyChecker(
      const std::vector<BufHandle>& inputs,
      const std::vector<BufHandle>& outputs);

  ~MemDependencyChecker() override = default;

  // Whether or not to allow loop execution order to influence dependency
  // calculation. If the loop may later be parallelized you don't want this.
  bool allowLoopExecutionOrderAnalysis(bool allow = true);

  // Dependency Checking API.
  // The goal is to have enough overloads here so you don't really have to
  // think about it.

  // Returns true if any read in A has a direct dependence on a write in B.
  bool dependsDirectly(StmtPtr A, StmtPtr B);
  bool dependsDirectly(ExprPtr A, StmtPtr B);

  // Returns true if the output depends directly on a write contained in B.
  bool dependsDirectly(BufPtr output, StmtPtr B);

  // Returns true if a read in A depends directly on the provided input.
  bool dependsDirectly(StmtPtr A, BufPtr input);
  bool dependsDirectly(ExprPtr A, BufPtr input);

  // Outputs/inputs cannot depend directly.

  // Returns true if the access A has B as an immediate dependency.
  bool dependsDirectly(
      const std::shared_ptr<AccessInfo>& A,
      const std::shared_ptr<AccessInfo>& B);

  // Returns true if any read in A has an ancestor write contained in B.
  bool dependsIndirectly(StmtPtr A, StmtPtr B);
  bool dependsIndirectly(ExprPtr A, StmtPtr B);

  // Returns true if the output depends indirectly on a write contained in B.
  bool dependsIndirectly(BufPtr output, StmtPtr B);

  // Returns true if a read in A depends indirectly on the provided input.
  bool dependsIndirectly(StmtPtr A, BufPtr input);
  bool dependsIndirectly(ExprPtr A, BufPtr input);

  // Returns true if the output uses any load of the input.
  bool dependsIndirectly(BufPtr output, BufPtr input);

  // Returns true if the access A has a dependency chain to access B.
  bool dependsIndirectly(
      const std::shared_ptr<AccessInfo>& A,
      const std::shared_ptr<AccessInfo>& B);

  // Returns the AccessInfo for the given node.
  std::shared_ptr<AccessInfo> accessFor(StmtPtr A) const;
  std::shared_ptr<AccessInfo> accessFor(ExprPtr A) const;

  // Returns all AccessInfos within the given statement.
  std::unordered_set<std::shared_ptr<AccessInfo>> accessesWithin(
      StmtPtr A) const;
  // TODO: this will return only the AccessInfo for A. It's included for
  // completeness, but be aware it won't return accesses used in the
  // computation of A.
  std::unordered_set<std::shared_ptr<AccessInfo>> accessesWithin(
      ExprPtr A) const;

  // Accesses relating to input and output buffers.
  std::shared_ptr<AccessInfo> input(BufPtr B) const;
  std::shared_ptr<AccessInfo> output(BufPtr B) const;

  // Returns the full history of reads and writes.
  const std::vector<std::shared_ptr<AccessInfo>>& getHistory() const;

  // Dumps the dependency graph in DOT format.
  void dumpDAG(const std::string& filename) const;

 private:
  // Node visitors.
  void visit(StorePtr v) override;
  void visit(LoadPtr v) override;
  void visit(ForPtr v) override;
  void visit(CondPtr v) override;
  void visit(IfThenElsePtr v) override;
  void visit(CompareSelectPtr v) override;
  void visit(BlockPtr v) override;
  void visit(LetPtr v) override;
  void visit(AtomicAddPtr v) override;
  void visit(AllocatePtr v) override;
  void visit(FreePtr v) override;

  using BoundRelationship = std::pair<IndexBounds, std::shared_ptr<AccessInfo>>;

  // An internal struct holding the accesses found within a scope Block.
  struct Scope {
    Scope(BlockPtr b, std::shared_ptr<Scope> p)
        : block(std::move(b)), parent(std::move(p)) {}

    BlockPtr block;
    std::shared_ptr<Scope> parent;

    std::unordered_map<VarPtr, Bound> shadowedVarBounds;
    std::unordered_set<VarPtr> localVars;

    std::vector<std::shared_ptr<AccessInfo>> accesses_;

    std::unordered_map<VarPtr, std::list<BoundRelationship>> openWrites_;
  };
  std::shared_ptr<Scope> currentScope_;

  bool allowExecutionOrderAnalysis_{false};

  std::unordered_multimap<StmtPtr, std::shared_ptr<AccessInfo>> stmtToAccess_;
  std::unordered_multimap<ExprPtr, std::shared_ptr<AccessInfo>> exprToAccess_;
  std::unordered_map<StmtPtr, std::vector<std::shared_ptr<AccessInfo>>>
      scopeToAccesses_;

  VarBoundMap knownVarBounds_;

  // Finds all accesses that are reads within the scope of v.
  template <typename StmtOrExprPtr>
  DependencySet getAllReadsWithin(StmtOrExprPtr v) {
    DependencySet reads;
    auto insertAllReads = [&](const auto& nodes) {
      for (const auto& l : nodes) {
        auto bound = exprToAccess_.equal_range(l);
        for (auto it = bound.first; it != bound.second; ++it) {
          if (it->second->isRead()) {
            reads.insert(it->second);
          }
        }
      }
    };

    // Look for and insert accesses belonging to all nodes that act like
    // reads.
    insertAllReads(NodeFinder<Load>::find(v));
    insertAllReads(NodeFinder<ReduceOp>::find(v));

    return reads;
  }

  // Finds all accesses that are writes within the scope of v.
  // Writes cannot occur in Exprs, so this is a little simpler.
  DependencySet getAllWritesWithin(StmtPtr v) {
    DependencySet writes;

    // Writes are just Stores currently.
    auto stores = NodeFinder<Store>::find(std::move(v));
    for (const auto& s : stores) {
      auto bound = stmtToAccess_.equal_range(s);
      for (auto it = bound.first; it != bound.second; ++it) {
        if (it->second->isWrite()) {
          writes.insert(it->second);
        }
      }
    }
    return writes;
  }

  // Templated helpers to work on either Exprs or Stmts.
  template <typename StmtOrExprPtr>
  bool dependsDirectlyHelper(StmtOrExprPtr A, StmtPtr B) {
    auto aReads = getAllReadsWithin(A);
    auto bWrites = getAllWritesWithin(B);

    for (auto& read : aReads) {
      for (auto& depPair : read->dependencies()) {
        if (bWrites.count(depPair.second) != 0) {
          return true;
        }
      }
    }

    return false;
  }

  template <typename StmtOrExprPtr>
  bool dependsIndirectlyHelper(StmtOrExprPtr A, StmtPtr B) {
    auto aReads = getAllReadsWithin(A);
    auto bWrites = getAllWritesWithin(B);

    auto aDeps = getAllWriteDependencies(aReads);

    for (auto& dependency : aDeps) {
      if (bWrites.count(dependency) != 0) {
        return true;
      }
    }

    return false;
  }

  DependencySet getAllWriteDependencies(const DependencySet& products);

  // Maps for inputs and outputs, since they aren't present directly in the
  // IR.
  std::unordered_map<BufPtr, std::shared_ptr<AccessInfo>> inputs_;
  std::unordered_map<BufPtr, std::shared_ptr<AccessInfo>> outputs_;
  std::unordered_map<VarPtr, std::shared_ptr<AccessInfo>> intermediates_;

  // Inserts accesses for Bufs: specifically for inputs and outputs.
  void insertBuffers(
      std::unordered_map<BufPtr, std::shared_ptr<AccessInfo>>& bufs,
      AccessType type);

  // Updates the write history with a new write, adding dependencies and
  // closing any overlapped writes (if possible).
  void updateWriteHistory(
      std::list<BoundRelationship>& writeHistory,
      const std::shared_ptr<AccessInfo>& info,
      size_t latestAccessToClose,
      bool closeOverlapped = true,
      bool insert = true);

  // Merges a child scope into a parent scope, adding dependencies for open
  // writes in the parent to accesses in the child.
  void mergeScope(
      const std::shared_ptr<Scope>& child,
      const std::shared_ptr<Scope>& parent,
      bool closeOverlapped = true);

  // Binds symbolic vars in indices with the low and high bound for those
  // vars.
  std::vector<Bound> getIndicesBounds(const std::vector<ExprPtr>& indices);

  size_t nextAccess_{0};
  StmtPtr lastStmt_{nullptr};
};
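// Illustrative sketch (not part of this header): using the checker as an
// oracle on an already-constructed statement `stmt` with known input/output
// buffers (names hypothetical).
//
//   MemDependencyChecker checker(inputBufs, outputBufs);
//   stmt->accept(&checker);
//   if (checker.dependsIndirectly(outputBuf, inputBuf)) {
//     // some write reaching outputBuf transitively reads inputBuf
//   }
//   checker.dumpDAG("deps.dot"); // visualize the full access graph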

} // namespace analysis
} // namespace tensorexpr
} // namespace jit
} // namespace torch

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/conv2d.h
ADDED
@@ -0,0 +1,105 @@
#pragma once

#include <torch/csrc/jit/tensorexpr/operators/misc.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>

namespace torch {
namespace jit {
namespace tensorexpr {

// An API to compute 2D depthwise convolutions with bias.
TORCH_API Tensor conv2d_depthwise(
    BufHandle input,
    BufHandle weight,
    BufHandle bias,
    int stride,
    int pad,
    int groups);

// An API to compute 2D depthwise convolutions without bias.
TORCH_API Tensor conv2d_depthwise(
    BufHandle input,
    BufHandle weight,
    int stride,
    int pad,
    int groups);
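// Illustrative sketch (not part of this header): invoking the bias-free
// overload on NCHW buffers. The concrete shapes are hypothetical.
//
//   BufHandle input("input", {1, 32, 56, 56}, kFloat);  // N, C, H, W
//   BufHandle weight("weight", {32, 1, 3, 3}, kFloat);  // K, C/groups, R, S
//   Tensor out = conv2d_depthwise(
//       input, weight, /*stride=*/1, /*pad=*/1, /*groups=*/32);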

TORCH_API Tensor conv2d_depthwise(
    BufHandle input,
    BufHandle weight,
    BufHandle bias,
    ExprHandle N,
    ExprHandle C,
    ExprHandle H,
    ExprHandle W,
    ExprHandle K,
    ExprHandle CperG,
    ExprHandle R,
    ExprHandle S,
    ExprHandle stride,
    ExprHandle pad,
    ExprHandle groups);

TORCH_API Tensor conv2d_depthwise(
    BufHandle input,
    BufHandle weight,
    ExprHandle N,
    ExprHandle C,
    ExprHandle H,
    ExprHandle W,
    ExprHandle K,
    ExprHandle CperG,
    ExprHandle R,
    ExprHandle S,
    ExprHandle stride,
    ExprHandle pad,
    ExprHandle groups);

bool conv2dIsSupported(
    const TensorInfo& input,
    const TensorInfo& weight,
    const TensorInfo& bias,
    const std::vector<int64_t>& stride,
    const std::vector<int64_t>& pad,
    const std::vector<int64_t>& dilation,
    int64_t groups);
bool mkldnnPrepackedConvIsSupported(
    const TensorInfo& input,
    const TensorInfo& weight,
    const std::vector<int64_t>& stride,
    const std::vector<int64_t>& pad,
    const std::vector<int64_t>& dilation,
    int64_t groups);
Tensor computeConv2d(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeConv1d(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computePrepackedConv2dClampRun(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computePrepackedLinearClampRun(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeMkldnnPrepackedConvRun(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
} // namespace tensorexpr
} // namespace jit
} // namespace torch

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/matmul.h
ADDED
@@ -0,0 +1,24 @@
#pragma once

#include <torch/csrc/jit/tensorexpr/kernel.h>

namespace torch {
namespace jit {
namespace tensorexpr {

Tensor computeMatmul(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeAddMM(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);

} // namespace tensorexpr
} // namespace jit
} // namespace torch

videollama2/lib/python3.10/site-packages/torch/include/torch/csrc/jit/tensorexpr/operators/misc.h
ADDED
@@ -0,0 +1,98 @@
#pragma once

#include <torch/csrc/jit/tensorexpr/fwd_decls.h>
#include <torch/csrc/jit/tensorexpr/lowerings.h>
#include <torch/csrc/jit/tensorexpr/tensor.h>

namespace torch {
namespace jit {
namespace tensorexpr {

struct TensorInfo {
  std::vector<int64_t> dims;
  c10::ScalarType dtype;
};
c10::optional<TensorInfo> getTensorInfo(BufHandle b);

int64_t normalizeAndCheckIndex(int64_t idx, int64_t list_size);

// Convert boolean to integer, if needed.
ExprHandle boolToInteger(const ExprHandle& x);
ExprHandle promoteToDtype(ExprHandle e, ScalarType dt);
void promoteInputs(
    std::vector<ExprHandle>& inputs,
    const int typeConstraints = kAllTypes);
ExprHandle promoteIntegerToDefaultType(const ExprHandle& e);
ExprHandle promoteHalfToFloat(const ExprHandle& e);
ExprHandle demoteOutput(
    const ExprHandle& e,
    const c10::optional<ScalarType> type);
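// Illustrative sketch (not part of this header): how a lowering might use the
// promotion helpers so a binary op computes in a common dtype and the result
// is demoted back to the requested output type. `lhs`, `rhs`, and
// `outputType` are hypothetical.
//
//   std::vector<ExprHandle> ops = {boolToInteger(lhs), boolToInteger(rhs)};
//   promoteInputs(ops);                  // common dtype for the computation
//   ExprHandle sum = ops[0] + ops[1];
//   return demoteOutput(sum, outputType); // back to the requested dtype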

std::vector<ExprHandle> broadcastShapes(
    std::vector<std::vector<ExprHandle>> shapes);
std::vector<ExprHandle> broadcastShapes(
    const std::vector<ExprHandle>& a,
    const std::vector<ExprHandle>& b);

std::vector<ExprHandle> valueShape(const ArgValue& v);
ExprHandle tensorOrConstant(
    const ArgValue& v,
    const std::vector<ExprHandle>& axes);
ExprHandle scalarOrConstant(const ArgValue& v);
ExprHandle broadcast(BufHandle b, const std::vector<ExprHandle>& axes);
ExprHandle constant(const ArgValue& v);

ExprHandle clamp(
    const ExprHandle& cmin,
    const ExprHandle& cmax,
    const ExprHandle& input);

Tensor computeChunk(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeTranspose(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeExpand(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeReshape(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeFlatten(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeCatWoConditionals(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape);
Tensor computeCat(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);
Tensor computeEmbedding(
    const std::vector<ArgValue>& inputs,
    const std::vector<ExprHandle>& outputShape,
    const std::vector<ExprHandle>& outputStrides,
    const c10::optional<ScalarType>& outputType,
    at::Device device);

} // namespace tensorexpr
} // namespace jit
} // namespace torch