Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backoff.hpp +52 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp +63 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp +11 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp +59 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp +448 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp +249 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp +25 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h +23 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h +46 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/message.h +193 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h +84 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h +28 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_functions.h +66 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h +45 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_resp.h +23 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_rpc_handler.h +129 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback.h +32 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_impl.h +61 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_no_python.h +115 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc.h +9 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_agent.h +337 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_command_base.h +23 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h +335 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h +416 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_proto.h +160 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h +71 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_remote_call.h +57 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h +26 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_agent.h +492 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_utils.h +119 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h +37 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h +62 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h +38 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h +33 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h +90 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h +9 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h +13 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/device_lazy_init.h +50 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/init.h +9 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/invalid_arguments.h +15 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/nested.h +15 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/numpy_stub.h +21 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h +67 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/out_types.h +15 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h +418 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pycfunction_helpers.h +13 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h +7 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_arg_parser.h +1294 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_compat.h +43 -0
- vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_dispatch.h +16 -0
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Backoff.hpp
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <chrono>
|
| 4 |
+
#include <random>
|
| 5 |
+
#include <thread>
|
| 6 |
+
|
| 7 |
+
#include <c10/macros/Macros.h>
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
|
| 11 |
+
class TORCH_API Backoff {
|
| 12 |
+
public:
|
| 13 |
+
virtual ~Backoff() = default;
|
| 14 |
+
|
| 15 |
+
virtual std::chrono::milliseconds nextBackoff() = 0;
|
| 16 |
+
virtual void reset() = 0;
|
| 17 |
+
|
| 18 |
+
void sleepBackoff() {
|
| 19 |
+
std::this_thread::sleep_for(nextBackoff());
|
| 20 |
+
}
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
class TORCH_API ExponentialBackoffWithJitter : public Backoff {
|
| 24 |
+
public:
|
| 25 |
+
ExponentialBackoffWithJitter();
|
| 26 |
+
|
| 27 |
+
std::chrono::milliseconds nextBackoff() override;
|
| 28 |
+
void reset() override;
|
| 29 |
+
|
| 30 |
+
public:
|
| 31 |
+
std::chrono::milliseconds initialInterval{500};
|
| 32 |
+
double randomizationFactor{0.5};
|
| 33 |
+
double multiplier{1.5};
|
| 34 |
+
std::chrono::milliseconds maxInterval{60000};
|
| 35 |
+
|
| 36 |
+
private:
|
| 37 |
+
std::mt19937 gen_;
|
| 38 |
+
std::chrono::milliseconds currentInterval_{0};
|
| 39 |
+
};
|
| 40 |
+
|
| 41 |
+
class TORCH_API FixedBackoff : public Backoff {
|
| 42 |
+
public:
|
| 43 |
+
FixedBackoff(std::chrono::milliseconds interval);
|
| 44 |
+
|
| 45 |
+
std::chrono::milliseconds nextBackoff() override;
|
| 46 |
+
void reset() override;
|
| 47 |
+
|
| 48 |
+
private:
|
| 49 |
+
std::chrono::milliseconds interval_;
|
| 50 |
+
};
|
| 51 |
+
|
| 52 |
+
} // namespace c10d
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/FileStore.hpp
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <sys/types.h>
|
| 4 |
+
|
| 5 |
+
#include <mutex>
|
| 6 |
+
#include <unordered_map>
|
| 7 |
+
|
| 8 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 9 |
+
|
| 10 |
+
namespace c10d {
|
| 11 |
+
|
| 12 |
+
class TORCH_API FileStore : public Store {
|
| 13 |
+
public:
|
| 14 |
+
explicit FileStore(std::string path, int numWorkers);
|
| 15 |
+
|
| 16 |
+
~FileStore() override;
|
| 17 |
+
|
| 18 |
+
void set(const std::string& key, const std::vector<uint8_t>& value) override;
|
| 19 |
+
|
| 20 |
+
std::vector<uint8_t> compareSet(
|
| 21 |
+
const std::string& key,
|
| 22 |
+
const std::vector<uint8_t>& expectedValue,
|
| 23 |
+
const std::vector<uint8_t>& desiredValue) override;
|
| 24 |
+
|
| 25 |
+
std::vector<uint8_t> get(const std::string& key) override;
|
| 26 |
+
|
| 27 |
+
int64_t add(const std::string& key, int64_t value) override;
|
| 28 |
+
|
| 29 |
+
int64_t getNumKeys() override;
|
| 30 |
+
|
| 31 |
+
bool deleteKey(const std::string& key) override;
|
| 32 |
+
|
| 33 |
+
bool check(const std::vector<std::string>& keys) override;
|
| 34 |
+
|
| 35 |
+
void wait(const std::vector<std::string>& keys) override;
|
| 36 |
+
|
| 37 |
+
void wait(
|
| 38 |
+
const std::vector<std::string>& keys,
|
| 39 |
+
const std::chrono::milliseconds& timeout) override;
|
| 40 |
+
|
| 41 |
+
// Returns the path used by the FileStore.
|
| 42 |
+
const std::string& getPath() const noexcept {
|
| 43 |
+
return path_;
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
protected:
|
| 47 |
+
int64_t addHelper(const std::string& key, int64_t i);
|
| 48 |
+
|
| 49 |
+
std::string path_;
|
| 50 |
+
off_t pos_{0};
|
| 51 |
+
|
| 52 |
+
int numWorkers_;
|
| 53 |
+
const std::string cleanupKey_;
|
| 54 |
+
const std::string refCountKey_;
|
| 55 |
+
const std::string regularPrefix_;
|
| 56 |
+
const std::string deletePrefix_;
|
| 57 |
+
|
| 58 |
+
std::unordered_map<std::string, std::vector<uint8_t>> cache_;
|
| 59 |
+
|
| 60 |
+
std::mutex activeFileOpLock_;
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
} // namespace c10d
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/Functional.hpp
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Work.hpp>
|
| 4 |
+
|
| 5 |
+
namespace c10d {
|
| 6 |
+
|
| 7 |
+
C10_EXPORT void register_work(
|
| 8 |
+
const at::Tensor& tensor,
|
| 9 |
+
const c10::intrusive_ptr<c10d::Work>& work);
|
| 10 |
+
|
| 11 |
+
} // namespace c10d
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/HashStore.hpp
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <condition_variable>
|
| 4 |
+
#include <mutex>
|
| 5 |
+
#include <unordered_map>
|
| 6 |
+
|
| 7 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 8 |
+
|
| 9 |
+
namespace c10d {
|
| 10 |
+
|
| 11 |
+
class TORCH_API HashStore : public Store {
|
| 12 |
+
public:
|
| 13 |
+
~HashStore() override = default;
|
| 14 |
+
|
| 15 |
+
void set(const std::string& key, const std::vector<uint8_t>& data) override;
|
| 16 |
+
|
| 17 |
+
std::vector<uint8_t> compareSet(
|
| 18 |
+
const std::string& key,
|
| 19 |
+
const std::vector<uint8_t>& expectedValue,
|
| 20 |
+
const std::vector<uint8_t>& desiredValue) override;
|
| 21 |
+
|
| 22 |
+
std::vector<uint8_t> get(const std::string& key) override;
|
| 23 |
+
|
| 24 |
+
void wait(const std::vector<std::string>& keys) override {
|
| 25 |
+
wait(keys, timeout_);
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
void wait(
|
| 29 |
+
const std::vector<std::string>& keys,
|
| 30 |
+
const std::chrono::milliseconds& timeout) override;
|
| 31 |
+
|
| 32 |
+
int64_t add(const std::string& key, int64_t value) override;
|
| 33 |
+
|
| 34 |
+
int64_t getNumKeys() override;
|
| 35 |
+
|
| 36 |
+
bool check(const std::vector<std::string>& keys) override;
|
| 37 |
+
|
| 38 |
+
bool deleteKey(const std::string& key) override;
|
| 39 |
+
|
| 40 |
+
void append(const std::string& key, const std::vector<uint8_t>& value)
|
| 41 |
+
override;
|
| 42 |
+
|
| 43 |
+
std::vector<std::vector<uint8_t>> multiGet(
|
| 44 |
+
const std::vector<std::string>& keys) override;
|
| 45 |
+
|
| 46 |
+
void multiSet(
|
| 47 |
+
const std::vector<std::string>& keys,
|
| 48 |
+
const std::vector<std::vector<uint8_t>>& values) override;
|
| 49 |
+
|
| 50 |
+
// Returns true if this store support append, multiGet and multiSet
|
| 51 |
+
bool hasExtendedApi() const override;
|
| 52 |
+
|
| 53 |
+
protected:
|
| 54 |
+
std::unordered_map<std::string, std::vector<uint8_t>> map_;
|
| 55 |
+
std::mutex m_;
|
| 56 |
+
std::condition_variable cv_;
|
| 57 |
+
};
|
| 58 |
+
|
| 59 |
+
} // namespace c10d
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
ADDED
|
@@ -0,0 +1,448 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_C10D_GLOO
|
| 4 |
+
|
| 5 |
+
#include <condition_variable>
|
| 6 |
+
#include <deque>
|
| 7 |
+
#include <mutex>
|
| 8 |
+
#include <thread>
|
| 9 |
+
#include <vector>
|
| 10 |
+
|
| 11 |
+
#include <gloo/algorithm.h>
|
| 12 |
+
#include <gloo/common/error.h>
|
| 13 |
+
#include <gloo/context.h>
|
| 14 |
+
#include <gloo/rendezvous/store.h>
|
| 15 |
+
#include <gloo/transport/device.h>
|
| 16 |
+
|
| 17 |
+
#include <c10/util/hash.h>
|
| 18 |
+
|
| 19 |
+
#include <torch/csrc/distributed/c10d/Backend.hpp>
|
| 20 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 21 |
+
#include <torch/csrc/distributed/c10d/Types.hpp>
|
| 22 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 23 |
+
|
| 24 |
+
namespace c10d {
|
| 25 |
+
|
| 26 |
+
constexpr const char* GLOO_BACKEND_NAME = "gloo";
|
| 27 |
+
|
| 28 |
+
// ProcessGroupGloo implements Gloo bindings for c10d.
|
| 29 |
+
//
|
| 30 |
+
// All functions on this class are expected to be called in the same
|
| 31 |
+
// order across processes in the group. This is the only way that we
|
| 32 |
+
// can guarantee to match up the same calls across processes. For
|
| 33 |
+
// multi-threaded usage of process groups, you can use consider using
|
| 34 |
+
// multiple process group instances.
|
| 35 |
+
//
|
| 36 |
+
// The Gloo algorithms that this class calls into are cached by their
|
| 37 |
+
// signature (see description of AlgorithmKey above). This cache works
|
| 38 |
+
// as follows: every function call instantiates an AlgorithmKey and
|
| 39 |
+
// looks in the cache for existing entries. If there is one, it is
|
| 40 |
+
// removed from the cache and returned to the caller. If there are
|
| 41 |
+
// none, a new entry is created and returned. If an entry was created
|
| 42 |
+
// before, but is still in use, the call will block and wait until the
|
| 43 |
+
// entry is returned to the cache.
|
| 44 |
+
//
|
| 45 |
+
// In the future, we hope to extend this to allow multiple entries per
|
| 46 |
+
// key, to enable parallelism for a single key. The number of entries
|
| 47 |
+
// per key must always be identical for all processes. This maximum
|
| 48 |
+
// number can be automatically tuned, but only if we let a single
|
| 49 |
+
// process take charge, and have it broadcast the limits.
|
| 50 |
+
//
|
| 51 |
+
class TORCH_API ProcessGroupGloo : public Backend {
|
| 52 |
+
public:
|
| 53 |
+
// AsyncWork is the Gloo specific superclass for asynchronous work items.
|
| 54 |
+
// We can split asynchronous work into 3 phases:
|
| 55 |
+
// 1) Sanity checks and prepare input (e.g. memcpy)
|
| 56 |
+
// 2) Run operation on background thread
|
| 57 |
+
// 3) Synchronize with completion on foreground thread
|
| 58 |
+
//
|
| 59 |
+
// There is state to be shared between these 3 phases and all of this state
|
| 60 |
+
// is captured in the AsyncWork class and its derivatives.
|
| 61 |
+
//
|
| 62 |
+
// Note: while we are porting operations to use new style collectives, there
|
| 63 |
+
// is a split between operations using the existing caching approach and
|
| 64 |
+
// operations using the new AsyncWork base class. Over time we will port
|
| 65 |
+
// all operations and perform needed cleanup.
|
| 66 |
+
//
|
| 67 |
+
// FIXME: This probably should be called WorkGloo since the work is executed
|
| 68 |
+
// in sync mode by a background thread.
|
| 69 |
+
class TORCH_API AsyncWork : public Work {
|
| 70 |
+
public:
|
| 71 |
+
explicit AsyncWork(
|
| 72 |
+
std::vector<std::vector<at::Tensor>> outputTensors,
|
| 73 |
+
OpType opType,
|
| 74 |
+
uint64_t seq,
|
| 75 |
+
const char* profilingTitle = nullptr,
|
| 76 |
+
const std::optional<std::vector<at::Tensor>>& inputTensors =
|
| 77 |
+
std::nullopt);
|
| 78 |
+
|
| 79 |
+
~AsyncWork() override = default;
|
| 80 |
+
|
| 81 |
+
static void execute(const c10::intrusive_ptr<AsyncWork>& work);
|
| 82 |
+
|
| 83 |
+
virtual void run() = 0;
|
| 84 |
+
|
| 85 |
+
std::vector<at::Tensor> result() override;
|
| 86 |
+
|
| 87 |
+
c10::intrusive_ptr<c10::ivalue::Future> getFuture() override;
|
| 88 |
+
uint64_t getSequencenumber() const override;
|
| 89 |
+
|
| 90 |
+
protected:
|
| 91 |
+
friend class ProcessGroupGloo;
|
| 92 |
+
|
| 93 |
+
private:
|
| 94 |
+
void finishWorkGloo();
|
| 95 |
+
void finishWorkGlooError(const std::exception_ptr& eptr);
|
| 96 |
+
inline void recordAsyncWorkProfilingInfo(
|
| 97 |
+
const char* profilingTitle,
|
| 98 |
+
const std::optional<std::vector<at::Tensor>>& inputTensors);
|
| 99 |
+
|
| 100 |
+
const std::vector<std::vector<at::Tensor>> outputTensors_;
|
| 101 |
+
c10::intrusive_ptr<at::ivalue::Future> future_;
|
| 102 |
+
std::function<void()> recordFunctionBeforeCallback_;
|
| 103 |
+
const uint64_t seq_;
|
| 104 |
+
};
|
| 105 |
+
|
| 106 |
+
// Wrap c10d store as Gloo store
|
| 107 |
+
class TORCH_API GlooStore : public ::gloo::rendezvous::Store {
|
| 108 |
+
public:
|
| 109 |
+
GlooStore(const c10::intrusive_ptr<::c10d::Store>& store) : store_(store) {}
|
| 110 |
+
|
| 111 |
+
void setUint(const std::string& key, const std::vector<uint8_t>& value) {
|
| 112 |
+
store_->set(key, value);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
void set(const std::string& key, const std::vector<char>& value) override {
|
| 116 |
+
std::vector<uint8_t> tmp(value.begin(), value.end());
|
| 117 |
+
store_->set(key, tmp);
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
std::vector<uint8_t> getUint(const std::string& key) {
|
| 121 |
+
auto value = store_->get(key);
|
| 122 |
+
return value;
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
std::vector<char> get(const std::string& key) override {
|
| 126 |
+
auto value = store_->get(key);
|
| 127 |
+
return std::vector<char>(value.begin(), value.end());
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
void wait(const std::vector<std::string>& keys) override {
|
| 131 |
+
store_->wait(keys, ::c10d::Store::kDefaultTimeout);
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
void wait(
|
| 135 |
+
const std::vector<std::string>& keys,
|
| 136 |
+
const std::chrono::milliseconds& timeout) override {
|
| 137 |
+
store_->wait(keys, timeout);
|
| 138 |
+
}
|
| 139 |
+
|
| 140 |
+
#ifdef GLOO_STORE_HAS_STORE_V2
|
| 141 |
+
bool has_v2_support() override {
|
| 142 |
+
return store_->hasExtendedApi();
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
std::vector<std::vector<char>> multi_get(
|
| 146 |
+
const std::vector<std::string>& keys) override {
|
| 147 |
+
std::vector<std::vector<char>> res;
|
| 148 |
+
for (auto& value : store_->multiGet(keys)) {
|
| 149 |
+
res.emplace_back(value.begin(), value.end());
|
| 150 |
+
}
|
| 151 |
+
return res;
|
| 152 |
+
}
|
| 153 |
+
|
| 154 |
+
void multi_set(
|
| 155 |
+
const std::vector<std::string>& keys,
|
| 156 |
+
const std::vector<std::vector<char>>& values) override {
|
| 157 |
+
std::vector<std::vector<uint8_t>> u_values;
|
| 158 |
+
u_values.reserve(values.size());
|
| 159 |
+
for (auto& value : values) {
|
| 160 |
+
u_values.emplace_back(value.begin(), value.end());
|
| 161 |
+
}
|
| 162 |
+
store_->multiSet(keys, u_values);
|
| 163 |
+
}
|
| 164 |
+
|
| 165 |
+
void append(const std::string& key, const std::vector<char>& value)
|
| 166 |
+
override {
|
| 167 |
+
std::vector<uint8_t> tmp(value.begin(), value.end());
|
| 168 |
+
return store_->append(key, tmp);
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
int64_t add(const std::string& key, int64_t value) override {
|
| 172 |
+
return store_->add(key, value);
|
| 173 |
+
}
|
| 174 |
+
#endif
|
| 175 |
+
|
| 176 |
+
protected:
|
| 177 |
+
c10::intrusive_ptr<::c10d::Store> store_;
|
| 178 |
+
};
|
| 179 |
+
|
| 180 |
+
// For send and recv operations there is no need to pass them to the
|
| 181 |
+
// thread pool as they are entirely completed by the device thread.
|
| 182 |
+
// This work object is used to synchronize completion of the send or
|
| 183 |
+
// recv operation. It keeps a reference to the tensor it is
|
| 184 |
+
// operating on to prevent it from being deallocated while the
|
| 185 |
+
// operation is still in flight.
|
| 186 |
+
class TORCH_API SendWork : public Work {
|
| 187 |
+
public:
|
| 188 |
+
explicit SendWork(
|
| 189 |
+
at::Tensor& tensor,
|
| 190 |
+
std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
|
| 191 |
+
uint64_t seq);
|
| 192 |
+
|
| 193 |
+
bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
|
| 194 |
+
|
| 195 |
+
void abort() override;
|
| 196 |
+
|
| 197 |
+
uint64_t getSequencenumber() const override;
|
| 198 |
+
|
| 199 |
+
protected:
|
| 200 |
+
at::Tensor tensor_;
|
| 201 |
+
std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
|
| 202 |
+
const uint64_t seq_;
|
| 203 |
+
};
|
| 204 |
+
|
| 205 |
+
class TORCH_API RecvWork : public Work {
|
| 206 |
+
public:
|
| 207 |
+
explicit RecvWork(
|
| 208 |
+
at::Tensor& tensor,
|
| 209 |
+
std::unique_ptr<::gloo::transport::UnboundBuffer> buffer,
|
| 210 |
+
OpType opType,
|
| 211 |
+
uint64_t seq,
|
| 212 |
+
const char* profilingTitle = nullptr);
|
| 213 |
+
|
| 214 |
+
int sourceRank() const override;
|
| 215 |
+
|
| 216 |
+
bool wait(std::chrono::milliseconds timeout = kNoTimeout) override;
|
| 217 |
+
|
| 218 |
+
void abort() override;
|
| 219 |
+
|
| 220 |
+
uint64_t getSequencenumber() const override;
|
| 221 |
+
|
| 222 |
+
protected:
|
| 223 |
+
at::Tensor tensor_;
|
| 224 |
+
std::unique_ptr<::gloo::transport::UnboundBuffer> buffer_;
|
| 225 |
+
int srcRank_;
|
| 226 |
+
const uint64_t seq_;
|
| 227 |
+
};
|
| 228 |
+
|
| 229 |
+
struct TORCH_API Options : public Backend::Options {
|
| 230 |
+
explicit Options(
|
| 231 |
+
std::chrono::milliseconds timeout = kBackendDefaultTimeout);
|
| 232 |
+
|
| 233 |
+
// return intrusive_ptr of the object
|
| 234 |
+
static c10::intrusive_ptr<Options> create(
|
| 235 |
+
std::chrono::milliseconds timeout = kBackendDefaultTimeout) {
|
| 236 |
+
return c10::make_intrusive<Options>(timeout);
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
std::vector<std::shared_ptr<::gloo::transport::Device>> devices;
|
| 240 |
+
int threads;
|
| 241 |
+
};
|
| 242 |
+
|
| 243 |
+
const std::string getBackendName() const override {
|
| 244 |
+
return std::string(GLOO_BACKEND_NAME);
|
| 245 |
+
}
|
| 246 |
+
|
| 247 |
+
// Helper functions to create a new device object.
|
| 248 |
+
// They are static functions on this class to keep them logically
|
| 249 |
+
// separate from the rest of the code base (e.g. torch/csrc/distributed).
|
| 250 |
+
|
| 251 |
+
// Create new device instance for specific interface.
|
| 252 |
+
static std::shared_ptr<::gloo::transport::Device> createDeviceForInterface(
|
| 253 |
+
const std::string& interface);
|
| 254 |
+
|
| 255 |
+
// Create new device instance for specific hostname or address.
|
| 256 |
+
static std::shared_ptr<::gloo::transport::Device> createDeviceForHostname(
|
| 257 |
+
const std::string& hostname);
|
| 258 |
+
|
| 259 |
+
// Create new device instance.
|
| 260 |
+
// It tries to resolve this machine's hostname and bind to that address.
|
| 261 |
+
// If that fails (i.e. the hostname doesn't resolve to an address), it
|
| 262 |
+
// falls back to binding to the loopback address.
|
| 263 |
+
static std::shared_ptr<::gloo::transport::Device> createDefaultDevice();
|
| 264 |
+
|
| 265 |
+
// Create ProcessGroupGloo instance.
|
| 266 |
+
static c10::intrusive_ptr<ProcessGroupGloo> createProcessGroupGloo(
|
| 267 |
+
const c10::intrusive_ptr<Store>& store,
|
| 268 |
+
int rank,
|
| 269 |
+
int size,
|
| 270 |
+
std::chrono::milliseconds timeout);
|
| 271 |
+
|
| 272 |
+
explicit ProcessGroupGloo(
|
| 273 |
+
const c10::intrusive_ptr<Store>& store,
|
| 274 |
+
int rank,
|
| 275 |
+
int size,
|
| 276 |
+
c10::intrusive_ptr<Options> options = Options::create());
|
| 277 |
+
|
| 278 |
+
~ProcessGroupGloo() override;
|
| 279 |
+
|
| 280 |
+
c10::intrusive_ptr<Options> getOptions() {
|
| 281 |
+
return options_;
|
| 282 |
+
}
|
| 283 |
+
|
| 284 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 285 |
+
std::vector<at::Tensor>& tensors,
|
| 286 |
+
const BroadcastOptions& opts = BroadcastOptions()) override;
|
| 287 |
+
|
| 288 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 289 |
+
std::vector<at::Tensor>& tensors,
|
| 290 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 291 |
+
|
| 292 |
+
c10::intrusive_ptr<Work> allreduce_sparse(
|
| 293 |
+
std::vector<at::Tensor>& tensors,
|
| 294 |
+
const AllreduceOptions& opts = AllreduceOptions()) override;
|
| 295 |
+
|
| 296 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 297 |
+
std::vector<at::Tensor>& tensors,
|
| 298 |
+
const AllreduceCoalescedOptions& opts =
|
| 299 |
+
AllreduceCoalescedOptions()) override;
|
| 300 |
+
|
| 301 |
+
c10::intrusive_ptr<Work> reduce(
|
| 302 |
+
std::vector<at::Tensor>& tensors,
|
| 303 |
+
const ReduceOptions& opts = ReduceOptions()) override;
|
| 304 |
+
|
| 305 |
+
c10::intrusive_ptr<Work> _reduce_scatter_base(
|
| 306 |
+
at::Tensor& outputTensor,
|
| 307 |
+
at::Tensor& inputTensor,
|
| 308 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 309 |
+
|
| 310 |
+
c10::intrusive_ptr<Work> _allgather_base(
|
| 311 |
+
at::Tensor& output_tensor,
|
| 312 |
+
at::Tensor& input_tensor,
|
| 313 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 314 |
+
|
| 315 |
+
c10::intrusive_ptr<Work> allgather(
|
| 316 |
+
std::vector<std::vector<at::Tensor>>& outputs,
|
| 317 |
+
std::vector<at::Tensor>& inputs,
|
| 318 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 319 |
+
|
| 320 |
+
c10::intrusive_ptr<Work> allgather_coalesced(
|
| 321 |
+
std::vector<std::vector<at::Tensor>>& output_lists,
|
| 322 |
+
std::vector<at::Tensor>& input_list,
|
| 323 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 324 |
+
|
| 325 |
+
c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
|
| 326 |
+
std::vector<at::Tensor>& outputs,
|
| 327 |
+
std::vector<at::Tensor>& inputs,
|
| 328 |
+
const AllgatherOptions& opts = AllgatherOptions()) override;
|
| 329 |
+
|
| 330 |
+
c10::intrusive_ptr<Work> gather(
|
| 331 |
+
std::vector<std::vector<at::Tensor>>& outputs,
|
| 332 |
+
std::vector<at::Tensor>& inputs,
|
| 333 |
+
const GatherOptions& opts = GatherOptions()) override;
|
| 334 |
+
|
| 335 |
+
c10::intrusive_ptr<Work> scatter(
|
| 336 |
+
std::vector<at::Tensor>& outputs,
|
| 337 |
+
std::vector<std::vector<at::Tensor>>& inputs,
|
| 338 |
+
const ScatterOptions& opts = ScatterOptions()) override;
|
| 339 |
+
|
| 340 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 341 |
+
std::vector<at::Tensor>& outputs,
|
| 342 |
+
std::vector<std::vector<at::Tensor>>& inputs,
|
| 343 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 344 |
+
|
| 345 |
+
c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
|
| 346 |
+
std::vector<at::Tensor>& outputTensors,
|
| 347 |
+
std::vector<at::Tensor>& inputTensors,
|
| 348 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override;
|
| 349 |
+
|
| 350 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 351 |
+
at::Tensor& outputTensor,
|
| 352 |
+
at::Tensor& inputTensor,
|
| 353 |
+
std::vector<int64_t>& outputCounts,
|
| 354 |
+
std::vector<int64_t>& inputCounts,
|
| 355 |
+
const AllToAllOptions& opts = AllToAllOptions()) override;
|
| 356 |
+
|
| 357 |
+
c10::intrusive_ptr<Work> send(
|
| 358 |
+
std::vector<at::Tensor>& tensors,
|
| 359 |
+
int dstRank,
|
| 360 |
+
int tag) override;
|
| 361 |
+
|
| 362 |
+
c10::intrusive_ptr<Work> recv(
|
| 363 |
+
std::vector<at::Tensor>& tensors,
|
| 364 |
+
int srcRank,
|
| 365 |
+
int tag) override;
|
| 366 |
+
|
| 367 |
+
c10::intrusive_ptr<Work> recvAnysource(
|
| 368 |
+
std::vector<at::Tensor>& tensors,
|
| 369 |
+
int tag) override;
|
| 370 |
+
|
| 371 |
+
c10::intrusive_ptr<Work> barrier(
|
| 372 |
+
const BarrierOptions& opts = BarrierOptions()) override;
|
| 373 |
+
|
| 374 |
+
void enableCollectivesTiming() override;
|
| 375 |
+
|
| 376 |
+
const std::unique_ptr<::gloo::rendezvous::Store>& _getStore() const {
|
| 377 |
+
return store_;
|
| 378 |
+
}
|
| 379 |
+
|
| 380 |
+
// Similar to barrier(), but blocks rank 0 until all other ranks have
|
| 381 |
+
// acknowledged that they are alive (through send/recv from rank 0). Rank 0
|
| 382 |
+
// is able to report all failed ranks if waitAllRanks = true, otherwise
|
| 383 |
+
// reports the first rank it detected as failed.
|
| 384 |
+
void monitoredBarrier(
|
| 385 |
+
const BarrierOptions& opts = BarrierOptions(),
|
| 386 |
+
bool waitAllRanks = false) override;
|
| 387 |
+
|
| 388 |
+
// Agrees on an initial sequence number for the whole group by having rank 0
|
| 389 |
+
// create it and broadcast it to other ranks using the store.
|
| 390 |
+
void setSequenceNumberForGroup() override;
|
| 391 |
+
|
| 392 |
+
// Retrieves the current sequence number for the whole group, which should be
|
| 393 |
+
// in sync. If the returned number is not consistent across the group, it
|
| 394 |
+
// may indicate that there is some sort of collective desynchronization.
|
| 395 |
+
uint64_t getSequenceNumberForGroup() override;
|
| 396 |
+
|
| 397 |
+
int getNumThreads() {
|
| 398 |
+
return options_->threads;
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
protected:
|
| 402 |
+
std::unique_ptr<::gloo::rendezvous::Store> store_;
|
| 403 |
+
const c10::intrusive_ptr<Options> options_;
|
| 404 |
+
|
| 405 |
+
// Every Gloo context represents a set of connections to its peers.
|
| 406 |
+
// In order to use more than one device (or allow for parallelism on
|
| 407 |
+
// a single device), you need multiple contexts.
|
| 408 |
+
std::vector<std::shared_ptr<::gloo::Context>> contexts_;
|
| 409 |
+
std::vector<std::thread> threads_;
|
| 410 |
+
bool stop_;
|
| 411 |
+
|
| 412 |
+
// Incremented for every collective we kick off.
|
| 413 |
+
// The value is used as tag for collective operations. Collectives are kicked
|
| 414 |
+
// off in identical order across processes. Therefore the tag can be used
|
| 415 |
+
// to match up operations during concurrent execution.
|
| 416 |
+
uint32_t collectiveCounter_;
|
| 417 |
+
|
| 418 |
+
// Returns next collective tag to use (uses collectiveCounter_).
|
| 419 |
+
uint32_t nextTag();
|
| 420 |
+
|
| 421 |
+
// Returns the context to use for the specified tag.
|
| 422 |
+
// With `nextTag` returning an increasing number, this should lead
|
| 423 |
+
// to contexts being used in a round-robin fashion.
|
| 424 |
+
std::shared_ptr<::gloo::Context> getContext(uint32_t tag);
|
| 425 |
+
|
| 426 |
+
// Entrypoint for worker threads.
|
| 427 |
+
void runLoop(int workerIndex);
|
| 428 |
+
|
| 429 |
+
// Queue work to run on worker thread.
|
| 430 |
+
void enqueue(c10::intrusive_ptr<AsyncWork> work);
|
| 431 |
+
|
| 432 |
+
// Keep both a queue of pending work, and a vector with in progress work.
|
| 433 |
+
// Both of these can only be mutated when holding the queue lock.
|
| 434 |
+
// We keep both around instead of just the queue, so we can grab a weak_ptr
|
| 435 |
+
// to all in progress and pending work when executing a barrier.
|
| 436 |
+
// When executing a barrier, we need to ensure that all prior work
|
| 437 |
+
// has completed before completing itself.
|
| 438 |
+
std::deque<c10::intrusive_ptr<AsyncWork>> workQueue_;
|
| 439 |
+
std::vector<c10::intrusive_ptr<AsyncWork>> workInProgress_;
|
| 440 |
+
std::mutex workMutex_;
|
| 441 |
+
std::condition_variable workProduceCV_;
|
| 442 |
+
std::condition_variable workConsumeCV_;
|
| 443 |
+
uint64_t seq_{0};
|
| 444 |
+
};
|
| 445 |
+
|
| 446 |
+
} // namespace c10d
|
| 447 |
+
|
| 448 |
+
#endif // USE_C10D_GLOO
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/PyProcessGroup.hpp
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/ProcessGroup.hpp>
|
| 4 |
+
#include <torch/csrc/jit/python/pybind_utils.h>
|
| 5 |
+
#include <torch/csrc/utils/pybind.h>
|
| 6 |
+
|
| 7 |
+
namespace c10d {
|
| 8 |
+
|
| 9 |
+
// PyProcessGroup is a pybind11 trampoline class to allow a Python
|
| 10 |
+
// class to inherit from torch.distributed.ProcessGroup
|
| 11 |
+
class PyProcessGroup : public ProcessGroup {
|
| 12 |
+
public:
|
| 13 |
+
// PyWork is a pybind11 trampoline class to allow a Python
|
| 14 |
+
// class to inherit from torch.distributed.Work
|
| 15 |
+
class TORCH_PYTHON_API PyWork : public Work {
|
| 16 |
+
public:
|
| 17 |
+
PyWork() = default;
|
| 18 |
+
|
| 19 |
+
bool wait(std::chrono::milliseconds timeout = kNoTimeout) override {
|
| 20 |
+
PYBIND11_OVERRIDE(
|
| 21 |
+
bool, /* Return type */
|
| 22 |
+
Work, /* Parent class */
|
| 23 |
+
wait, /* Name of function in C++ */
|
| 24 |
+
timeout);
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
c10::intrusive_ptr<c10::ivalue::Future> getFuture() override {
|
| 28 |
+
// We cannot use PYBIND11_OVERRIDE because:
|
| 29 |
+
// 1. We have to >MANUALLY< unwrap the PyFutureWrapper and
|
| 30 |
+
// 2. The python name is get_future
|
| 31 |
+
pybind11::gil_scoped_acquire gil;
|
| 32 |
+
auto override =
|
| 33 |
+
pybind11::get_override(static_cast<const Work*>(this), "get_future");
|
| 34 |
+
|
| 35 |
+
if (override) {
|
| 36 |
+
py::object o = override();
|
| 37 |
+
auto futWrapper =
|
| 38 |
+
o.cast<std::shared_ptr<torch::jit::PythonFutureWrapper>>();
|
| 39 |
+
return futWrapper->fut;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
return Work::getFuture();
|
| 43 |
+
}
|
| 44 |
+
|
| 45 |
+
// Take a reference of the corresponding py::object.
|
| 46 |
+
// With functional collectives, ownership of work objects is generally
|
| 47 |
+
// transferred to C++. For pure C++ work objects, it is sufficient to
|
| 48 |
+
// transfer the ownership of work object. For user-defined work objects in
|
| 49 |
+
// Python, it is necessary to keep the corresponding py::object alive in
|
| 50 |
+
// addition to ensure that the user-defined methods can be executed.
|
| 51 |
+
void ref_py_object() {
|
| 52 |
+
py_obj_ = py::cast(this);
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
private:
|
| 56 |
+
py::object py_obj_;
|
| 57 |
+
};
|
| 58 |
+
|
| 59 |
+
using ProcessGroup::ProcessGroup;
|
| 60 |
+
|
| 61 |
+
const std::string getBackendName() const override {
|
| 62 |
+
PYBIND11_OVERRIDE_PURE(
|
| 63 |
+
std::string, /* Return type */
|
| 64 |
+
ProcessGroup, /* Parent class */
|
| 65 |
+
getBackendName, /* Name of function in C++ */
|
| 66 |
+
);
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
c10::intrusive_ptr<Work> allgather(
|
| 70 |
+
std::vector<std::vector<at::Tensor>>& outputTensors,
|
| 71 |
+
std::vector<at::Tensor>& inputTensors,
|
| 72 |
+
const AllgatherOptions& opts = AllgatherOptions()) override {
|
| 73 |
+
PYBIND11_OVERRIDE(
|
| 74 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 75 |
+
ProcessGroup, /* Parent class */
|
| 76 |
+
allgather, /* Name of function in C++ */
|
| 77 |
+
outputTensors,
|
| 78 |
+
inputTensors,
|
| 79 |
+
opts);
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
c10::intrusive_ptr<Work> allgather_into_tensor_coalesced(
|
| 83 |
+
std::vector<at::Tensor>& outputTensors,
|
| 84 |
+
std::vector<at::Tensor>& inputTensors,
|
| 85 |
+
const AllgatherOptions& opts = AllgatherOptions()) override {
|
| 86 |
+
PYBIND11_OVERRIDE(
|
| 87 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 88 |
+
ProcessGroup, /* Parent class */
|
| 89 |
+
allgather_into_tensor_coalesced, /* Name of function in C++ */
|
| 90 |
+
outputTensors,
|
| 91 |
+
inputTensors,
|
| 92 |
+
opts);
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
c10::intrusive_ptr<Work> allreduce(
|
| 96 |
+
std::vector<at::Tensor>& tensors,
|
| 97 |
+
const AllreduceOptions& opts = AllreduceOptions()) override {
|
| 98 |
+
PYBIND11_OVERRIDE(
|
| 99 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 100 |
+
ProcessGroup, /* Parent class */
|
| 101 |
+
allreduce, /* Name of function in C++ */
|
| 102 |
+
tensors,
|
| 103 |
+
opts);
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
c10::intrusive_ptr<Work> allreduce_coalesced(
|
| 107 |
+
std::vector<at::Tensor>& tensors,
|
| 108 |
+
const AllreduceCoalescedOptions& opts =
|
| 109 |
+
AllreduceCoalescedOptions()) override {
|
| 110 |
+
PYBIND11_OVERRIDE(
|
| 111 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 112 |
+
ProcessGroup, /* Parent class */
|
| 113 |
+
allreduce_coalesced, /* Name of function in C++ */
|
| 114 |
+
tensors,
|
| 115 |
+
opts);
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
c10::intrusive_ptr<Work> alltoall_base(
|
| 119 |
+
at::Tensor& outputBuffer,
|
| 120 |
+
at::Tensor& inputBuffer,
|
| 121 |
+
std::vector<int64_t>& outputSplitSizes,
|
| 122 |
+
std::vector<int64_t>& inputSplitSizes,
|
| 123 |
+
const AllToAllOptions& opts = AllToAllOptions()) override {
|
| 124 |
+
PYBIND11_OVERRIDE(
|
| 125 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 126 |
+
ProcessGroup, /* Parent class */
|
| 127 |
+
alltoall_base, /* Name of function in C++ */
|
| 128 |
+
outputBuffer,
|
| 129 |
+
inputBuffer,
|
| 130 |
+
outputSplitSizes,
|
| 131 |
+
inputSplitSizes,
|
| 132 |
+
opts);
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
c10::intrusive_ptr<Work> barrier(
|
| 136 |
+
const BarrierOptions& opts = BarrierOptions()) override {
|
| 137 |
+
PYBIND11_OVERRIDE(
|
| 138 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 139 |
+
ProcessGroup, /* Parent class */
|
| 140 |
+
barrier, /* Name of function in C++ */
|
| 141 |
+
opts);
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
c10::intrusive_ptr<Work> broadcast(
|
| 145 |
+
std::vector<at::Tensor>& tensors,
|
| 146 |
+
const BroadcastOptions& opts = BroadcastOptions()) override {
|
| 147 |
+
PYBIND11_OVERRIDE(
|
| 148 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 149 |
+
ProcessGroup, /* Parent class */
|
| 150 |
+
broadcast, /* Name of function in C++ */
|
| 151 |
+
tensors,
|
| 152 |
+
opts);
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
c10::intrusive_ptr<Work> reduce_scatter(
|
| 156 |
+
std::vector<at::Tensor>& outputTensors,
|
| 157 |
+
std::vector<std::vector<at::Tensor>>& inputTensors,
|
| 158 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override {
|
| 159 |
+
PYBIND11_OVERRIDE(
|
| 160 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 161 |
+
ProcessGroup, /* Parent class */
|
| 162 |
+
reduce_scatter, /* Name of function in C++ */
|
| 163 |
+
outputTensors,
|
| 164 |
+
inputTensors,
|
| 165 |
+
opts);
|
| 166 |
+
}
|
| 167 |
+
|
| 168 |
+
c10::intrusive_ptr<Work> reduce_scatter_tensor_coalesced(
|
| 169 |
+
std::vector<at::Tensor>& outputTensors,
|
| 170 |
+
std::vector<at::Tensor>& inputTensors,
|
| 171 |
+
const ReduceScatterOptions& opts = ReduceScatterOptions()) override {
|
| 172 |
+
PYBIND11_OVERRIDE(
|
| 173 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 174 |
+
ProcessGroup, /* Parent class */
|
| 175 |
+
reduce_scatter_tensor_coalesced, /* Name of function in C++ */
|
| 176 |
+
outputTensors,
|
| 177 |
+
inputTensors,
|
| 178 |
+
opts);
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
c10::intrusive_ptr<Work> send(
|
| 182 |
+
std::vector<at::Tensor>& tensors,
|
| 183 |
+
int dstRank,
|
| 184 |
+
int tag) override {
|
| 185 |
+
PYBIND11_OVERRIDE(
|
| 186 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 187 |
+
ProcessGroup, /* Parent class */
|
| 188 |
+
send, /* Name of function in C++ */
|
| 189 |
+
tensors,
|
| 190 |
+
dstRank,
|
| 191 |
+
tag);
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
c10::intrusive_ptr<Work> recv(
|
| 195 |
+
std::vector<at::Tensor>& tensors,
|
| 196 |
+
int srcRank,
|
| 197 |
+
int tag) override {
|
| 198 |
+
PYBIND11_OVERRIDE(
|
| 199 |
+
c10::intrusive_ptr<Work>, /* Return type */
|
| 200 |
+
ProcessGroup, /* Parent class */
|
| 201 |
+
recv, /* Name of function in C++ */
|
| 202 |
+
tensors,
|
| 203 |
+
srcRank,
|
| 204 |
+
tag);
|
| 205 |
+
}
|
| 206 |
+
};
|
| 207 |
+
|
| 208 |
+
class TORCH_PYTHON_API PythonOnCompletionHook {
|
| 209 |
+
public:
|
| 210 |
+
// Wraps a py::object hook and acquires Python GIL in dtor before
|
| 211 |
+
// destructing the hook object.
|
| 212 |
+
PythonOnCompletionHook(py::object hook) : hook_(std::move(hook)) {}
|
| 213 |
+
|
| 214 |
+
~PythonOnCompletionHook() {
|
| 215 |
+
py::gil_scoped_acquire ag;
|
| 216 |
+
hook_.dec_ref();
|
| 217 |
+
// Explicitly set hook_ to nullptr to prevent py::object's dtor
|
| 218 |
+
// to decref on the PyObject again.
|
| 219 |
+
// See Note [Destructing py::object] in python_ivalue.h
|
| 220 |
+
hook_.ptr() = nullptr;
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
void operator()(const std::shared_ptr<WorkInfo>& workInfo) const {
|
| 224 |
+
std::exception_ptr eptr;
|
| 225 |
+
{
|
| 226 |
+
py::gil_scoped_acquire acquire;
|
| 227 |
+
try {
|
| 228 |
+
hook_(workInfo);
|
| 229 |
+
} catch (py::error_already_set& e) {
|
| 230 |
+
// py::error_already_set requires GIL to destruct, take
|
| 231 |
+
// special care.
|
| 232 |
+
eptr = std::make_exception_ptr(std::runtime_error(e.what()));
|
| 233 |
+
e.restore();
|
| 234 |
+
PyErr_Clear();
|
| 235 |
+
} catch (std::exception& e) {
|
| 236 |
+
eptr = std::current_exception();
|
| 237 |
+
}
|
| 238 |
+
}
|
| 239 |
+
// No more Python-related stuff at this point, i.e., this
|
| 240 |
+
// exception can be captured and handled by PG backend.
|
| 241 |
+
if (eptr)
|
| 242 |
+
std::rethrow_exception(eptr);
|
| 243 |
+
}
|
| 244 |
+
|
| 245 |
+
private:
|
| 246 |
+
py::object hook_;
|
| 247 |
+
};
|
| 248 |
+
|
| 249 |
+
} // namespace c10d
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/UnixSockUtils.hpp
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/Utils.hpp>
|
| 4 |
+
|
| 5 |
+
namespace c10d::tcputil {
|
| 6 |
+
|
| 7 |
+
#define CONNECT_SOCKET_OFFSET 2
|
| 8 |
+
|
| 9 |
+
inline int poll(struct pollfd* fds, unsigned long nfds, int timeout) {
|
| 10 |
+
return ::poll(fds, nfds, timeout);
|
| 11 |
+
}
|
| 12 |
+
|
| 13 |
+
inline void addPollfd(
|
| 14 |
+
std::vector<struct pollfd>& fds,
|
| 15 |
+
int socket,
|
| 16 |
+
short events) {
|
| 17 |
+
fds.push_back({.fd = socket, .events = events});
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
inline struct ::pollfd getPollfd(int socket, short events) {
|
| 21 |
+
struct ::pollfd res = {.fd = socket, .events = events};
|
| 22 |
+
return res;
|
| 23 |
+
}
|
| 24 |
+
|
| 25 |
+
} // namespace c10d::tcputil
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/c10d/debug.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Meta Platforms, Inc. and its affiliates.
|
| 2 |
+
// All rights reserved.
|
| 3 |
+
//
|
| 4 |
+
// This source code is licensed under the BSD-style license found in the
|
| 5 |
+
// LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
#pragma once
|
| 8 |
+
|
| 9 |
+
#include <c10/macros/Macros.h>
|
| 10 |
+
|
| 11 |
+
namespace c10d {
|
| 12 |
+
|
| 13 |
+
enum class DebugLevel { Off = 0, Info = 1, Detail = 2 };
|
| 14 |
+
|
| 15 |
+
TORCH_API void setDebugLevel(DebugLevel level);
|
| 16 |
+
|
| 17 |
+
// Sets the debug level based on the value of the `TORCH_DISTRIBUTED_DEBUG`
|
| 18 |
+
// environment variable.
|
| 19 |
+
TORCH_API void setDebugLevelFromEnvironment();
|
| 20 |
+
|
| 21 |
+
TORCH_API DebugLevel debug_level() noexcept;
|
| 22 |
+
|
| 23 |
+
} // namespace c10d
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/agent_utils.h
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/c10d/PrefixStore.hpp>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/utils.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace distributed {
|
| 8 |
+
namespace rpc {
|
| 9 |
+
|
| 10 |
+
// All RPC peers should call into this function at the same time. Each peer
|
| 11 |
+
// provides its own id and name, and this function uses the given Store to
|
| 12 |
+
// gather global name-to-id mapping on all peers.
|
| 13 |
+
TORCH_API std::unordered_map<std::string, worker_id_t> collectNames(
|
| 14 |
+
::c10d::PrefixStore store,
|
| 15 |
+
const worker_id_t selfId,
|
| 16 |
+
const std::string& selfName,
|
| 17 |
+
const int worldSize);
|
| 18 |
+
|
| 19 |
+
// Ranks in dynamic RPC groups will initially call into this to establish the
|
| 20 |
+
// name-to-id mapping for the current peers in the group. The current rank will
|
| 21 |
+
// put its own worker info in the store and discover all the ranks that came
|
| 22 |
+
// before it. NOTE: This needs to be called with the Dynamic RPC group
|
| 23 |
+
// membership management token held.
|
| 24 |
+
TORCH_API std::unordered_map<std::string, worker_id_t> collectCurrentNames(
|
| 25 |
+
::c10d::PrefixStore store,
|
| 26 |
+
const worker_id_t selfId,
|
| 27 |
+
const std::string& selfName);
|
| 28 |
+
|
| 29 |
+
// Remove name frmo Store, used in dynamic RPC groups.
|
| 30 |
+
// NOTE: This needs to be called with the Dynamic RPC group
|
| 31 |
+
// membership management token held.
|
| 32 |
+
TORCH_API void removeCurrentName(
|
| 33 |
+
::c10d::PrefixStore store,
|
| 34 |
+
const worker_id_t selfId,
|
| 35 |
+
const std::string& selfName);
|
| 36 |
+
|
| 37 |
+
// This performs a synchronization of all call counts by using store.
|
| 38 |
+
// All RPC peers wait for others to join to exit at the same time.
|
| 39 |
+
TORCH_API int syncCallCount(
|
| 40 |
+
::c10d::PrefixStore store,
|
| 41 |
+
const int worldSize,
|
| 42 |
+
int activeCalls = 0);
|
| 43 |
+
|
| 44 |
+
} // namespace rpc
|
| 45 |
+
} // namespace distributed
|
| 46 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/message.h
ADDED
|
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/types.h>
|
| 4 |
+
#include <vector>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
namespace distributed {
|
| 8 |
+
namespace rpc {
|
| 9 |
+
|
| 10 |
+
// An enum denoting common RPC errors to allow specific error handling for them.
|
| 11 |
+
enum RPCErrorType {
|
| 12 |
+
UNKNOWN_ERROR = 0, /* Indicates that error type could not be parsed */
|
| 13 |
+
TIMEOUT = 1, /* Indicates that the RPC has timed out */
|
| 14 |
+
INTENTIONAL_FAILURE = 2 /* Deliberate failure, such as those injected by
|
| 15 |
+
FaultyAgent for testing */
|
| 16 |
+
};
|
| 17 |
+
|
| 18 |
+
// The enum values are bitwise ORed with MessageType
|
| 19 |
+
// They are bit flags starting from 0x100 and should have
|
| 20 |
+
// value such as 0x100, 0x200, 0x400, 0x800, 0xF00, etc.
|
| 21 |
+
enum MessageTypeFlags {
|
| 22 |
+
REQUEST_TYPE = 0x100,
|
| 23 |
+
RESPONSE_TYPE = 0x200,
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
// Message types must have values between 0x00 to 0xff
|
| 27 |
+
enum MessageType {
|
| 28 |
+
// messages for dist.rpc on builtin operators
|
| 29 |
+
SCRIPT_CALL = 0x00 | MessageTypeFlags::REQUEST_TYPE,
|
| 30 |
+
SCRIPT_RET = 0x01 | MessageTypeFlags::RESPONSE_TYPE,
|
| 31 |
+
|
| 32 |
+
// messages for dist.rpc on Python UDF
|
| 33 |
+
PYTHON_CALL = 0x02 | MessageTypeFlags::REQUEST_TYPE,
|
| 34 |
+
PYTHON_RET = 0x03 | MessageTypeFlags::RESPONSE_TYPE,
|
| 35 |
+
|
| 36 |
+
// messages for dist.remote on builtin operators and Python UDF
|
| 37 |
+
SCRIPT_REMOTE_CALL = 0x04 |
|
| 38 |
+
MessageTypeFlags::REQUEST_TYPE, // A remote call on a builtin operator
|
| 39 |
+
PYTHON_REMOTE_CALL =
|
| 40 |
+
0x05 | MessageTypeFlags::REQUEST_TYPE, // A remote call on a Python UDF
|
| 41 |
+
REMOTE_RET =
|
| 42 |
+
0x06 | MessageTypeFlags::RESPONSE_TYPE, // Response for remote calls for
|
| 43 |
+
// UDF, builtin, or script
|
| 44 |
+
|
| 45 |
+
// RRef related internal messages
|
| 46 |
+
SCRIPT_RREF_FETCH_CALL =
|
| 47 |
+
0x07 | MessageTypeFlags::REQUEST_TYPE, // A UserRRef<IValue> fetches value
|
| 48 |
+
// from owner
|
| 49 |
+
PYTHON_RREF_FETCH_CALL =
|
| 50 |
+
0x08 | MessageTypeFlags::REQUEST_TYPE, // A UserRRef<py::object> fetches
|
| 51 |
+
// value from owner
|
| 52 |
+
SCRIPT_RREF_FETCH_RET = 0x09 |
|
| 53 |
+
MessageTypeFlags::RESPONSE_TYPE, // An OwnerRRef sends ivalue to user
|
| 54 |
+
PYTHON_RREF_FETCH_RET = 0x0a |
|
| 55 |
+
MessageTypeFlags::RESPONSE_TYPE, // An OwnerRRef sends py::object to user
|
| 56 |
+
RREF_USER_DELETE = 0x0b |
|
| 57 |
+
MessageTypeFlags::REQUEST_TYPE, // A UserRRef tells the owner to deref
|
| 58 |
+
RREF_FORK_REQUEST =
|
| 59 |
+
0x0c | MessageTypeFlags::REQUEST_TYPE, // A child UserRRef tells the owner
|
| 60 |
+
// about itself
|
| 61 |
+
RREF_CHILD_ACCEPT =
|
| 62 |
+
0x0d | MessageTypeFlags::REQUEST_TYPE, // A child UserRRef tells parent
|
| 63 |
+
// that owner knows it
|
| 64 |
+
RREF_ACK =
|
| 65 |
+
0x0e | MessageTypeFlags::RESPONSE_TYPE, // ACK to internal RRef messages
|
| 66 |
+
|
| 67 |
+
// Messages with autograd info
|
| 68 |
+
FORWARD_AUTOGRAD_REQ = 0x0f | MessageTypeFlags::REQUEST_TYPE,
|
| 69 |
+
FORWARD_AUTOGRAD_RESP = 0x10 | MessageTypeFlags::RESPONSE_TYPE,
|
| 70 |
+
|
| 71 |
+
// Messages to propagate gradients on the backward pass.
|
| 72 |
+
BACKWARD_AUTOGRAD_REQ = 0x11 | MessageTypeFlags::REQUEST_TYPE,
|
| 73 |
+
BACKWARD_AUTOGRAD_RESP = 0x12 | MessageTypeFlags::RESPONSE_TYPE,
|
| 74 |
+
|
| 75 |
+
// Messages to tell workers to clean up their autograd context.
|
| 76 |
+
CLEANUP_AUTOGRAD_CONTEXT_REQ = 0x13 | MessageTypeFlags::REQUEST_TYPE,
|
| 77 |
+
CLEANUP_AUTOGRAD_CONTEXT_RESP = 0x14 | MessageTypeFlags::RESPONSE_TYPE,
|
| 78 |
+
|
| 79 |
+
// Messages that tell workers to run requests with profiling enabled.
|
| 80 |
+
RUN_WITH_PROFILING_REQ = 0x15 | MessageTypeFlags::REQUEST_TYPE,
|
| 81 |
+
RUN_WITH_PROFILING_RESP = 0x16 | MessageTypeFlags::RESPONSE_TYPE,
|
| 82 |
+
|
| 83 |
+
// Messages to support RRef.backward().
|
| 84 |
+
RREF_BACKWARD_REQ = 0x17 | MessageTypeFlags::REQUEST_TYPE,
|
| 85 |
+
RREF_BACKWARD_RESP = 0x18 | MessageTypeFlags::RESPONSE_TYPE,
|
| 86 |
+
|
| 87 |
+
// Other internal message types
|
| 88 |
+
EXCEPTION = 0x37 | MessageTypeFlags::RESPONSE_TYPE,
|
| 89 |
+
UNKNOWN = 0x3c
|
| 90 |
+
};
|
| 91 |
+
|
| 92 |
+
// A message to be sent/received by an RpcAgent.
|
| 93 |
+
//
|
| 94 |
+
// A Message object contains 4 fields:
|
| 95 |
+
// payload (std::vector<char>): a binary chunk of data.
|
| 96 |
+
// tensors (std::vector<torch::Tensor>): all tensors. Tensor data are not
|
| 97 |
+
// included in the payload, and it is up to the RpcAgent implementation
|
| 98 |
+
// to determine how to serialize them. This design is helpful for
|
| 99 |
+
// communicating super large tensors where serializing all the data at
|
| 100 |
+
// once leads to excessively large memory footprint. An implementation
|
| 101 |
+
// can then serialize and send tensors chunk-by-chunk, in the streaming
|
| 102 |
+
// fashion.
|
| 103 |
+
// type (MessageType): type of the message.
|
| 104 |
+
// id (int64_t): message id, this is used to match request and response.
|
| 105 |
+
// Other implementation can ignore it if they have their own
|
| 106 |
+
// ways to do matching.
|
| 107 |
+
//
|
| 108 |
+
// Layers above ``RpcAgent`` only converts ScriptCall, ScriptResp, PythonCall,
|
| 109 |
+
// and PythonResp into a Message, and it is up to the RpcAgent
|
| 110 |
+
// implementation to determine how to serialize a message.
|
| 111 |
+
class TORCH_API Message final : public torch::CustomClassHolder {
|
| 112 |
+
private:
|
| 113 |
+
// Keep these private in order to force users to go through make_intrusive and
|
| 114 |
+
// thus prevent creating a Message that's not held by an intrusive_ptr.
|
| 115 |
+
Message();
|
| 116 |
+
|
| 117 |
+
Message(
|
| 118 |
+
std::vector<char>&& payload,
|
| 119 |
+
std::vector<torch::Tensor>&& tensors,
|
| 120 |
+
MessageType type);
|
| 121 |
+
|
| 122 |
+
Message(
|
| 123 |
+
std::vector<char>&& payload,
|
| 124 |
+
std::vector<torch::Tensor>&& tensors,
|
| 125 |
+
MessageType type,
|
| 126 |
+
int64_t id);
|
| 127 |
+
|
| 128 |
+
friend c10::intrusive_ptr<Message>;
|
| 129 |
+
|
| 130 |
+
public:
|
| 131 |
+
Message(const Message& other) = delete;
|
| 132 |
+
Message(Message&& other) = delete;
|
| 133 |
+
Message& operator=(Message const& rhs) = delete;
|
| 134 |
+
Message& operator=(Message&& rhs) = delete;
|
| 135 |
+
|
| 136 |
+
// Destructively retrieves the payload.
|
| 137 |
+
std::vector<char>&& movePayload() &&;
|
| 138 |
+
std::vector<torch::Tensor>&& moveTensors() &&;
|
| 139 |
+
|
| 140 |
+
std::vector<char>& payload();
|
| 141 |
+
const std::vector<char>& payload() const;
|
| 142 |
+
std::vector<torch::Tensor>& tensors();
|
| 143 |
+
const std::vector<torch::Tensor>& tensors() const;
|
| 144 |
+
MessageType type() const;
|
| 145 |
+
|
| 146 |
+
bool isRequest() const;
|
| 147 |
+
bool isResponse() const;
|
| 148 |
+
bool isShutdown() const;
|
| 149 |
+
|
| 150 |
+
// id is an optional field to match request/response. If an RpcAgent
|
| 151 |
+
// implementation is able to do the matching without using this id, it can be
|
| 152 |
+
// dropped during message serialization.
|
| 153 |
+
int64_t id() const;
|
| 154 |
+
void setId(int64_t id);
|
| 155 |
+
|
| 156 |
+
std::vector<c10::weak_intrusive_ptr<c10::StorageImpl>> getStorages() const;
|
| 157 |
+
|
| 158 |
+
private:
|
| 159 |
+
std::vector<char> payload_;
|
| 160 |
+
std::vector<torch::Tensor> tensors_;
|
| 161 |
+
MessageType type_ = MessageType::UNKNOWN;
|
| 162 |
+
int64_t id_ = -1;
|
| 163 |
+
};
|
| 164 |
+
|
| 165 |
+
// Create a response Message of type Exception.
|
| 166 |
+
// The exception string representation will be used as the message's payload.
|
| 167 |
+
// A message ID corresponding to the request that resulted in this response can
|
| 168 |
+
// be provided for matching requests/responses.
|
| 169 |
+
TORCH_API c10::intrusive_ptr<Message> createExceptionResponse(
|
| 170 |
+
const std::exception& e,
|
| 171 |
+
int64_t id);
|
| 172 |
+
|
| 173 |
+
// Create a response Message of type Exception.
|
| 174 |
+
// The passed in string representation will be used as the message's payload.
|
| 175 |
+
// A message ID corresponding to the request that resulted in this response can
|
| 176 |
+
// be provided for matching requests/responses.
|
| 177 |
+
TORCH_API c10::intrusive_ptr<Message> createExceptionResponse(
|
| 178 |
+
const std::string& exceptionStr,
|
| 179 |
+
int64_t id);
|
| 180 |
+
|
| 181 |
+
inline std::tuple<
|
| 182 |
+
c10::intrusive_ptr<Message>,
|
| 183 |
+
std::vector<c10::weak_intrusive_ptr<c10::StorageImpl>>>
|
| 184 |
+
withStorages(c10::intrusive_ptr<Message> message) {
|
| 185 |
+
auto storages = message->getStorages();
|
| 186 |
+
return std::make_tuple(std::move(message), std::move(storages));
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
using JitFuture = c10::ivalue::Future;
|
| 190 |
+
|
| 191 |
+
} // namespace rpc
|
| 192 |
+
} // namespace distributed
|
| 193 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/py_rref.h
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rref_impl.h>
|
| 4 |
+
#include <torch/csrc/python_headers.h>
|
| 5 |
+
#include <torch/csrc/utils/pybind.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace distributed {
|
| 9 |
+
namespace rpc {
|
| 10 |
+
|
| 11 |
+
enum RRefProxyType { RPC_SYNC, RPC_ASYNC, REMOTE };
|
| 12 |
+
|
| 13 |
+
// Python wrapper of an RRef shared_ptr that supports Python
|
| 14 |
+
// pickle and unpickle.
|
| 15 |
+
class PYBIND11_EXPORT PyRRef {
|
| 16 |
+
public:
|
| 17 |
+
// The first ctor can only be called while holding GIL. See its implementation
|
| 18 |
+
// for more explanations.
|
| 19 |
+
explicit PyRRef(const py::object& value, const py::object& type_hint);
|
| 20 |
+
explicit PyRRef(c10::intrusive_ptr<RRef> rref);
|
| 21 |
+
PyRRef(const PyRRef&) = default;
|
| 22 |
+
~PyRRef();
|
| 23 |
+
|
| 24 |
+
bool isOwner() const;
|
| 25 |
+
bool confirmedByOwner() const;
|
| 26 |
+
WorkerInfo owner() const;
|
| 27 |
+
std::string ownerName() const;
|
| 28 |
+
py::object toHere(
|
| 29 |
+
const float timeoutSeconds =
|
| 30 |
+
torch::distributed::rpc::kUnsetRpcTimeout) const;
|
| 31 |
+
py::object localValue() const;
|
| 32 |
+
std::string str() const;
|
| 33 |
+
py::tuple pickle() const;
|
| 34 |
+
static PyRRef unpickle(const py::tuple& t);
|
| 35 |
+
c10::IValue toIValue() const;
|
| 36 |
+
// Future that is associated with the creation of this RRef on the remote end.
|
| 37 |
+
// This is only used to get the future corresponding to the rref for profiling
|
| 38 |
+
// use cases.
|
| 39 |
+
c10::intrusive_ptr<JitFuture> getFuture() const;
|
| 40 |
+
// Keeps track of the future responsible for profiling owner creation
|
| 41 |
+
// acknowledgement
|
| 42 |
+
c10::intrusive_ptr<JitFuture> getProfilingFuture() const;
|
| 43 |
+
// Sets the future responsible for profiling owner creation acknowledgement.
|
| 44 |
+
// This future is set from python to be a future that returns when profiling
|
| 45 |
+
// callbacks have been run.
|
| 46 |
+
void setProfilingFuture(c10::intrusive_ptr<JitFuture> profilingFuture);
|
| 47 |
+
|
| 48 |
+
// create a proxy on this RRef, which can be used to launch RPC on the owner
|
| 49 |
+
// of this RRef to run functions on the object referenced by this RRef.
|
| 50 |
+
py::object createRRefProxy(
|
| 51 |
+
const RRefProxyType& mode,
|
| 52 |
+
float timeoutSeconds = rpc::kUnsetRpcTimeout) const;
|
| 53 |
+
|
| 54 |
+
// get the type of the data object referenced by this RRef. Timeout argument
|
| 55 |
+
// is only used in the first invocation of this function as an argument to the
|
| 56 |
+
// RPC to the owner node of the RRef.
|
| 57 |
+
py::object getRRefType(
|
| 58 |
+
float timeout = rpc::kUnsetRpcTimeout,
|
| 59 |
+
bool blocking = true);
|
| 60 |
+
|
| 61 |
+
// Run the backward pass with the RRef as the root.
|
| 62 |
+
void backward(int64_t autogradContextId, bool retainGraph);
|
| 63 |
+
|
| 64 |
+
// Helper static function to run backward on a given rref.
|
| 65 |
+
static void backward(
|
| 66 |
+
int64_t autogradContextId,
|
| 67 |
+
bool retainGraph,
|
| 68 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 69 |
+
|
| 70 |
+
// Specialization of backward if the rref is an OwnerRRef.
|
| 71 |
+
static void backwardOwnerRRef(
|
| 72 |
+
int64_t autogradContextId,
|
| 73 |
+
bool retainGraph,
|
| 74 |
+
IValue value);
|
| 75 |
+
|
| 76 |
+
private:
|
| 77 |
+
c10::intrusive_ptr<RRef> rref_;
|
| 78 |
+
std::optional<c10::intrusive_ptr<JitFuture>> profilingFuture_;
|
| 79 |
+
std::optional<py::object> type_;
|
| 80 |
+
};
|
| 81 |
+
|
| 82 |
+
} // namespace rpc
|
| 83 |
+
} // namespace distributed
|
| 84 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_call.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
|
| 6 |
+
namespace torch::distributed::rpc {
|
| 7 |
+
|
| 8 |
+
// RPC call representing calling a Python function over RPC.
|
| 9 |
+
class TORCH_API PythonCall final : public RpcCommandBase {
|
| 10 |
+
public:
|
| 11 |
+
PythonCall(SerializedPyObj&& serializedPyObj, bool isAsyncExecution);
|
| 12 |
+
|
| 13 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 14 |
+
|
| 15 |
+
static std::unique_ptr<PythonCall> fromMessage(const Message& message);
|
| 16 |
+
|
| 17 |
+
const SerializedPyObj& serializedPyObj() const;
|
| 18 |
+
|
| 19 |
+
inline bool isAsyncExecution() const {
|
| 20 |
+
return isAsyncExecution_;
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
private:
|
| 24 |
+
SerializedPyObj serializedPyObj_;
|
| 25 |
+
const bool isAsyncExecution_;
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_functions.h
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/py_rref.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 5 |
+
#include <torch/csrc/jit/python/pybind_utils.h>
|
| 6 |
+
#include <torch/csrc/utils/pybind.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::distributed::rpc {
|
| 9 |
+
|
| 10 |
+
// Converts an internal ivalue::Future of Message into a user-facing
|
| 11 |
+
// ivalue::Future of py::object type by creating a new ivalue::Future and call
|
| 12 |
+
// its markCompleted as a callback in the given ivalue::Future.
|
| 13 |
+
// If hasValue is true, the Message will be converted into a py::object and then
|
| 14 |
+
// wrap it with an IValue. If hasValue is false, this ivalue::Future is only
|
| 15 |
+
// used for signaling and launching callbacks. In this case, the message will be
|
| 16 |
+
// discarded and then set the ivalue::Future using an empty IValue or the given
|
| 17 |
+
// FutureError if there is an error.
|
| 18 |
+
c10::intrusive_ptr<JitFuture> toPyJitFuture(
|
| 19 |
+
const c10::intrusive_ptr<JitFuture>& messageJitFuture,
|
| 20 |
+
bool hasValue = true);
|
| 21 |
+
|
| 22 |
+
c10::intrusive_ptr<JitFuture> pyRpcBuiltin(
|
| 23 |
+
const WorkerInfo& dst,
|
| 24 |
+
const std::string& opName,
|
| 25 |
+
const py::args& args,
|
| 26 |
+
const py::kwargs& kwargs,
|
| 27 |
+
const float rpcTimeoutSeconds);
|
| 28 |
+
|
| 29 |
+
c10::intrusive_ptr<JitFuture> pyRpcPythonUdf(
|
| 30 |
+
const WorkerInfo& dst,
|
| 31 |
+
std::string& pickledPythonUDF,
|
| 32 |
+
std::vector<torch::Tensor>& tensors,
|
| 33 |
+
const float rpcTimeoutSeconds,
|
| 34 |
+
const bool isAsyncExecution);
|
| 35 |
+
|
| 36 |
+
c10::intrusive_ptr<JitFuture> pyRpcTorchscript(
|
| 37 |
+
const std::string& dstWorkerName,
|
| 38 |
+
const std::string& qualifiedNameStr,
|
| 39 |
+
const py::tuple& argsTuple,
|
| 40 |
+
const py::dict& kwargsDict,
|
| 41 |
+
const float rpcTimeoutSeconds,
|
| 42 |
+
const bool isAsyncExecution);
|
| 43 |
+
|
| 44 |
+
PyRRef pyRemoteBuiltin(
|
| 45 |
+
const WorkerInfo& dst,
|
| 46 |
+
const std::string& opName,
|
| 47 |
+
const float rpcTimeoutSeconds,
|
| 48 |
+
const py::args& args,
|
| 49 |
+
const py::kwargs& kwargs);
|
| 50 |
+
|
| 51 |
+
PyRRef pyRemotePythonUdf(
|
| 52 |
+
const WorkerInfo& dst,
|
| 53 |
+
std::string& pickledPythonUDF,
|
| 54 |
+
std::vector<torch::Tensor>& tensors,
|
| 55 |
+
const float rpcTimeoutSeconds,
|
| 56 |
+
const bool isAsyncExecution);
|
| 57 |
+
|
| 58 |
+
PyRRef pyRemoteTorchscript(
|
| 59 |
+
const std::string& dstWorkerName,
|
| 60 |
+
const std::string& qualifiedNameStr,
|
| 61 |
+
const float rpcTimeoutSeconds,
|
| 62 |
+
const bool isAsyncExecution,
|
| 63 |
+
const py::args& args,
|
| 64 |
+
const py::kwargs& kwargs);
|
| 65 |
+
|
| 66 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_remote_call.h
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 6 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
namespace torch::distributed::rpc {
|
| 10 |
+
|
| 11 |
+
class TORCH_API PythonRemoteCall : public RpcCommandBase {
|
| 12 |
+
public:
|
| 13 |
+
PythonRemoteCall(
|
| 14 |
+
SerializedPyObj&& serializedPyObj,
|
| 15 |
+
at::IValue retRRefId,
|
| 16 |
+
at::IValue retForkId,
|
| 17 |
+
const bool isAsyncExecution);
|
| 18 |
+
|
| 19 |
+
inline const SerializedPyObj& serializedPyObj() const {
|
| 20 |
+
return serializedPyObj_;
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
inline const at::IValue& retRRefId() const {
|
| 24 |
+
return retRRefId_;
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
inline const at::IValue& retForkId() const {
|
| 28 |
+
return retForkId_;
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
inline bool isAsyncExecution() const {
|
| 32 |
+
return isAsyncExecution_;
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 36 |
+
static std::unique_ptr<PythonRemoteCall> fromMessage(const Message& message);
|
| 37 |
+
|
| 38 |
+
private:
|
| 39 |
+
SerializedPyObj serializedPyObj_;
|
| 40 |
+
const at::IValue retRRefId_;
|
| 41 |
+
const at::IValue retForkId_;
|
| 42 |
+
const bool isAsyncExecution_;
|
| 43 |
+
};
|
| 44 |
+
|
| 45 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_resp.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
|
| 6 |
+
namespace torch::distributed::rpc {
|
| 7 |
+
|
| 8 |
+
// RPC call representing the response of a Python UDF over RPC.
|
| 9 |
+
class TORCH_API PythonResp final : public RpcCommandBase {
|
| 10 |
+
public:
|
| 11 |
+
explicit PythonResp(SerializedPyObj&& serializedPyObj);
|
| 12 |
+
|
| 13 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 14 |
+
|
| 15 |
+
static std::unique_ptr<PythonResp> fromMessage(const Message& message);
|
| 16 |
+
|
| 17 |
+
const SerializedPyObj& serializedPyObj() const;
|
| 18 |
+
|
| 19 |
+
private:
|
| 20 |
+
SerializedPyObj serializedPyObj_;
|
| 21 |
+
};
|
| 22 |
+
|
| 23 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/python_rpc_handler.h
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
#include <torch/csrc/jit/frontend/script_type_parser.h>
|
| 6 |
+
#include <torch/csrc/utils/pybind.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::distributed::rpc {
|
| 9 |
+
|
| 10 |
+
// Singleton class provides interface to execute python UDF remote call
|
| 11 |
+
// and deserialize the returned results by running python function
|
| 12 |
+
// in internal_rpc_utilities.
|
| 13 |
+
// The singleton object is constructed at first when RPC agent is
|
| 14 |
+
// constructed, where the python function in
|
| 15 |
+
// torch/distributed/internal_rpc_utils.py are imported only once.
|
| 16 |
+
class PYBIND11_EXPORT PythonRpcHandler {
|
| 17 |
+
public:
|
| 18 |
+
struct RRefProxyFunctions {
|
| 19 |
+
py::object rrefProxyCtor_;
|
| 20 |
+
py::object rpcSync_;
|
| 21 |
+
py::object rpcAsync_;
|
| 22 |
+
py::object remote_;
|
| 23 |
+
};
|
| 24 |
+
|
| 25 |
+
struct RRefTypeFunctions {
|
| 26 |
+
py::object onOwner_;
|
| 27 |
+
py::object onUser_;
|
| 28 |
+
};
|
| 29 |
+
|
| 30 |
+
static PythonRpcHandler& getInstance();
|
| 31 |
+
|
| 32 |
+
// Run a pickled Python UDF and return the result py::object
|
| 33 |
+
py::object runPythonUdf(const py::object& pythonUdf);
|
| 34 |
+
|
| 35 |
+
// Serialized a py::object into a string
|
| 36 |
+
SerializedPyObj serialize(const py::object& obj);
|
| 37 |
+
|
| 38 |
+
// Deserialize a string into a py::object
|
| 39 |
+
py::object deserialize(const SerializedPyObj& serializedObj);
|
| 40 |
+
|
| 41 |
+
// Check if obj is RemoteException, then throw it
|
| 42 |
+
void handleException(const py::object& obj);
|
| 43 |
+
// Alternative if the caller is already holding the GIL.
|
| 44 |
+
void handleExceptionGILHeld(const py::object& obj);
|
| 45 |
+
// Check if obj is an RemoteException instance.
|
| 46 |
+
bool isRemoteException(const py::object& obj);
|
| 47 |
+
|
| 48 |
+
// Explicitly clean up py::objects to avoid segment faults when
|
| 49 |
+
// py::objects with CPython are cleaned up later at program exit
|
| 50 |
+
// See similar issues reported https://github.com/pybind/pybind11/issues/1598
|
| 51 |
+
// and https://github.com/pybind/pybind11/issues/1493
|
| 52 |
+
// Our local tests also caught this segment faults if py::objects are cleaned
|
| 53 |
+
// up at program exit. The explanation is: CPython cleans up most critical
|
| 54 |
+
// utilities before cleaning up PythonRpcHandler singleton, so when
|
| 55 |
+
// PythonRpcHandler singleton cleans up py::objects and call dec_ref(), it
|
| 56 |
+
// will crash.
|
| 57 |
+
// The solution is to clean up py::objects earlier when Rpc agent join().
|
| 58 |
+
// Be note that py::objects can not be cleaned up when Rpc agent is destroyed
|
| 59 |
+
// as well, as Rpc agent is global variable and it will have same issue as
|
| 60 |
+
// PythonRpcHandler.
|
| 61 |
+
void cleanup();
|
| 62 |
+
|
| 63 |
+
std::shared_ptr<torch::jit::CompilationUnit> jitCompilationUnit();
|
| 64 |
+
|
| 65 |
+
// Parse the string to recover the jit_type, this is used for RRef python
|
| 66 |
+
// pickling/unpickling type recovery. The type string inference rule is as
|
| 67 |
+
// follows:
|
| 68 |
+
// 1. first try to parse if this is primitive types.
|
| 69 |
+
// i.e. TensorType, IntType, PyObjectType, etc.
|
| 70 |
+
// 2. if not primitive type, we query the python_cu to see if it is a
|
| 71 |
+
// class type or interface type registered in python
|
| 72 |
+
// We use a ScriptTypeParser instance with custom PythonTypeResolver
|
| 73 |
+
// to resolve types according to the above rules.
|
| 74 |
+
TypePtr parseTypeFromStr(const std::string& typeStr);
|
| 75 |
+
|
| 76 |
+
// Return a set of Python functions for RRef helpers.
|
| 77 |
+
const RRefProxyFunctions& getRRefProxyFunctions() const;
|
| 78 |
+
|
| 79 |
+
// Return a set of Python functions to retrieve the type of the object
|
| 80 |
+
// referenced by a given RRef.
|
| 81 |
+
const RRefTypeFunctions& getRRefTypeFunctions() const;
|
| 82 |
+
|
| 83 |
+
PythonRpcHandler(const PythonRpcHandler&) = delete;
|
| 84 |
+
PythonRpcHandler& operator=(const PythonRpcHandler&) = delete;
|
| 85 |
+
PythonRpcHandler(PythonRpcHandler&&) = delete;
|
| 86 |
+
PythonRpcHandler& operator=(PythonRpcHandler&&) = delete;
|
| 87 |
+
|
| 88 |
+
private:
|
| 89 |
+
void init();
|
| 90 |
+
PythonRpcHandler();
|
| 91 |
+
~PythonRpcHandler() = default;
|
| 92 |
+
|
| 93 |
+
// Ref to `torch.distributed.rpc.internal._run_function`.
|
| 94 |
+
py::object pyRunFunction_;
|
| 95 |
+
|
| 96 |
+
// Ref to `torch.distributed.rpc.internal.serialize`.
|
| 97 |
+
py::object pySerialize_;
|
| 98 |
+
|
| 99 |
+
// Ref to `torch.distributed.rpc.internal.deserialize`.
|
| 100 |
+
py::object pyDeserialize_;
|
| 101 |
+
|
| 102 |
+
// Ref to 'torch.distributed.rpc.internal._handle_exception'
|
| 103 |
+
py::object pyHandleException_;
|
| 104 |
+
|
| 105 |
+
// Python functions for RRef proxy
|
| 106 |
+
RRefProxyFunctions rrefProxyFunctions_;
|
| 107 |
+
|
| 108 |
+
// Ref to 'torch.distributed.rpc.api._rref_typeof_on_'
|
| 109 |
+
RRefTypeFunctions rrefTypeFunctions_;
|
| 110 |
+
|
| 111 |
+
// Shared ptr to python compilation unit in jit, it is constructed in python
|
| 112 |
+
// side (see _python_cu = torch._C.CompilationUnit() in jit/__init__.py)
|
| 113 |
+
// and imported in C++ (see get_python_cu() in
|
| 114 |
+
// csrc/jit/python/pybind_utils.h). We import the compilation unit here only
|
| 115 |
+
// once for less cost and thread safety.
|
| 116 |
+
std::shared_ptr<torch::jit::CompilationUnit> jitCompilationUnit_;
|
| 117 |
+
|
| 118 |
+
// jit type parser to parse type_str back to TypePtr for RRef type
|
| 119 |
+
// recovery when pickling and unpickling RRef
|
| 120 |
+
std::shared_ptr<jit::ScriptTypeParser> typeParser_;
|
| 121 |
+
|
| 122 |
+
// Indicates whether or not we have properly initialized the handler.
|
| 123 |
+
bool initialized_;
|
| 124 |
+
|
| 125 |
+
// Lock to protect initialization.
|
| 126 |
+
std::mutex init_lock_;
|
| 127 |
+
};
|
| 128 |
+
|
| 129 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback.h
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::distributed::rpc {
|
| 6 |
+
|
| 7 |
+
// Functor which is invoked to process an RPC message. This is an abstract class
|
| 8 |
+
// with some common functionality across all request handlers. Users need to
|
| 9 |
+
// implement this interface to perform the actual business logic.
|
| 10 |
+
class TORCH_API RequestCallback {
|
| 11 |
+
public:
|
| 12 |
+
// Invoke the callback.
|
| 13 |
+
c10::intrusive_ptr<JitFuture> operator()(
|
| 14 |
+
Message& request,
|
| 15 |
+
std::vector<c10::Stream> streams) const;
|
| 16 |
+
|
| 17 |
+
virtual ~RequestCallback() = default;
|
| 18 |
+
|
| 19 |
+
protected:
|
| 20 |
+
// RpcAgent implementation should invoke ``RequestCallback`` to process
|
| 21 |
+
// received requests. There is no restriction on the implementation's
|
| 22 |
+
// threading model. This function takes an rvalue reference of the Message
|
| 23 |
+
// object. It is expected to return the future to a response message or
|
| 24 |
+
// message containing an exception. Different rpc agent implementations are
|
| 25 |
+
// expected to ensure delivery of the response/exception based on their
|
| 26 |
+
// implementation specific mechanisms.
|
| 27 |
+
virtual c10::intrusive_ptr<JitFuture> processMessage(
|
| 28 |
+
Message& request,
|
| 29 |
+
std::vector<c10::Stream> streams) const = 0;
|
| 30 |
+
};
|
| 31 |
+
|
| 32 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_impl.h
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/request_callback_no_python.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 6 |
+
#include <torch/csrc/jit/python/pybind.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::distributed::rpc {
|
| 9 |
+
|
| 10 |
+
class TORCH_API RequestCallbackImpl : public RequestCallbackNoPython {
|
| 11 |
+
public:
|
| 12 |
+
std::unique_ptr<RpcCommandBase> deserializePythonRpcCommand(
|
| 13 |
+
std::unique_ptr<RpcCommandBase> rpc,
|
| 14 |
+
const MessageType& messageType) const override;
|
| 15 |
+
|
| 16 |
+
c10::intrusive_ptr<JitFuture> processPythonCall(
|
| 17 |
+
RpcCommandBase& rpc,
|
| 18 |
+
const std::vector<c10::Stream>& streams) const override;
|
| 19 |
+
|
| 20 |
+
c10::intrusive_ptr<JitFuture> processScriptCall(
|
| 21 |
+
RpcCommandBase& rpc,
|
| 22 |
+
const std::vector<c10::Stream>& streams) const override;
|
| 23 |
+
|
| 24 |
+
c10::intrusive_ptr<JitFuture> processScriptRemoteCall(
|
| 25 |
+
RpcCommandBase& rpc,
|
| 26 |
+
const std::vector<c10::Stream>& streams) const override;
|
| 27 |
+
|
| 28 |
+
c10::intrusive_ptr<JitFuture> processPythonRemoteCall(
|
| 29 |
+
RpcCommandBase& rpc,
|
| 30 |
+
const std::vector<c10::Stream>& streams) const override;
|
| 31 |
+
|
| 32 |
+
c10::intrusive_ptr<JitFuture> processPythonRRefFetchCall(
|
| 33 |
+
RpcCommandBase& rpc) const override;
|
| 34 |
+
|
| 35 |
+
void handleRRefDelete(c10::intrusive_ptr<RRef>& rref) const override;
|
| 36 |
+
|
| 37 |
+
c10::intrusive_ptr<JitFuture> processRpcWithErrors(
|
| 38 |
+
RpcCommandBase& rpc,
|
| 39 |
+
const MessageType& messageType,
|
| 40 |
+
const std::vector<c10::Stream>& streams) const override;
|
| 41 |
+
|
| 42 |
+
bool cudaAvailable() const override;
|
| 43 |
+
|
| 44 |
+
c10::intrusive_ptr<JitFuture> processRRefBackward(
|
| 45 |
+
RpcCommandBase& rpc) const override;
|
| 46 |
+
|
| 47 |
+
// Helpers to run user-defined functions, operators and other computations.
|
| 48 |
+
|
| 49 |
+
c10::intrusive_ptr<JitFuture> runJitFunction(
|
| 50 |
+
const c10::QualifiedName& name,
|
| 51 |
+
std::vector<at::IValue>& stack,
|
| 52 |
+
const std::vector<c10::Stream>& streams,
|
| 53 |
+
bool isAsyncExecution) const;
|
| 54 |
+
|
| 55 |
+
c10::intrusive_ptr<JitFuture> runPythonFunction(
|
| 56 |
+
const py::object& function,
|
| 57 |
+
const std::vector<c10::Stream>& streams,
|
| 58 |
+
bool isAsyncExecution) const;
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/request_callback_no_python.h
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/request_callback.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 6 |
+
#include <torch/csrc/distributed/rpc/rref_impl.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/script_call.h>
|
| 8 |
+
#include <torch/csrc/distributed/rpc/script_remote_call.h>
|
| 9 |
+
|
| 10 |
+
namespace torch::distributed::rpc {
|
| 11 |
+
|
| 12 |
+
// RequestCallback implementation with no Python dependencies.
|
| 13 |
+
class TORCH_API RequestCallbackNoPython : public RequestCallback {
|
| 14 |
+
public:
|
| 15 |
+
c10::intrusive_ptr<JitFuture> processMessage(
|
| 16 |
+
Message& request,
|
| 17 |
+
std::vector<c10::Stream> streams) const override;
|
| 18 |
+
|
| 19 |
+
protected:
|
| 20 |
+
virtual std::unique_ptr<RpcCommandBase> deserializePythonRpcCommand(
|
| 21 |
+
std::unique_ptr<RpcCommandBase> rpc,
|
| 22 |
+
const MessageType& messageType) const;
|
| 23 |
+
|
| 24 |
+
virtual c10::intrusive_ptr<JitFuture> processScriptCall(
|
| 25 |
+
RpcCommandBase& rpc,
|
| 26 |
+
const std::vector<c10::Stream>& streams) const;
|
| 27 |
+
|
| 28 |
+
virtual c10::intrusive_ptr<JitFuture> processPythonCall(
|
| 29 |
+
RpcCommandBase& rpc,
|
| 30 |
+
const std::vector<c10::Stream>& streams) const;
|
| 31 |
+
|
| 32 |
+
c10::intrusive_ptr<JitFuture> assignOwnerRRef(
|
| 33 |
+
const RRefId& rrefId,
|
| 34 |
+
const RRefId& forkId,
|
| 35 |
+
const c10::intrusive_ptr<JitFuture>& valueFuture) const;
|
| 36 |
+
|
| 37 |
+
virtual c10::intrusive_ptr<JitFuture> processScriptRemoteCall(
|
| 38 |
+
RpcCommandBase& rpc,
|
| 39 |
+
const std::vector<c10::Stream>& streams) const;
|
| 40 |
+
|
| 41 |
+
virtual c10::intrusive_ptr<JitFuture> processPythonRemoteCall(
|
| 42 |
+
RpcCommandBase& rpc,
|
| 43 |
+
const std::vector<c10::Stream>& streams) const;
|
| 44 |
+
|
| 45 |
+
c10::intrusive_ptr<JitFuture> retrieveOwnerRRef(const RRefId& rrefId) const;
|
| 46 |
+
|
| 47 |
+
c10::intrusive_ptr<JitFuture> processScriptRRefFetchCall(
|
| 48 |
+
RpcCommandBase& rpc) const;
|
| 49 |
+
|
| 50 |
+
virtual c10::intrusive_ptr<JitFuture> processPythonRRefFetchCall(
|
| 51 |
+
RpcCommandBase& rpc) const;
|
| 52 |
+
|
| 53 |
+
c10::intrusive_ptr<JitFuture> processRRefUserDelete(
|
| 54 |
+
RpcCommandBase& rpc) const;
|
| 55 |
+
|
| 56 |
+
c10::intrusive_ptr<JitFuture> processRRefChildAccept(
|
| 57 |
+
RpcCommandBase& rpc) const;
|
| 58 |
+
|
| 59 |
+
c10::intrusive_ptr<JitFuture> processRRefForkRequest(
|
| 60 |
+
RpcCommandBase& rpc) const;
|
| 61 |
+
|
| 62 |
+
c10::intrusive_ptr<JitFuture> processForwardAutogradReq(
|
| 63 |
+
RpcCommandBase& rpc,
|
| 64 |
+
const std::vector<c10::Stream>& streams) const;
|
| 65 |
+
|
| 66 |
+
c10::intrusive_ptr<JitFuture> processBackwardAutogradReq(
|
| 67 |
+
RpcCommandBase& rpc,
|
| 68 |
+
const std::vector<c10::Stream>& streams) const;
|
| 69 |
+
|
| 70 |
+
c10::intrusive_ptr<JitFuture> processCleanupAutogradContextReq(
|
| 71 |
+
RpcCommandBase& rpc) const;
|
| 72 |
+
|
| 73 |
+
c10::intrusive_ptr<JitFuture> processRunWithProfilingReq(
|
| 74 |
+
RpcCommandBase& rpc) const;
|
| 75 |
+
|
| 76 |
+
virtual void handleRRefDelete(c10::intrusive_ptr<RRef>& rref) const;
|
| 77 |
+
|
| 78 |
+
c10::intrusive_ptr<JitFuture> processRpc(
|
| 79 |
+
RpcCommandBase& rpc,
|
| 80 |
+
const MessageType& messageType,
|
| 81 |
+
const std::vector<c10::Stream>& streams) const;
|
| 82 |
+
|
| 83 |
+
virtual c10::intrusive_ptr<JitFuture> processRpcWithErrors(
|
| 84 |
+
RpcCommandBase& rpc,
|
| 85 |
+
const MessageType& messageType,
|
| 86 |
+
const std::vector<c10::Stream>& streams) const;
|
| 87 |
+
|
| 88 |
+
c10::intrusive_ptr<Message> handleError(
|
| 89 |
+
const std::exception& e,
|
| 90 |
+
const MessageType messageType,
|
| 91 |
+
int64_t messageId) const;
|
| 92 |
+
|
| 93 |
+
virtual bool cudaAvailable() const;
|
| 94 |
+
|
| 95 |
+
virtual c10::intrusive_ptr<JitFuture> processRRefBackward(
|
| 96 |
+
RpcCommandBase& rpc) const;
|
| 97 |
+
|
| 98 |
+
// Helpers to run user-defined functions, operators and other computations.
|
| 99 |
+
|
| 100 |
+
c10::intrusive_ptr<JitFuture> runJitOperator(
|
| 101 |
+
const jit::Operator& op,
|
| 102 |
+
std::vector<at::IValue>& stack,
|
| 103 |
+
const std::vector<c10::Stream>& streams) const;
|
| 104 |
+
|
| 105 |
+
// Helpers to convert various kinds of objects into already-completed futures.
|
| 106 |
+
|
| 107 |
+
c10::intrusive_ptr<JitFuture> asFuture(IValue value, TypePtr type) const;
|
| 108 |
+
|
| 109 |
+
c10::intrusive_ptr<JitFuture> asFuture(
|
| 110 |
+
c10::intrusive_ptr<Message> message) const;
|
| 111 |
+
|
| 112 |
+
c10::intrusive_ptr<JitFuture> asFuture(std::exception_ptr err) const;
|
| 113 |
+
};
|
| 114 |
+
|
| 115 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::distributed::rpc {
|
| 6 |
+
|
| 7 |
+
PyMethodDef* python_functions();
|
| 8 |
+
|
| 9 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_agent.h
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/request_callback.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 6 |
+
|
| 7 |
+
#include <algorithm>
|
| 8 |
+
#include <cctype>
|
| 9 |
+
#include <chrono>
|
| 10 |
+
#include <condition_variable>
|
| 11 |
+
#include <mutex>
|
| 12 |
+
#include <thread>
|
| 13 |
+
|
| 14 |
+
namespace torch::distributed::rpc {
|
| 15 |
+
|
| 16 |
+
using DeviceMap = std::unordered_map<c10::Device, c10::Device>;
|
| 17 |
+
|
| 18 |
+
// Default RPC timeout
|
| 19 |
+
constexpr float kDefaultRpcTimeoutSeconds = 60;
|
| 20 |
+
// Unset RPC timeout. This is the value agent::send() will have if user does not
|
| 21 |
+
// pass in a specific timeout, and indicates that we must use the default
|
| 22 |
+
// timeout for RPCs.
|
| 23 |
+
constexpr float kUnsetRpcTimeout = -1;
|
| 24 |
+
constexpr auto kDefaultInitMethod = "env://";
|
| 25 |
+
constexpr float kSecToMsConversion = 1000;
|
| 26 |
+
constexpr auto kRpcTimeoutErrorStr =
|
| 27 |
+
"RPC ran for more than set timeout ({} ms) and will now be marked with an error";
|
| 28 |
+
|
| 29 |
+
using steady_clock_time_point =
|
| 30 |
+
std::chrono::time_point<std::chrono::steady_clock>;
|
| 31 |
+
// Input is qualified name string, output is JIT StrongTypePtr
|
| 32 |
+
// Same as jit::TypeResolver, did not import jit::TypeResolver to here
|
| 33 |
+
// because it could introduce cyclic dependencies.
|
| 34 |
+
using TypeResolver =
|
| 35 |
+
std::function<c10::StrongTypePtr(const c10::QualifiedName&)>;
|
| 36 |
+
|
| 37 |
+
struct TORCH_API RpcBackendOptions {
|
| 38 |
+
RpcBackendOptions()
|
| 39 |
+
: RpcBackendOptions(kDefaultRpcTimeoutSeconds, kDefaultInitMethod) {}
|
| 40 |
+
|
| 41 |
+
RpcBackendOptions(float rpcTimeoutSeconds, std::string initMethod)
|
| 42 |
+
: rpcTimeoutSeconds(rpcTimeoutSeconds),
|
| 43 |
+
initMethod(std::move(initMethod)) {
|
| 44 |
+
TORCH_CHECK(rpcTimeoutSeconds >= 0, "RPC Timeout must be non-negative");
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
float rpcTimeoutSeconds;
|
| 48 |
+
std::string initMethod;
|
| 49 |
+
};
|
| 50 |
+
|
| 51 |
+
// A globally unique ID to identify an RpcAgent
|
| 52 |
+
struct TORCH_API WorkerInfo : torch::CustomClassHolder {
|
| 53 |
+
WorkerInfo(std::string name, int64_t id);
|
| 54 |
+
|
| 55 |
+
WorkerInfo(std::string name, worker_id_t id);
|
| 56 |
+
|
| 57 |
+
bool operator==(const WorkerInfo& rhs) {
|
| 58 |
+
return (id_ == rhs.id_) && (name_ == rhs.name_);
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
static constexpr size_t MAX_NAME_LEN = 128;
|
| 62 |
+
|
| 63 |
+
const std::string name_;
|
| 64 |
+
const worker_id_t id_;
|
| 65 |
+
};
|
| 66 |
+
|
| 67 |
+
struct TORCH_API RegisterWorkerInfoOnce {
|
| 68 |
+
RegisterWorkerInfoOnce();
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
TORCH_API std::ostream& operator<<(
|
| 72 |
+
std::ostream& os,
|
| 73 |
+
const WorkerInfo& workerInfo);
|
| 74 |
+
|
| 75 |
+
// Struct for options to configure the RPC Retry protocol.
|
| 76 |
+
struct TORCH_API RpcRetryOptions {
|
| 77 |
+
// Using a default constructor like all other Options structs in the RPC
|
| 78 |
+
// codebase. TORCH_CHECKs for input validation are done in the
|
| 79 |
+
// sendWithRetries function.
|
| 80 |
+
RpcRetryOptions() = default;
|
| 81 |
+
// Maximum number of times we will retry the RPC
|
| 82 |
+
int maxRetries{5};
|
| 83 |
+
// Initial duration between consecutive RPC send attempts
|
| 84 |
+
std::chrono::milliseconds rpcRetryDuration{std::chrono::milliseconds(1000)};
|
| 85 |
+
// Constant for exponential backoff used while calculating future wait
|
| 86 |
+
// durations
|
| 87 |
+
float retryBackoff{1.5};
|
| 88 |
+
};
|
| 89 |
+
|
| 90 |
+
// Struct that stores all the metadata needed to retry a given RPC.
|
| 91 |
+
struct TORCH_API RpcRetryInfo {
|
| 92 |
+
RpcRetryInfo(
|
| 93 |
+
const WorkerInfo& to,
|
| 94 |
+
c10::intrusive_ptr<Message> message,
|
| 95 |
+
c10::intrusive_ptr<JitFuture> originalFuture,
|
| 96 |
+
int retryCount,
|
| 97 |
+
RpcRetryOptions options)
|
| 98 |
+
: to_(to),
|
| 99 |
+
message_(std::move(message)),
|
| 100 |
+
originalFuture_(std::move(originalFuture)),
|
| 101 |
+
retryCount_(retryCount),
|
| 102 |
+
options_(options) {}
|
| 103 |
+
|
| 104 |
+
const WorkerInfo& to_;
|
| 105 |
+
c10::intrusive_ptr<Message> message_;
|
| 106 |
+
// Future that is returned to the caller of sendWithRetries().
|
| 107 |
+
c10::intrusive_ptr<JitFuture> originalFuture_;
|
| 108 |
+
// Number of send attempts completed so far.
|
| 109 |
+
int retryCount_;
|
| 110 |
+
RpcRetryOptions options_;
|
| 111 |
+
};
|
| 112 |
+
|
| 113 |
+
// ``RpcAgent`` is the base class for sending and receiving RPC messages. It
|
| 114 |
+
// provides a unified ``send`` API for both request and response messages, and
|
| 115 |
+
// will invoke the given ``RequestCallback`` to process received requests. It
|
| 116 |
+
// should immediately become ready to serve request and accept response after
|
| 117 |
+
// construction.
|
| 118 |
+
class TORCH_API RpcAgent {
|
| 119 |
+
public:
|
| 120 |
+
// `WorkerInfo` is the globally unique identifier for this RpcAgent instance.
|
| 121 |
+
// It contains a ``name_`` field and an ``id_`` field. ``name_`` is the
|
| 122 |
+
// globally unique name for this ``RpcAgent``. It is up to the ``RpcAgent``
|
| 123 |
+
// implementation to determine how to resolve names. ``id_`` is the globally
|
| 124 |
+
// unique ID for this ``RpcAgent``. This should be determined by the
|
| 125 |
+
// ``RpcAgent`` implementation.
|
| 126 |
+
// The ``RequestCallback`` will be invoked to handle received requests. This
|
| 127 |
+
// ``RpcAgent`` base class makes no assumption on the thread-safeness of the
|
| 128 |
+
// ``RequestCallback``. ``RpcAgent`` implementations need to make sure that
|
| 129 |
+
// its threading model conform to ``RequestCallback``'s requirement.
|
| 130 |
+
// NB: RpcAgent implementations should not start serving requests until
|
| 131 |
+
// ``start()`` is called, as there could be other contexts that have not been
|
| 132 |
+
// initialized yet at this time.
|
| 133 |
+
RpcAgent(
|
| 134 |
+
WorkerInfo id,
|
| 135 |
+
std::unique_ptr<RequestCallback> cb,
|
| 136 |
+
std::chrono::milliseconds rpcTimeout);
|
| 137 |
+
|
| 138 |
+
virtual ~RpcAgent();
|
| 139 |
+
|
| 140 |
+
// Send a message to the ``RpcAgent`` of id ``to`` and returns a
|
| 141 |
+
// ``JitFuture`` ptr. The implementation must be asynchronous, i.e., it
|
| 142 |
+
// cannot block until it receives the response.
|
| 143 |
+
//
|
| 144 |
+
// If ``message.isRequest()`` is true, the ``JitFuture`` will be
|
| 145 |
+
// completed when the response arrives. For other message types, the Future
|
| 146 |
+
// should be ignored by the caller.
|
| 147 |
+
virtual c10::intrusive_ptr<JitFuture> send(
|
| 148 |
+
const WorkerInfo& to,
|
| 149 |
+
c10::intrusive_ptr<Message> message,
|
| 150 |
+
const float rpcTimeoutSeconds = kUnsetRpcTimeout,
|
| 151 |
+
const DeviceMap& deviceMap = {}) = 0;
|
| 152 |
+
|
| 153 |
+
// Retries sending the message up to maxRetries times until an ACK is
|
| 154 |
+
// received. The duration between consecutive sends is increased over
|
| 155 |
+
// time using an exponential backoff algorithm.
|
| 156 |
+
//
|
| 157 |
+
// Sends ``message`` to the ``RpcAgent`` of id ``to`` and returns a
|
| 158 |
+
// ``JitFuture`` ptr, just like send(). Caller can specify the maximum
|
| 159 |
+
// number of retries for this RPC (default is 5), initial duration between
|
| 160 |
+
// sends (default is 1000ms), and backoff constant (default is 1.5) by
|
| 161 |
+
// passing in the RpcRetryOptions struct. This API might end up
|
| 162 |
+
// executing a method twice on the remote end (it does not guarantee
|
| 163 |
+
// exactly-once semantics). Therefore, the user must ensure their requests
|
| 164 |
+
// are idempotent.
|
| 165 |
+
c10::intrusive_ptr<JitFuture> sendWithRetries(
|
| 166 |
+
const WorkerInfo& to,
|
| 167 |
+
c10::intrusive_ptr<Message> message,
|
| 168 |
+
RpcRetryOptions retryOptions = RpcRetryOptions());
|
| 169 |
+
|
| 170 |
+
// Return a reference to the ``WorkerInfo`` of this RpcAgent.
|
| 171 |
+
// NB: not using ``std::optional<const std::string&>`` here because we might
|
| 172 |
+
// need to create a separate RPC API lib and avoid forcing all ``RpcAgent``
|
| 173 |
+
// implementations to depend on libtorch.
|
| 174 |
+
const WorkerInfo& getWorkerInfo() const;
|
| 175 |
+
|
| 176 |
+
// Return a reference to the ``WorkerInfo`` of the given ``workerName``.
|
| 177 |
+
virtual const WorkerInfo& getWorkerInfo(
|
| 178 |
+
const std::string& workerName) const = 0;
|
| 179 |
+
|
| 180 |
+
virtual const WorkerInfo& getWorkerInfo(worker_id_t id) const = 0;
|
| 181 |
+
|
| 182 |
+
virtual std::vector<WorkerInfo> getWorkerInfos() const = 0;
|
| 183 |
+
|
| 184 |
+
// Retrieve the timeout for all RPCs.
|
| 185 |
+
inline std::chrono::milliseconds getRpcTimeout() const {
|
| 186 |
+
return rpcTimeout_.load();
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
// Set the timeout for all RPCs
|
| 190 |
+
inline void setRpcTimeout(const std::chrono::milliseconds& rpcTimeout) {
|
| 191 |
+
rpcTimeout_.store(rpcTimeout);
|
| 192 |
+
}
|
| 193 |
+
|
| 194 |
+
// Call sync and join all internal threads. This method should be called
|
| 195 |
+
// before every RPC process exits.
|
| 196 |
+
virtual void join(bool shutdown = false, float timeout = 0) = 0;
|
| 197 |
+
|
| 198 |
+
// Synchronize the this process with other ``RpcAgent`` processes. Block until
|
| 199 |
+
// all ``RpcAgent``s reach this method and send all pending messages.
|
| 200 |
+
virtual void sync() = 0;
|
| 201 |
+
|
| 202 |
+
// Sets up backend-agnostic state for accepting requests. Currently, this
|
| 203 |
+
// entails setting rpcAgentRunning_ to true, creating the retry thread, and
|
| 204 |
+
// calling the backend's startImpl.
|
| 205 |
+
void start();
|
| 206 |
+
|
| 207 |
+
// Derived classes must override this function to start accepting requests.
|
| 208 |
+
// This is used to initialize any backend-specific state. Users must call
|
| 209 |
+
// start, not startImpl, to initialize the RPC Agent.
|
| 210 |
+
virtual void startImpl() = 0;
|
| 211 |
+
|
| 212 |
+
// Stop accepting requests and shutdown the RPC framework as soon as possible
|
| 213 |
+
// by terminating all RPC threads.
|
| 214 |
+
void shutdown();
|
| 215 |
+
|
| 216 |
+
// Derived classes must override this function to start accepting requests.
|
| 217 |
+
// THis is used to clean up any backend-specific state. Users must call
|
| 218 |
+
// shutdown, not shutdownImpl, to shutdown the RPC Agent.
|
| 219 |
+
virtual void shutdownImpl() = 0;
|
| 220 |
+
|
| 221 |
+
// Check if current RPC agent is set.
|
| 222 |
+
static bool isCurrentRpcAgentSet();
|
| 223 |
+
|
| 224 |
+
// Retrieve the valid current RPC agent.
|
| 225 |
+
static std::shared_ptr<RpcAgent> getCurrentRpcAgent();
|
| 226 |
+
|
| 227 |
+
// Set the current RPC agent.
|
| 228 |
+
static void setCurrentRpcAgent(std::shared_ptr<RpcAgent> rpcAgent);
|
| 229 |
+
|
| 230 |
+
// Retrieve metrics as KV map
|
| 231 |
+
virtual std::unordered_map<std::string, std::string> getMetrics() = 0;
|
| 232 |
+
|
| 233 |
+
// Retrieve debug info in addition to metrics as KV map
|
| 234 |
+
virtual std::unordered_map<std::string, std::string> getDebugInfo();
|
| 235 |
+
|
| 236 |
+
// Flag to control whether GIL wait times
|
| 237 |
+
// should be profiled or not.
|
| 238 |
+
void enableGILProfiling(bool flag);
|
| 239 |
+
|
| 240 |
+
// Retrieve wheher we should profile GIL wait times or not.
|
| 241 |
+
bool isGILProfilingEnabled();
|
| 242 |
+
|
| 243 |
+
// Set type resolver that will be passed to JIT pickler to resolver type Ptr
|
| 244 |
+
// based on type str.
|
| 245 |
+
void setTypeResolver(std::shared_ptr<TypeResolver> typeResolver);
|
| 246 |
+
|
| 247 |
+
// Get the type resolver
|
| 248 |
+
std::shared_ptr<TypeResolver> getTypeResolver();
|
| 249 |
+
|
| 250 |
+
// Retrieves the device map for the provided destination worker.
|
| 251 |
+
virtual DeviceMap getDeviceMap(const WorkerInfo& dst) const;
|
| 252 |
+
|
| 253 |
+
// Retrieve the (non-CPU) devices that are supported by the agent.
|
| 254 |
+
virtual const std::vector<c10::Device>& getDevices() const;
|
| 255 |
+
|
| 256 |
+
protected:
|
| 257 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 258 |
+
const WorkerInfo workerInfo_;
|
| 259 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 260 |
+
const std::unique_ptr<RequestCallback> cb_;
|
| 261 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 262 |
+
std::atomic<std::chrono::milliseconds> rpcTimeout_;
|
| 263 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 264 |
+
std::atomic<bool> profilingEnabled_;
|
| 265 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 266 |
+
std::shared_ptr<TypeResolver> typeResolver_;
|
| 267 |
+
// Atomic boolean indicating whether this agent is running. It controls
|
| 268 |
+
// whether several background threads should be running. It is set in
|
| 269 |
+
// RpcAgent::start() and unset in the derived class shutdown().
|
| 270 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 271 |
+
std::atomic<bool> rpcAgentRunning_;
|
| 272 |
+
|
| 273 |
+
private:
|
| 274 |
+
static std::shared_ptr<RpcAgent> currentRpcAgent_;
|
| 275 |
+
// Add GIL wait time data point to metrics
|
| 276 |
+
virtual void addGilWaitTime(const std::chrono::microseconds gilWaitTime) = 0;
|
| 277 |
+
friend class PythonRpcHandler;
|
| 278 |
+
|
| 279 |
+
// Map that stores metadata for RPC's that may need to be re-tried as well as
|
| 280 |
+
// the timepoint at which we should re-try them.
|
| 281 |
+
std::map<
|
| 282 |
+
steady_clock_time_point,
|
| 283 |
+
std::unordered_set<std::shared_ptr<RpcRetryInfo>>>
|
| 284 |
+
rpcRetryMap_;
|
| 285 |
+
|
| 286 |
+
// Thread that checks for retryable RPC's in the rpcRetryMap_ and sleeps until
|
| 287 |
+
// the next unACKed RPC's timeout has expired.
|
| 288 |
+
std::thread rpcRetryThread_;
|
| 289 |
+
|
| 290 |
+
// Function that rpcRetryThread_ calls in a loop as long as RpcAgent is
|
| 291 |
+
// running.
|
| 292 |
+
void retryExpiredRpcs();
|
| 293 |
+
|
| 294 |
+
// This is the callback attached to futures corresponding to send retries.
|
| 295 |
+
// This handles 3 cases: 1). send was completed, 2). send failed with an
|
| 296 |
+
// error and we've done maxRetries failed send attempts, and 3). send
|
| 297 |
+
// failed with an error and we have more retries to go. In case 1, we mark
|
| 298 |
+
// the original future as complete. In case 2, we mark the future with an
|
| 299 |
+
// error and do not retry again. In case 3, we move the RpcRetryInfo struct
|
| 300 |
+
// to another time point in the map to schedule the RPC for a future send.
|
| 301 |
+
void rpcRetryCallback(
|
| 302 |
+
JitFuture& message,
|
| 303 |
+
steady_clock_time_point newTime,
|
| 304 |
+
std::shared_ptr<RpcRetryInfo> earliestRpc);
|
| 305 |
+
|
| 306 |
+
// Function that uses the exponential backoff algorithm to compute the next
|
| 307 |
+
// time point to retry a given RPC.
|
| 308 |
+
inline steady_clock_time_point computeNewRpcRetryTime(
|
| 309 |
+
RpcRetryOptions& options,
|
| 310 |
+
int retryCount) {
|
| 311 |
+
// The exponential backoff algorithm being used here is:
|
| 312 |
+
// newTime = timeNow + (retryDuration * (backoffConstant ^ retryCount)).
|
| 313 |
+
std::chrono::milliseconds timedelta =
|
| 314 |
+
std::chrono::duration_cast<std::chrono::milliseconds>(
|
| 315 |
+
options.rpcRetryDuration * pow(options.retryBackoff, retryCount));
|
| 316 |
+
return std::chrono::time_point_cast<std::chrono::milliseconds>(
|
| 317 |
+
std::chrono::steady_clock::now() + timedelta);
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
// Condition Variable to signal when the rpcRetryMap_ has been populated.
|
| 321 |
+
std::condition_variable rpcRetryMapCV_;
|
| 322 |
+
|
| 323 |
+
// Mutex to protect RpcRetryMap_.
|
| 324 |
+
std::mutex rpcRetryMutex_;
|
| 325 |
+
};
|
| 326 |
+
|
| 327 |
+
} // namespace torch::distributed::rpc
|
| 328 |
+
|
| 329 |
+
namespace std {
|
| 330 |
+
template <>
|
| 331 |
+
struct hash<torch::distributed::rpc::WorkerInfo> {
|
| 332 |
+
std::size_t operator()(
|
| 333 |
+
const torch::distributed::rpc::WorkerInfo& worker_info) const noexcept {
|
| 334 |
+
return worker_info.id_;
|
| 335 |
+
}
|
| 336 |
+
};
|
| 337 |
+
} // namespace std
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rpc_command_base.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
|
| 6 |
+
namespace torch::distributed::rpc {
|
| 7 |
+
|
| 8 |
+
// Base class for all RPC request and responses.
|
| 9 |
+
class RpcCommandBase {
|
| 10 |
+
public:
|
| 11 |
+
// Need to override this to serialize the RPC. This should destructively
|
| 12 |
+
// create a message for the RPC (Hence the &&).
|
| 13 |
+
c10::intrusive_ptr<Message> toMessage() && {
|
| 14 |
+
JitRRefPickleGuard jitPickleGuard;
|
| 15 |
+
return std::move(*this).toMessageImpl();
|
| 16 |
+
}
|
| 17 |
+
virtual c10::intrusive_ptr<Message> toMessageImpl() && = 0;
|
| 18 |
+
virtual ~RpcCommandBase() = 0;
|
| 19 |
+
};
|
| 20 |
+
|
| 21 |
+
inline RpcCommandBase::~RpcCommandBase() = default;
|
| 22 |
+
|
| 23 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_context.h
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/rref_impl.h>
|
| 6 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/utils.h>
|
| 8 |
+
|
| 9 |
+
#include <atomic>
|
| 10 |
+
#include <optional>
|
| 11 |
+
|
| 12 |
+
namespace torch::distributed::rpc {
|
| 13 |
+
|
| 14 |
+
namespace callback {
|
| 15 |
+
// It's the callback for RemoteCall.
|
| 16 |
+
void TORCH_API
|
| 17 |
+
confirmPendingUser(const JitFuture& jitFuture, const ForkId& expectedForkId);
|
| 18 |
+
|
| 19 |
+
// It's the callback for finishing creating owner rref, it returned deletedRRef,
|
| 20 |
+
// so that the deletedRRef can be handled under GIL in python_functions.cpp if
|
| 21 |
+
// deletedRRef contains python object.
|
| 22 |
+
c10::intrusive_ptr<RRef> TORCH_API
|
| 23 |
+
finishCreatingOwnerRRef(const JitFuture& jitFuture, const RRefId& rrefId);
|
| 24 |
+
} // namespace callback
|
| 25 |
+
|
| 26 |
+
// Manages RRef lifetime and keeps track of RRef forks.
|
| 27 |
+
class TORCH_API RRefContext {
|
| 28 |
+
public:
|
| 29 |
+
static RRefContext& getInstance();
|
| 30 |
+
// NB: This method must be called before destructing RRefContext singleton.
|
| 31 |
+
// Similar to delForkOfOwner, this method returns a vector of OwnerRRefs that
|
| 32 |
+
// hold py::object. The call-site is also responsible for resetting those
|
| 33 |
+
// shared_ptr objects with a GIL. See comments at delForkOfOwner() for more
|
| 34 |
+
// details.
|
| 35 |
+
static std::vector<c10::intrusive_ptr<RRef>> destroyInstance(
|
| 36 |
+
bool ignoreRRefLeak = true);
|
| 37 |
+
|
| 38 |
+
static void handleException(const JitFuture& jitFuture);
|
| 39 |
+
|
| 40 |
+
// handle exception without throw ::c10::Error again
|
| 41 |
+
static void handleExceptionSilent(const JitFuture& jitFuture);
|
| 42 |
+
|
| 43 |
+
RRefContext(const RRefContext&) = delete;
|
| 44 |
+
RRefContext(RRefContext&& other) = delete;
|
| 45 |
+
void operator=(const RRefContext&) = delete;
|
| 46 |
+
RRefContext& operator=(RRefContext&& other) = delete;
|
| 47 |
+
|
| 48 |
+
~RRefContext();
|
| 49 |
+
|
| 50 |
+
// get the worker id of the current worker
|
| 51 |
+
inline worker_id_t getWorkerId() const {
|
| 52 |
+
return agent_->getWorkerInfo().id_;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
// get the worker name of the current worker
|
| 56 |
+
inline const std::string& getWorkerName() const {
|
| 57 |
+
return agent_->getWorkerInfo().name_;
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
// generate a globally unique ID
|
| 61 |
+
inline GloballyUniqueId genGloballyUniqueId() {
|
| 62 |
+
return GloballyUniqueId(getWorkerId(), nextLocalId_++);
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
inline const std::shared_ptr<RpcAgent>& agent() const {
|
| 66 |
+
return agent_;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// create a ``UserRRef`` owned by the worker ``ownerId``
|
| 70 |
+
c10::intrusive_ptr<UserRRef> createUserRRef(
|
| 71 |
+
worker_id_t ownerId,
|
| 72 |
+
const TypePtr& type);
|
| 73 |
+
|
| 74 |
+
// Convert an RRefForkData into an RRef. This RRef could be user or owner.
|
| 75 |
+
// This RRef could have already existed before, or could be created in this
|
| 76 |
+
// method, we pass type here to validate or help the rref creation.
|
| 77 |
+
c10::intrusive_ptr<RRef> getOrCreateRRef(
|
| 78 |
+
const RRefForkData& rfd,
|
| 79 |
+
const TypePtr& type);
|
| 80 |
+
|
| 81 |
+
// Get the ``OwnerRRef`` of id ``rrefId``. If it does not exist, create a new
|
| 82 |
+
// one. This function is called in two places:
|
| 83 |
+
// 1. when processing ``rpc.remote()``, i.e., ``SCRIPT_REMOTE_CALL``
|
| 84 |
+
// ``PYTHON_REMOTE_CALL``.
|
| 85 |
+
// 2. when unpickling ``OwnerRRef``.
|
| 86 |
+
// What's common in these two cases are, 1) the RRefId is already generated
|
| 87 |
+
// 2) the TypePtr is presented. So it can always create the ``OwnerRRef`` if
|
| 88 |
+
// it is not yet available.
|
| 89 |
+
c10::intrusive_ptr<OwnerRRef> getOrCreateOwnerRRef(
|
| 90 |
+
const RRefId& rrefId,
|
| 91 |
+
const TypePtr& type);
|
| 92 |
+
|
| 93 |
+
// Create an empty owner rref of type.
|
| 94 |
+
// This method is called to first time generate an ``OwnerRRef``, e.g.,
|
| 95 |
+
// 1) ``rpc.RRef(obj)``
|
| 96 |
+
// 2) create the ``OwnerRRef`` on `rpc.remote()` caller side.
|
| 97 |
+
// What's common in these two cases are, 1) the RRefId hasn't been generated
|
| 98 |
+
// 2) the TypePtr is presented.
|
| 99 |
+
c10::intrusive_ptr<OwnerRRef> createOwnerRRef(const TypePtr& type);
|
| 100 |
+
|
| 101 |
+
// Returns a Future of the OwnerRRef, which will be marked completed when
|
| 102 |
+
// ``OwnerRRef`` is created. This method is used when the TypePtr is not
|
| 103 |
+
// available, e.g., when processing to_here(). The forceCreated flag can be
|
| 104 |
+
// used to ensure that the rref is created on the owner, otherwise throw in
|
| 105 |
+
// cases where the user of this API expects this to return a completed future.
|
| 106 |
+
// Note that the return value is a intrusive_ptr to a c10::ivalue::Future that
|
| 107 |
+
// holds the RRef.
|
| 108 |
+
c10::intrusive_ptr<JitFuture> getOwnerRRef(
|
| 109 |
+
const RRefId& rrefId,
|
| 110 |
+
bool forceCreated = false);
|
| 111 |
+
|
| 112 |
+
// Adding the RRefId of an OwnerRRef into the forks_ map. This is useful when
|
| 113 |
+
// making a remote call to self, which as for now, still goes through serde
|
| 114 |
+
// and invokes request callback. In this case, the OwnerRRef has already been
|
| 115 |
+
// created on the send side, and we need to pass it to the receive side,
|
| 116 |
+
// instead of creating a new OwnerRRef. This is done by adding the OwnerRRef
|
| 117 |
+
// into owners_. However, that alone is not enough, as it could be deleted
|
| 118 |
+
// when all UserRRef die, which would then remove the OwnerRRef from owners_
|
| 119 |
+
// and this could happen before the self remote call finishes. To prevent
|
| 120 |
+
// that, this API adds the RRefId as a ForkId, which will then delete the
|
| 121 |
+
// ForkId when the self remote is done.
|
| 122 |
+
void addSelfAsFork(c10::intrusive_ptr<OwnerRRef>& rref);
|
| 123 |
+
|
| 124 |
+
// Register a fork of the ``OwnerRRef``, and inserts a intrusive_ptr of the
|
| 125 |
+
// ``OwnerRRef`` in a map to keep it alive.
|
| 126 |
+
void addForkOfOwner(const RRefId& rrefId, const ForkId& forkId);
|
| 127 |
+
// Performs the same function as addForkOfOwner but ignores duplicate
|
| 128 |
+
// requests. This idempotent function is used with RREF_FORK_REQUEST calls,
|
| 129 |
+
// whereas all other message types use the non-idempotent variant.
|
| 130 |
+
void addForkOfOwnerIfNotPresent(const RRefId& rrefId, const ForkId& forkId);
|
| 131 |
+
// Delete a fork of the ``OwnerRRef``. NB: this could trigger deletion on the
|
| 132 |
+
// IValue or py::object. For the later, this method will acquire GIL.
|
| 133 |
+
// NB: If this fork deletion triggered deleting OwnerRRef, this method will
|
| 134 |
+
// return a shared_ptr to the OwnerRRef, which is likely to be the last
|
| 135 |
+
// shared_ptr instance for it. Therefore, deleting this shared_ptr<OwnerRRef>
|
| 136 |
+
// will also trigger deleting the object it points to. If OwnerRRef holds a
|
| 137 |
+
// py::object, deleting it require GIL. The call site should guarded it with
|
| 138 |
+
// a GIL and reset the shared_ptr. The GIL-guarded deletion is intentionally
|
| 139 |
+
// left out of this function to avoid creating dependency on pybind.
|
| 140 |
+
c10::intrusive_ptr<RRef> delForkOfOwner(
|
| 141 |
+
const RRefId& rrefId,
|
| 142 |
+
const ForkId& forkId);
|
| 143 |
+
|
| 144 |
+
// Invoked when pickling an RRef to setup child/fork properly
|
| 145 |
+
RRefForkData prepareChildFork(const c10::intrusive_ptr<RRef>& rref);
|
| 146 |
+
// Invoked when unpickling an RRef to send RREF_FORK_REQUEST to owner and
|
| 147 |
+
// send RREF_CHILD_ACCEPT to the parent.
|
| 148 |
+
// NB: forkId is necessary here as the rref could be an OwnerRRef
|
| 149 |
+
void notifyOwnerAndParentOfFork(
|
| 150 |
+
const ForkId& forkId,
|
| 151 |
+
worker_id_t parent,
|
| 152 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 153 |
+
|
| 154 |
+
// When a UserRRef is forked to another worker (user or owner), it is added
|
| 155 |
+
// into pendingChildren_ to be held alive until it receives RREF_CHILD_ACCEPT
|
| 156 |
+
// from the child.
|
| 157 |
+
// NB: This is necessary for both user and owner child. As we do not have FIFO
|
| 158 |
+
// communication between workers, we need this strategy to make sure that all
|
| 159 |
+
// previously submitted rpc/remote calls are acked before sending out the
|
| 160 |
+
// RREF_USER_DELETE message. Otherwise, the OwnerRRef could be deleted too
|
| 161 |
+
// soon.
|
| 162 |
+
void addPendingChild(
|
| 163 |
+
const ForkId& forkId,
|
| 164 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 165 |
+
void delPendingChild(const ForkId& forkId);
|
| 166 |
+
|
| 167 |
+
// When a UserRRef is created, it is added into pendingUsers_ to be held alive
|
| 168 |
+
// until it receives RREF_USER_ACCEPT from the owner.
|
| 169 |
+
void addPendingUser(
|
| 170 |
+
const ForkId& forkId,
|
| 171 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 172 |
+
void delPendingUser(const ForkId& forkId);
|
| 173 |
+
void addConfirmedUser(
|
| 174 |
+
const ForkId& forkId,
|
| 175 |
+
const c10::intrusive_ptr<RRef>& rref);
|
| 176 |
+
|
| 177 |
+
// Retrieve a pending user given the fork ID. Throws if the user has already
|
| 178 |
+
// been confirmed (i.e. is no longer in the pendingUsers_ map).
|
| 179 |
+
c10::intrusive_ptr<RRef> getPendingUser(const ForkId& forkId);
|
| 180 |
+
|
| 181 |
+
// Start recording new pending UserRRefs. All pending UserRRefs introduced
|
| 182 |
+
// after this point will be put into the thread_local userTable_, which will
|
| 183 |
+
// then be consumed and cleared in waitForThreadLocalPendingRRefs().
|
| 184 |
+
void recordThreadLocalPendingRRefs();
|
| 185 |
+
// End recording new pending UserRRefs, and clear the thread_local userTable_.
|
| 186 |
+
// Returns a Future which will be marked as completed when all pending
|
| 187 |
+
// UserRRefs in the current userTable_ are confirmed by their owners. The bool
|
| 188 |
+
// value in the Future is unused.
|
| 189 |
+
// This method is useful to make sure RRefs in user function arguments are
|
| 190 |
+
// confirmed before launching user code.
|
| 191 |
+
// NB: Callers of this method does not need to keep the returned Future alive,
|
| 192 |
+
// because this Future is already captured in callbacks of the
|
| 193 |
+
// PendingUserState. If there is no pending UserRRefs, this method returns a
|
| 194 |
+
// completed future.
|
| 195 |
+
c10::intrusive_ptr<JitFuture> waitForThreadLocalPendingRRefs();
|
| 196 |
+
// Only call this function when there are errors during a recording session,
|
| 197 |
+
// and it is likely that waitForThreadLocalPendingRRefs() cannot be invoked
|
| 198 |
+
// properly.
|
| 199 |
+
// TODO: make this a context guard
|
| 200 |
+
void clearRecordedPendingRRefsOnError();
|
| 201 |
+
|
| 202 |
+
void delUser(
|
| 203 |
+
const worker_id_t owner,
|
| 204 |
+
const RRefId& rrefId,
|
| 205 |
+
const ForkId& forkId);
|
| 206 |
+
void delAllUsersAndUnforkedOwners(std::chrono::milliseconds timeoutMillis);
|
| 207 |
+
|
| 208 |
+
std::unordered_map<std::string, std::string> getDebugInfo();
|
| 209 |
+
|
| 210 |
+
private:
|
| 211 |
+
struct PendingUserState {
|
| 212 |
+
PendingUserState(c10::intrusive_ptr<RRef> rref)
|
| 213 |
+
: rref_(std::move(rref)),
|
| 214 |
+
confirmationFuture_(c10::make_intrusive<JitFuture>(BoolType::get())) {
|
| 215 |
+
}
|
| 216 |
+
|
| 217 |
+
inline void confirm() {
|
| 218 |
+
c10::static_intrusive_pointer_cast<UserRRef>(rref_)->confirm();
|
| 219 |
+
confirmationFuture_->markCompleted();
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
c10::intrusive_ptr<RRef> rref_;
|
| 223 |
+
// Use Future.wait() and Future.markCompleted() to block and unblock user
|
| 224 |
+
// functions. The bool value wrapped by the future_ is not used.
|
| 225 |
+
c10::intrusive_ptr<JitFuture> confirmationFuture_;
|
| 226 |
+
};
|
| 227 |
+
|
| 228 |
+
RRefContext(std::shared_ptr<RpcAgent>);
|
| 229 |
+
|
| 230 |
+
c10::intrusive_ptr<UserRRef> createUserRRef(
|
| 231 |
+
worker_id_t ownerId,
|
| 232 |
+
const RRefId& rrefId,
|
| 233 |
+
const ForkId& forkId,
|
| 234 |
+
const TypePtr& type);
|
| 235 |
+
|
| 236 |
+
void finishForkRequest(const ForkId& forkId, worker_id_t parent);
|
| 237 |
+
|
| 238 |
+
// If there is any leak on any RRef, this method will throw an error.
|
| 239 |
+
void checkRRefLeaks(bool ignoreRRefLeak);
|
| 240 |
+
|
| 241 |
+
static std::atomic<local_id_t> nextLocalId_;
|
| 242 |
+
|
| 243 |
+
const std::shared_ptr<RpcAgent> agent_;
|
| 244 |
+
mutable std::mutex mutex_;
|
| 245 |
+
// Keep OwnerRRefs alive until there is no living UserRRefs.
|
| 246 |
+
std::unordered_map<RRefId, c10::intrusive_ptr<RRef>, RRefId::Hash> owners_;
|
| 247 |
+
// A map to track OwnerRRefs that are requested but not yet created. This can
|
| 248 |
+
// happen if the to_here() message is processed on the owner before the
|
| 249 |
+
// corresponding creator rpc.remote() message. If this happens, instead of
|
| 250 |
+
// to_here() RPC thread to block waiting for the OwnerRRef creation, the
|
| 251 |
+
// RRefContext returns a Future, so that the RPC request processing logic can
|
| 252 |
+
// attach subsequent code as a callback to that Future.
|
| 253 |
+
// NB: the OwnerRRefs in this map must be cleared when the corresponding
|
| 254 |
+
// OwnerRRef is created. Note that the values in this map are intrusive_ptrs
|
| 255 |
+
// to c10::ivalue::Future that will be marked completed with the owner RRef.
|
| 256 |
+
std::unordered_map<RRefId, c10::intrusive_ptr<JitFuture>, RRefId::Hash>
|
| 257 |
+
pendingOwners_;
|
| 258 |
+
// Tracks known living UserRRefs of an OwnerRRef
|
| 259 |
+
std::unordered_map<
|
| 260 |
+
RRefId,
|
| 261 |
+
std::unordered_set<ForkId, ForkId::Hash>,
|
| 262 |
+
RRefId::Hash>
|
| 263 |
+
forks_;
|
| 264 |
+
|
| 265 |
+
// This cond var is used by deleteAllUsers(), a event notification is sent if
|
| 266 |
+
// number of pending UserRRef or UserRRef children is reduced, or
|
| 267 |
+
// number of owned OwnerRRef is reduced.
|
| 268 |
+
std::condition_variable deleteAllUsersCV_;
|
| 269 |
+
// The follow 3 maps keep UserRRefs alive by holding a intrusive_ptr to the
|
| 270 |
+
// RRef instances. A UserRRef must be added into this map if any of the
|
| 271 |
+
// following two conditions is true:
|
| 272 |
+
//
|
| 273 |
+
// (1) A UserRRef has not been accepted by owner yet.
|
| 274 |
+
//
|
| 275 |
+
// It can be used or shared, but cannot be deleted, and hence kept alive
|
| 276 |
+
// in this map. A message of type RREF_USER_ACCEPT will move the
|
| 277 |
+
// corresponding RRef from pendingUsers_ map to confirmedUsers_ map.
|
| 278 |
+
std::unordered_map<ForkId, std::shared_ptr<PendingUserState>, ForkId::Hash>
|
| 279 |
+
pendingUsers_;
|
| 280 |
+
// UserRRefs are added into this map when it is confirmed by the owner.
|
| 281 |
+
// When destroying RRefContext this map helps to find local UserRRefs
|
| 282 |
+
// and send delete messages if they are still not deleted by Python
|
| 283 |
+
// garbage collection.
|
| 284 |
+
std::unordered_map<ForkId, c10::weak_intrusive_ptr<RRef>, ForkId::Hash>
|
| 285 |
+
confirmedUsers_;
|
| 286 |
+
|
| 287 |
+
// (2) A UserRRef has forked a child UserRRef which has not been accepted by
|
| 288 |
+
// the owner yet.
|
| 289 |
+
//
|
| 290 |
+
// In this case, this UserRRef cannot send out RREF_USER_DELETE message,
|
| 291 |
+
// as it could potentially trigger the OwnerRRef been deleted before the
|
| 292 |
+
// owner learns about the forked child.
|
| 293 |
+
std::unordered_map<ForkId, c10::intrusive_ptr<RRef>, ForkId::Hash>
|
| 294 |
+
pendingChildren_;
|
| 295 |
+
|
| 296 |
+
// The RRef context performs its operations through async RPC requests, in
|
| 297 |
+
// order to not block the user code. Therefore the RRef context's state may be
|
| 298 |
+
// lagging a bit behind what it is intended to be, while it waits for these
|
| 299 |
+
// requests to complete. To allow syncing when needed, we store the count of
|
| 300 |
+
// these pending requests, so that users can wait for it to reach zero.
|
| 301 |
+
std::atomic<int64_t> numPendingFutures_{0};
|
| 302 |
+
|
| 303 |
+
std::mutex destroyedMutex_;
|
| 304 |
+
bool destroyed_{false};
|
| 305 |
+
|
| 306 |
+
// Thread local states to keep UserRRefs deserialized from user function
|
| 307 |
+
// arguments.
|
| 308 |
+
static thread_local std::vector<std::shared_ptr<PendingUserState>> userTable_;
|
| 309 |
+
// A flag indicating whether subsequently created UserRRefs should be added to
|
| 310 |
+
// the thread_local userTable_. The flag is set to true before serializing
|
| 311 |
+
// RPC arguments and then set to false before running the corresponding
|
| 312 |
+
// user code. See addPendingUser and delPendingUser for more details.
|
| 313 |
+
// NB: The reason for having this flag is because addPendingUser are called in
|
| 314 |
+
// two cases, and we only want to track the 2nd case.
|
| 315 |
+
// (1) RRef as the return value: when calling rpc.remote, the UserRRef on the
|
| 316 |
+
// caller side is added to the context using addPendingUser.
|
| 317 |
+
// (2) RRef as an argument: When running an RPC using RRefs as arguments, the
|
| 318 |
+
// RRef is forwarded to the callee as new UserRRefs (if the callee is not
|
| 319 |
+
// the owner). In this case, we block running the user function until all
|
| 320 |
+
// UserRRefs are confirmed by the owner.
|
| 321 |
+
// This contract gurantees that no UserRRefs can be used remotely without
|
| 322 |
+
// confirmation. Note that, however, the UserRRef created by rpc.remote can
|
| 323 |
+
// still be passed to local functions as arguments and used there. This is by
|
| 324 |
+
// design, because this feature is especially useful when, say a master node
|
| 325 |
+
// creates multiple UserRRefs in a loop and then shares them with other nodes.
|
| 326 |
+
// Blocking every iteration in the loop until RRefs are confirmed will slow
|
| 327 |
+
// this down. This nuance on UserRRef can be interpreted as we only make
|
| 328 |
+
// exceptions for UserRRef creators. And using the UserRRef on its creator
|
| 329 |
+
// without confirmation is OK, because the creator would either call to_here
|
| 330 |
+
// or forward the UserRRef, and both would then require confirmations from the
|
| 331 |
+
// owner.
|
| 332 |
+
static thread_local bool recording_;
|
| 333 |
+
};
|
| 334 |
+
|
| 335 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_impl.h
ADDED
|
@@ -0,0 +1,416 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/jit_type.h>
|
| 4 |
+
#include <ATen/core/rref_interface.h>
|
| 5 |
+
#include <c10/core/Event.h>
|
| 6 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 8 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 9 |
+
#include <optional>
|
| 10 |
+
|
| 11 |
+
#include <atomic>
|
| 12 |
+
|
| 13 |
+
namespace torch::distributed::rpc {
|
| 14 |
+
|
| 15 |
+
class RRef;
|
| 16 |
+
class RRefContext;
|
| 17 |
+
class UserRRef;
|
| 18 |
+
|
| 19 |
+
constexpr int OWNER_IDX = 0; // index of ownerId in the tuple
|
| 20 |
+
constexpr int RREFID_ON_IDX = 1; // index of RRefId.createdOn_ in the tuple
|
| 21 |
+
constexpr int RREFID_ID_IDX = 2; // index of RRefId.localId_ in the tuple
|
| 22 |
+
constexpr int FORKID_ON_IDX = 3; // index of ForkId.createdOn_ in the tuple
|
| 23 |
+
constexpr int FORKID_ID_IDX = 4; // index of ForkId.localId_ in the tuple
|
| 24 |
+
constexpr int PARENT_IDX = 5; // index of parent in the tuple
|
| 25 |
+
constexpr int TYPE_IDX = 6; // index of parent in the tuple
|
| 26 |
+
|
| 27 |
+
// NB: if more fields are added, make sure this field is also bumped
|
| 28 |
+
constexpr int RFD_TUPLE_SIZE = 7; // number of RRefForkData fields in py::tuple
|
| 29 |
+
|
| 30 |
+
// Represents fork of an RRef to be sent over the wire.
|
| 31 |
+
struct TORCH_API RRefForkData {
|
| 32 |
+
const worker_id_t ownerId_;
|
| 33 |
+
const RRefId rrefId_;
|
| 34 |
+
const ForkId forkId_;
|
| 35 |
+
const worker_id_t parent_;
|
| 36 |
+
const std::string typeStr_;
|
| 37 |
+
|
| 38 |
+
RRefForkData(
|
| 39 |
+
worker_id_t ownerId,
|
| 40 |
+
const RRefId& rrefId,
|
| 41 |
+
const ForkId& forkId,
|
| 42 |
+
worker_id_t parent,
|
| 43 |
+
std::string typeStr);
|
| 44 |
+
};
|
| 45 |
+
|
| 46 |
+
// Note [RRef Protocol]
|
| 47 |
+
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
|
| 48 |
+
//
|
| 49 |
+
// [Background]
|
| 50 |
+
//
|
| 51 |
+
// RRef stands for Remote REFerence. Each RRef is owned by a single worker
|
| 52 |
+
// (i.e., owner) and can be used by multiple users. The owner stores the real
|
| 53 |
+
// data referenced by its RRefs. RRef needs to support fast and scalable RPC.
|
| 54 |
+
// Hence, in the design, we avoid using a single global master to keep RRef
|
| 55 |
+
// states, instead owners will keep track of the global reference counts
|
| 56 |
+
// for its RRefs. Every RRef can be uniquely identified by a global RRefId,
|
| 57 |
+
// which is assigned at the time it is first created either on a user or on the
|
| 58 |
+
// owner.
|
| 59 |
+
//
|
| 60 |
+
// On the owner worker, there is only one OwnerRRef instance, which contains the
|
| 61 |
+
// real data, while on user workers, there can be as many UserRRefs as
|
| 62 |
+
// necessary, and UserRRef does not hold the data. All usage on the OwnerRRef
|
| 63 |
+
// should retrieve the unique OwnerRRef instance using the globally unique
|
| 64 |
+
// RRefId. //A UserRRef will be created when it is used as an argument or return
|
| 65 |
+
// value in dist.rpc or dist.remote call, but RRef forking and reference
|
| 66 |
+
// counting (RC) are completely transparent to applications. Every UserRRef will
|
| 67 |
+
// also have its globally unique ForkId.
|
| 68 |
+
//
|
| 69 |
+
// [Assumptions]
|
| 70 |
+
//
|
| 71 |
+
// 1. Transient Network Failures
|
| 72 |
+
//
|
| 73 |
+
// TODO: current RRef implementation does not tolerate failures
|
| 74 |
+
//
|
| 75 |
+
// The RRef design handles transient network failures by retrying
|
| 76 |
+
// messages. Node crashes or permanent network partition is beyond the scope.
|
| 77 |
+
// When those incidents occur, the application may take down all workers, revert
|
| 78 |
+
// to the previous checkpoint, and resume training.
|
| 79 |
+
//
|
| 80 |
+
// 2. Non-idempotent UDFs
|
| 81 |
+
//
|
| 82 |
+
// We assume UDFs are not idempotent and therefore cannot be retried. However,
|
| 83 |
+
// internal RRef control messages are idempotent and retried upon message
|
| 84 |
+
// failure.
|
| 85 |
+
//
|
| 86 |
+
// TODO: RRef internal messages are not yet idempotent
|
| 87 |
+
//
|
| 88 |
+
// 3. Out of Order Message Delivery
|
| 89 |
+
//
|
| 90 |
+
// We do not assume message delivery order between any pair of nodes, because
|
| 91 |
+
// both sender and receiver are using multiple threads. There is no guarantee on
|
| 92 |
+
// which message will be processed first.
|
| 93 |
+
//
|
| 94 |
+
// [RRef Lifetime]
|
| 95 |
+
//
|
| 96 |
+
// The goal of the protocol is to delete an OwnerRRef at an appropriate time.
|
| 97 |
+
// The right time to delete an OwnerRRef is when there are no living UserRRefs
|
| 98 |
+
// and Python GC also agrees to delete the OwnerRRef instance on the owner. The
|
| 99 |
+
// tricky part is to determine if there are any living UserRRefs.
|
| 100 |
+
//
|
| 101 |
+
// A user can get a UserRRef in three situations:
|
| 102 |
+
//
|
| 103 |
+
// (1). Receiving a UserRRef from the owner.
|
| 104 |
+
// (2). Receiving a UserRRef from another user.
|
| 105 |
+
// (3). Creating a new UserRRef owned by another worker.
|
| 106 |
+
//
|
| 107 |
+
// (1) is the simplest case where the owner initiates the fork, and hence it can
|
| 108 |
+
// easily increment local RC. The only requirement is that any UserRRef must
|
| 109 |
+
// notify the owner before destruction. Hence, we need the first guarantee:
|
| 110 |
+
//
|
| 111 |
+
// G1. The owner will be notified when any UserRRef is deleted.
|
| 112 |
+
//
|
| 113 |
+
// As messages might come delayed or out-of-order, we need more one guarantee to
|
| 114 |
+
// make sure the delete message is not sent out too soon. Let us first introduce
|
| 115 |
+
// a new concept. If A sends an RPC to B that involves an RRef, we call the RRef
|
| 116 |
+
// on A the parent RRef and the RRef on B the child RRef.
|
| 117 |
+
//
|
| 118 |
+
// G2. Parent RRef cannot be deleted until the child RRef is confirmed by the
|
| 119 |
+
// owner.
|
| 120 |
+
//
|
| 121 |
+
// Under (1), where the caller is UserRRef and callee is OwnerRRef, it simply
|
| 122 |
+
// means that the user will not send out the delete message until all previous
|
| 123 |
+
// messages are ACKed. Note that ACKed does not mean the owner finishes
|
| 124 |
+
// executing the function, instead, it only means the owner has retrieved its
|
| 125 |
+
// local OwnerRRef and about to pass it to the function, which is sufficient to
|
| 126 |
+
// keep the OwnerRRef alive even if the delete message from the user arrives at
|
| 127 |
+
// the owner before the function finishes execution.
|
| 128 |
+
//
|
| 129 |
+
// With (2) and (3), it is possible that the owner only partially knows the RRef
|
| 130 |
+
// fork graph or not even knowing it at all. For example, the RRef could be
|
| 131 |
+
// constructed on a user, and before the owner receives the RPC call, the
|
| 132 |
+
// creator user might have already shared the RRef with other users, and those
|
| 133 |
+
// users could further share the RRef. One invariant is that the fork graph of
|
| 134 |
+
// any RRef is always a tree rooted at the owner, because forking an RRef always
|
| 135 |
+
// creates a new RRef instance, and hence every RRef has a single parent. One
|
| 136 |
+
// nasty detail is that when an RRef is created on a user, technically the owner
|
| 137 |
+
// is not its parent but we still consider it that way and it does not break the
|
| 138 |
+
// argument below.
|
| 139 |
+
//
|
| 140 |
+
// The owner's view on any node (fork) in the tree has three stages:
|
| 141 |
+
//
|
| 142 |
+
// 1) unknown -> 2) known -> 3) deleted.
|
| 143 |
+
//
|
| 144 |
+
// The owner's view on the entire tree keeps changing. The owner deletes its
|
| 145 |
+
// OwnerRRef instance when it thinks there are no living UserRRefs, i.e., when
|
| 146 |
+
// OwnerRRef is deleted, all UserRRefs could be either indeed deleted or
|
| 147 |
+
// unknown. The dangerous case is when some forks are unknown and others are
|
| 148 |
+
// deleted.
|
| 149 |
+
//
|
| 150 |
+
// G2 trivially guarantees that no parent UserRRef Y can be deleted before the
|
| 151 |
+
// owner knows all of Y's children UserRRefs.
|
| 152 |
+
//
|
| 153 |
+
// However, it is possible that the child UserRRef Z may be deleted before the
|
| 154 |
+
// owner knows its parent Y. More specifically, this can happen when all of Z's
|
| 155 |
+
// messages are processed by the owner before all messages from Y, including the
|
| 156 |
+
// delete message. Nevertheless, this does not cause any problem. Because, at
|
| 157 |
+
// least one of Y's ancestor will be alive, and it will prevent the owner from
|
| 158 |
+
// deleting the OwnerRRef. Consider the following example: (NB: this scenario
|
| 159 |
+
// will no longer relevant when we block UDF until all RRefs are confirmed by
|
| 160 |
+
// the owner)
|
| 161 |
+
//
|
| 162 |
+
// OwnerRRef -> A -> Y -> Z
|
| 163 |
+
//
|
| 164 |
+
// OwnerRRef forks to A, then A forks to Y, and Y forks to Z. Z can be deleted
|
| 165 |
+
// without OwnerRRef knowing Y. However, the OwnerRRef will at least know A, as
|
| 166 |
+
// the owner directly forks the RRef to A. A won't die before the owner knows Y.
|
| 167 |
+
//
|
| 168 |
+
// Things get a little trickier if the RRef is created on a user:
|
| 169 |
+
//
|
| 170 |
+
// OwnerRRef
|
| 171 |
+
// ^
|
| 172 |
+
// |
|
| 173 |
+
// A -> Y -> Z
|
| 174 |
+
//
|
| 175 |
+
// If Z calls to_here on the UserRRef, the owner at least knows A when Z is
|
| 176 |
+
// deleted, because otherwise to_here wouldn't finish. If Z does not call
|
| 177 |
+
// to_here, it is possible that the owner receives all messages from Z before
|
| 178 |
+
// any message from A and Y. In this case, as the real data of the OwnerRRef has
|
| 179 |
+
// not been created yet, there is nothing to be deleted either. It is the same
|
| 180 |
+
// as Z does not exist at all Hence, it's still OK.
|
| 181 |
+
//
|
| 182 |
+
// See #26759 for more details and discussions.
|
| 183 |
+
//
|
| 184 |
+
// TODO: make RRef an IValue, and edit createStackForSchema accordingly
|
| 185 |
+
// TODO: make RRef system messages idempotent and retry on failures.
|
| 186 |
+
//
|
| 187 |
+
// ``RRef`` is the base type for both ``UserRRef`` and ``OwnerRRef``.
|
| 188 |
+
// Each ``RRef`` has a globally unique ``RRefId``.
|
| 189 |
+
class TORCH_API RRef : public RRefInterface {
|
| 190 |
+
public:
|
| 191 |
+
// RRef is made NOT copyable NOT movable to prevent messing up reference
|
| 192 |
+
// counting.
|
| 193 |
+
explicit RRef(const RRef& other) = delete;
|
| 194 |
+
explicit RRef(RRef&& other) = delete;
|
| 195 |
+
RRef& operator=(RRef&& other) = delete;
|
| 196 |
+
|
| 197 |
+
~RRef() override = default;
|
| 198 |
+
|
| 199 |
+
// returns the worker id of the owner
|
| 200 |
+
inline worker_id_t owner() const override {
|
| 201 |
+
return ownerId_;
|
| 202 |
+
}
|
| 203 |
+
|
| 204 |
+
// returns the worker name of the owner
|
| 205 |
+
inline std::string ownerName() const override {
|
| 206 |
+
return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_).name_;
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
// returns the worker info of the owner
|
| 210 |
+
inline WorkerInfo ownerWorkerInfo() const {
|
| 211 |
+
return RpcAgent::getCurrentRpcAgent()->getWorkerInfo(ownerId_);
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
// Returns the globally unique RRefId of this RRef
|
| 215 |
+
inline const RRefId& rrefId() const {
|
| 216 |
+
return rrefId_;
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
inline bool isPyObj() const {
|
| 220 |
+
return type_ == PyObjectType::get();
|
| 221 |
+
}
|
| 222 |
+
inline const TypePtr type() const override {
|
| 223 |
+
return type_;
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
// Save the future corresponding to the creation of this RRef on a remote
|
| 227 |
+
// node. Note that this is only set when processing requests invoked with
|
| 228 |
+
// rpc.remote. This is only used to get the future corresponding to the rref
|
| 229 |
+
// for profiling use cases.
|
| 230 |
+
inline void registerOwnerCreationFuture(c10::intrusive_ptr<JitFuture> fut) {
|
| 231 |
+
ownerCreationFuture_ = std::move(fut);
|
| 232 |
+
}
|
| 233 |
+
|
| 234 |
+
// Get the future corresponding to the creation of this rref.
|
| 235 |
+
inline c10::intrusive_ptr<JitFuture> getOwnerCreationFuture() const {
|
| 236 |
+
return ownerCreationFuture_;
|
| 237 |
+
}
|
| 238 |
+
|
| 239 |
+
// Check if creation of this RRef on owner node has timed out.
|
| 240 |
+
inline bool getTimedOut() const {
|
| 241 |
+
return timedOut_.load();
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
// Dispatches an error to the correct handler based on its RPCErrorType.
|
| 245 |
+
void handleError(RPCErrorType errorType, const JitFuture& JitFuture);
|
| 246 |
+
|
| 247 |
+
// Send delete UserRRef request to Owner,
|
| 248 |
+
// if the request hasn't been sent yet.
|
| 249 |
+
// There are 2 cases to call it,
|
| 250 |
+
// 1, Python GC decides end of UserRRef lifetime, calling destructor.
|
| 251 |
+
// 2, RPC module graceful shutdown calls it on all UserRRefs tracked
|
| 252 |
+
// in the RRefContext.
|
| 253 |
+
virtual void tryDel() {}
|
| 254 |
+
|
| 255 |
+
protected:
|
| 256 |
+
// Indicates that the creation of this RRef on owner node has timed out.
|
| 257 |
+
inline void setTimedOut() {
|
| 258 |
+
timedOut_ = true;
|
| 259 |
+
}
|
| 260 |
+
friend class RRefContext;
|
| 261 |
+
|
| 262 |
+
RRef(worker_id_t ownerId, const RRefId& rrefId, TypePtr type);
|
| 263 |
+
|
| 264 |
+
virtual RRefForkData fork() const;
|
| 265 |
+
|
| 266 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 267 |
+
const worker_id_t ownerId_;
|
| 268 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 269 |
+
const RRefId rrefId_;
|
| 270 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 271 |
+
std::atomic<bool> timedOut_{false};
|
| 272 |
+
|
| 273 |
+
// type field to denote the type of the element that the RRef is holding
|
| 274 |
+
// it could be any TypePtr that JIT support, including PyObjectType
|
| 275 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 276 |
+
const TypePtr type_;
|
| 277 |
+
// Future corresponding to request to create RRef on remote node.
|
| 278 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 279 |
+
c10::intrusive_ptr<JitFuture> ownerCreationFuture_;
|
| 280 |
+
};
|
| 281 |
+
|
| 282 |
+
// ``UserRRef`` represents a user of an RRef. Besides the ``RRefId``, each user
|
| 283 |
+
// also has a globally unique ``ForkId`` to identify this user. ``UserRRef``
|
| 284 |
+
// never owns the real value, the only way to get the value of the ``RRef`` is
|
| 285 |
+
// to call ``to_here()`` and get a copy..
|
| 286 |
+
class TORCH_API UserRRef final : public RRef {
|
| 287 |
+
public:
|
| 288 |
+
UserRRef(const UserRRef& other) = delete;
|
| 289 |
+
UserRRef(UserRRef&& other) = delete;
|
| 290 |
+
UserRRef& operator=(const UserRRef& other) = delete;
|
| 291 |
+
UserRRef& operator=(UserRRef&& other) = delete;
|
| 292 |
+
|
| 293 |
+
UserRRef(
|
| 294 |
+
worker_id_t ownerId,
|
| 295 |
+
const RRefId& rrefId,
|
| 296 |
+
const ForkId& forkId,
|
| 297 |
+
TypePtr type);
|
| 298 |
+
|
| 299 |
+
inline bool isOwner() const override {
|
| 300 |
+
return false;
|
| 301 |
+
}
|
| 302 |
+
|
| 303 |
+
inline bool confirmedByOwner() const override {
|
| 304 |
+
return confirmedByOwner_;
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
// Returns the globally unique ForkId of this RRef
|
| 308 |
+
const ForkId& forkId() const;
|
| 309 |
+
|
| 310 |
+
// Get of copy of the value from the ``OwnerRRef``. If the value is not ready
|
| 311 |
+
// yet, this call will block.
|
| 312 |
+
IValue toHere(
|
| 313 |
+
const float timeoutSeconds =
|
| 314 |
+
torch::distributed::rpc::kUnsetRpcTimeout) const;
|
| 315 |
+
|
| 316 |
+
void tryDel() override;
|
| 317 |
+
|
| 318 |
+
// Will be called when refcount reaches 0.
|
| 319 |
+
// Upon destruction, this ``UserRRef`` will tell the owner to deref.
|
| 320 |
+
void release_resources() override;
|
| 321 |
+
|
| 322 |
+
// Will be called when both refcount and weakcount reach 0. See
|
| 323 |
+
// https://github.com/pytorch/pytorch/blob/9116f02bebf3a5260feef5732d36c54ecb3b4033/c10/util/intrusive_ptr.h#L204
|
| 324 |
+
// This is called on destructing the wrapping intrusive_ptr_target instance
|
| 325 |
+
// and it's data members.
|
| 326 |
+
~UserRRef() override;
|
| 327 |
+
|
| 328 |
+
private:
|
| 329 |
+
friend class RRefContext;
|
| 330 |
+
|
| 331 |
+
RRefForkData fork() const override;
|
| 332 |
+
inline void confirm() {
|
| 333 |
+
confirmedByOwner_ = true;
|
| 334 |
+
}
|
| 335 |
+
|
| 336 |
+
const ForkId forkId_;
|
| 337 |
+
|
| 338 |
+
// Indicates if this user has sent delete message to it's owner.
|
| 339 |
+
// Note, thread safety is needed because delete message could be sent by
|
| 340 |
+
// either the destructor called by Python garbage collection or RRefContext
|
| 341 |
+
// proactive cleanup on RPC graceful shutdown.
|
| 342 |
+
std::mutex deletedOnOwnerMutex_;
|
| 343 |
+
bool deletedOnOwner_{false};
|
| 344 |
+
// Indicating whether this UserRRef has been confirmed by its owner.
|
| 345 |
+
std::atomic<bool> confirmedByOwner_;
|
| 346 |
+
};
|
| 347 |
+
|
| 348 |
+
// Keep the template only on the derived class because ``RRefContext`` needs to
|
| 349 |
+
// erase the type on ``RRef`` and keep them in one map.
|
| 350 |
+
class TORCH_API OwnerRRef final : public RRef {
|
| 351 |
+
public:
|
| 352 |
+
OwnerRRef(const OwnerRRef& other) = delete;
|
| 353 |
+
OwnerRRef(OwnerRRef&& other) = delete;
|
| 354 |
+
OwnerRRef& operator=(const OwnerRRef& other) = delete;
|
| 355 |
+
OwnerRRef& operator=(OwnerRRef&& other) = delete;
|
| 356 |
+
|
| 357 |
+
OwnerRRef(
|
| 358 |
+
worker_id_t ownerId,
|
| 359 |
+
const RRefId& rrefId,
|
| 360 |
+
TypePtr type,
|
| 361 |
+
std::vector<c10::Device> devices);
|
| 362 |
+
|
| 363 |
+
OwnerRRef(
|
| 364 |
+
worker_id_t ownerId,
|
| 365 |
+
const RRefId& rrefId,
|
| 366 |
+
TypePtr type,
|
| 367 |
+
std::optional<IValue> value,
|
| 368 |
+
std::vector<c10::Device> devices);
|
| 369 |
+
|
| 370 |
+
inline bool isOwner() const override {
|
| 371 |
+
return true;
|
| 372 |
+
}
|
| 373 |
+
|
| 374 |
+
// OwnerRRef is always confirmed, while UserRRef is only confirmed when the
|
| 375 |
+
// owner knows about it.
|
| 376 |
+
inline bool confirmedByOwner() const override {
|
| 377 |
+
return true;
|
| 378 |
+
}
|
| 379 |
+
|
| 380 |
+
// Get a constant reference of the real value. This method will block if the
|
| 381 |
+
// value is not ready. This method does not need GIL as it does not create
|
| 382 |
+
// any new py::object. It will throw if there is an error.
|
| 383 |
+
const IValue& getValue() const;
|
| 384 |
+
|
| 385 |
+
// Set the value of this ``OwnerRRef``. This method does not need GIL as it
|
| 386 |
+
// does not create any new py::object.
|
| 387 |
+
void setValue(IValue&& value);
|
| 388 |
+
// Sets the value of this ``OwnerRRef`` to contain an exception.
|
| 389 |
+
void setError(std::exception_ptr eptr);
|
| 390 |
+
|
| 391 |
+
// Has a value or error been set?
|
| 392 |
+
bool hasValue() const;
|
| 393 |
+
// Gets a future that is satisfied when the value or error is set.
|
| 394 |
+
c10::intrusive_ptr<JitFuture> getFuture();
|
| 395 |
+
|
| 396 |
+
private:
|
| 397 |
+
friend class RRefContext;
|
| 398 |
+
|
| 399 |
+
c10::intrusive_ptr<JitFuture> future_;
|
| 400 |
+
};
|
| 401 |
+
|
| 402 |
+
TORCH_API std::ostream& operator<<(std::ostream& os, const RRef& rref);
|
| 403 |
+
|
| 404 |
+
// Helper function that casts from c10::RRefInterface to OwnerRRef
|
| 405 |
+
inline TORCH_API c10::intrusive_ptr<OwnerRRef> fromRRefInterface(
|
| 406 |
+
const c10::intrusive_ptr<c10::RRefInterface>& rrefInterface) {
|
| 407 |
+
return c10::static_intrusive_pointer_cast<OwnerRRef>(rrefInterface);
|
| 408 |
+
}
|
| 409 |
+
|
| 410 |
+
// Helper function that casts from OwnerRRef to c10::RRefInterface
|
| 411 |
+
inline TORCH_API c10::intrusive_ptr<c10::RRefInterface> fromOwnerRRef(
|
| 412 |
+
const c10::intrusive_ptr<RRef>& ownerRRef) {
|
| 413 |
+
return c10::static_intrusive_pointer_cast<c10::RRefInterface>(ownerRRef);
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/rref_proto.h
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 6 |
+
#include <torch/csrc/jit/runtime/operator.h>
|
| 7 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
namespace torch::distributed::rpc {
|
| 11 |
+
|
| 12 |
+
// Temporary solution of RRef operations.
|
| 13 |
+
// TODO: Remove all these messages and use rpc + registered functions instead.
|
| 14 |
+
class TORCH_API RRefMessageBase : public RpcCommandBase {
|
| 15 |
+
public:
|
| 16 |
+
RRefMessageBase(const RRefId& rrefId, MessageType type)
|
| 17 |
+
: rrefId_(rrefId), type_(type) {}
|
| 18 |
+
|
| 19 |
+
const RRefId& rrefId();
|
| 20 |
+
|
| 21 |
+
protected:
|
| 22 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 23 |
+
const RRefId rrefId_;
|
| 24 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 25 |
+
const MessageType type_;
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
class TORCH_API ForkMessageBase : public RRefMessageBase {
|
| 29 |
+
public:
|
| 30 |
+
ForkMessageBase(const RRefId& rrefId, const ForkId& forkId, MessageType type)
|
| 31 |
+
: RRefMessageBase(rrefId, type), forkId_(forkId) {}
|
| 32 |
+
|
| 33 |
+
const ForkId& forkId();
|
| 34 |
+
|
| 35 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 36 |
+
static std::pair<RRefId, ForkId> fromMessage(
|
| 37 |
+
const Message& message,
|
| 38 |
+
MessageType type);
|
| 39 |
+
|
| 40 |
+
protected:
|
| 41 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 42 |
+
const ForkId forkId_;
|
| 43 |
+
};
|
| 44 |
+
|
| 45 |
+
// UserRRef uses this message to fetch the remote RRef value from the owner.
|
| 46 |
+
class TORCH_API ScriptRRefFetchCall final : public RRefMessageBase {
|
| 47 |
+
public:
|
| 48 |
+
ScriptRRefFetchCall(worker_id_t fromWorkerId, const RRefId& rrefId)
|
| 49 |
+
: RRefMessageBase(rrefId, MessageType::SCRIPT_RREF_FETCH_CALL),
|
| 50 |
+
fromWorkerId_(fromWorkerId) {}
|
| 51 |
+
|
| 52 |
+
inline worker_id_t fromWorkerId() const {
|
| 53 |
+
return fromWorkerId_;
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 57 |
+
static std::unique_ptr<ScriptRRefFetchCall> fromMessage(
|
| 58 |
+
const Message& message);
|
| 59 |
+
|
| 60 |
+
private:
|
| 61 |
+
const worker_id_t fromWorkerId_;
|
| 62 |
+
};
|
| 63 |
+
|
| 64 |
+
class TORCH_API PythonRRefFetchCall final : public RRefMessageBase {
|
| 65 |
+
public:
|
| 66 |
+
PythonRRefFetchCall(worker_id_t fromWorkerId, const RRefId& rrefId)
|
| 67 |
+
: RRefMessageBase(rrefId, MessageType::PYTHON_RREF_FETCH_CALL),
|
| 68 |
+
fromWorkerId_(fromWorkerId) {}
|
| 69 |
+
|
| 70 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 71 |
+
static std::unique_ptr<PythonRRefFetchCall> fromMessage(
|
| 72 |
+
const Message& message);
|
| 73 |
+
|
| 74 |
+
private:
|
| 75 |
+
const worker_id_t fromWorkerId_;
|
| 76 |
+
};
|
| 77 |
+
|
| 78 |
+
// OwnerRRef uses this message to send the RRef value to a remote UserRRef
|
| 79 |
+
class TORCH_API RRefFetchRet : public RpcCommandBase {
|
| 80 |
+
public:
|
| 81 |
+
RRefFetchRet(std::vector<at::IValue> values, MessageType type)
|
| 82 |
+
: values_(std::move(values)), type_(type) {}
|
| 83 |
+
|
| 84 |
+
const std::vector<at::IValue>& values();
|
| 85 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 86 |
+
|
| 87 |
+
private:
|
| 88 |
+
std::vector<at::IValue> values_;
|
| 89 |
+
const MessageType type_;
|
| 90 |
+
};
|
| 91 |
+
|
| 92 |
+
class TORCH_API ScriptRRefFetchRet final : public RRefFetchRet {
|
| 93 |
+
public:
|
| 94 |
+
explicit ScriptRRefFetchRet(std::vector<at::IValue> values)
|
| 95 |
+
: RRefFetchRet(std::move(values), MessageType::SCRIPT_RREF_FETCH_RET) {}
|
| 96 |
+
|
| 97 |
+
static std::unique_ptr<ScriptRRefFetchRet> fromMessage(
|
| 98 |
+
const Message& message);
|
| 99 |
+
};
|
| 100 |
+
|
| 101 |
+
class TORCH_API PythonRRefFetchRet final : public RRefFetchRet {
|
| 102 |
+
public:
|
| 103 |
+
explicit PythonRRefFetchRet(std::vector<at::IValue> values)
|
| 104 |
+
: RRefFetchRet(std::move(values), MessageType::PYTHON_RREF_FETCH_RET) {}
|
| 105 |
+
|
| 106 |
+
static std::unique_ptr<PythonRRefFetchRet> fromMessage(
|
| 107 |
+
const Message& message);
|
| 108 |
+
};
|
| 109 |
+
|
| 110 |
+
// UserRRef (regardless it's the creator or not) uses this message to notify
|
| 111 |
+
// OwnerRRef on delete.
|
| 112 |
+
class TORCH_API RRefUserDelete final : public ForkMessageBase {
|
| 113 |
+
public:
|
| 114 |
+
RRefUserDelete(const RRefId& rrefId, const ForkId& forkId)
|
| 115 |
+
: ForkMessageBase(rrefId, forkId, MessageType::RREF_USER_DELETE) {}
|
| 116 |
+
|
| 117 |
+
static std::unique_ptr<RRefUserDelete> fromMessage(const Message& message);
|
| 118 |
+
};
|
| 119 |
+
|
| 120 |
+
class TORCH_API RemoteRet final : public ForkMessageBase {
|
| 121 |
+
public:
|
| 122 |
+
RemoteRet(const RRefId& rrefId, const ForkId& forkId)
|
| 123 |
+
: ForkMessageBase(rrefId, forkId, MessageType::REMOTE_RET) {}
|
| 124 |
+
|
| 125 |
+
static std::unique_ptr<RemoteRet> fromMessage(const Message& message);
|
| 126 |
+
};
|
| 127 |
+
|
| 128 |
+
// A child RRef uses this message to notify its parent that the child has been
|
| 129 |
+
// confirmed by the owner.
|
| 130 |
+
class TORCH_API RRefChildAccept final : public RpcCommandBase {
|
| 131 |
+
public:
|
| 132 |
+
explicit RRefChildAccept(const ForkId& forkId) : forkId_(forkId) {}
|
| 133 |
+
|
| 134 |
+
const ForkId& forkId() const;
|
| 135 |
+
|
| 136 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 137 |
+
static std::unique_ptr<RRefChildAccept> fromMessage(const Message& message);
|
| 138 |
+
|
| 139 |
+
private:
|
| 140 |
+
const ForkId forkId_;
|
| 141 |
+
};
|
| 142 |
+
|
| 143 |
+
// A child RRef uses this message to send a fork request to the owner.
|
| 144 |
+
class TORCH_API RRefForkRequest final : public ForkMessageBase {
|
| 145 |
+
public:
|
| 146 |
+
RRefForkRequest(const RRefId& rrefId, const ForkId& forkId)
|
| 147 |
+
: ForkMessageBase(rrefId, forkId, MessageType::RREF_FORK_REQUEST) {}
|
| 148 |
+
|
| 149 |
+
static std::unique_ptr<RRefForkRequest> fromMessage(const Message& message);
|
| 150 |
+
};
|
| 151 |
+
|
| 152 |
+
class TORCH_API RRefAck final : public RpcCommandBase {
|
| 153 |
+
public:
|
| 154 |
+
RRefAck() = default;
|
| 155 |
+
|
| 156 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 157 |
+
static std::unique_ptr<RRefAck> fromMessage(const Message& message);
|
| 158 |
+
};
|
| 159 |
+
|
| 160 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_call.h
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/jit/runtime/operator.h>
|
| 6 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 7 |
+
#include <optional>
|
| 8 |
+
#include <vector>
|
| 9 |
+
|
| 10 |
+
namespace torch {
|
| 11 |
+
namespace distributed {
|
| 12 |
+
namespace rpc {
|
| 13 |
+
|
| 14 |
+
using torch::jit::Operator;
|
| 15 |
+
|
| 16 |
+
// A ScriptCall instance represents an invocation of a builtin operator for a
|
| 17 |
+
// TorchScript function. If it is a builtin operator, it
|
| 18 |
+
// contains a shared ptr to the `Operator` and a list of arguments.
|
| 19 |
+
// If it is a TorchScript function, it contains a non empty qualifiedName string
|
| 20 |
+
// to the TorchScript function schema name and a list of arguments.
|
| 21 |
+
class TORCH_API ScriptCall : public RpcCommandBase {
|
| 22 |
+
public:
|
| 23 |
+
// Constructor for builitin operator call.
|
| 24 |
+
ScriptCall(std::shared_ptr<Operator> op, std::vector<at::IValue>&& stack);
|
| 25 |
+
// Constructor for TorchScript function call.
|
| 26 |
+
ScriptCall(
|
| 27 |
+
const c10::QualifiedName& qualifiedName,
|
| 28 |
+
std::vector<at::IValue>&& stack,
|
| 29 |
+
const bool isAsyncExecution = false);
|
| 30 |
+
|
| 31 |
+
bool hasOp() const;
|
| 32 |
+
std::shared_ptr<Operator> op() const;
|
| 33 |
+
bool hasQualifiedName() const;
|
| 34 |
+
const c10::QualifiedName& qualifiedName() const;
|
| 35 |
+
// return the argument stack of this builtin operator
|
| 36 |
+
const std::vector<at::IValue>& stack() const;
|
| 37 |
+
std::vector<at::IValue>& stackRef();
|
| 38 |
+
inline bool isAsyncExecution() const {
|
| 39 |
+
return isAsyncExecution_;
|
| 40 |
+
}
|
| 41 |
+
|
| 42 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 43 |
+
static std::unique_ptr<ScriptCall> fromMessage(const Message& message);
|
| 44 |
+
|
| 45 |
+
~ScriptCall() override = default;
|
| 46 |
+
|
| 47 |
+
protected:
|
| 48 |
+
virtual void toIValues(std::vector<at::IValue>& ivalues) const;
|
| 49 |
+
static std::unique_ptr<ScriptCall> fromIValues(
|
| 50 |
+
std::vector<at::IValue>& ivalues);
|
| 51 |
+
|
| 52 |
+
private:
|
| 53 |
+
// Given an operator symbol and a string schema, return the matched operator.
|
| 54 |
+
static std::shared_ptr<Operator> matchOperator(const std::string& str_schema);
|
| 55 |
+
|
| 56 |
+
static const std::string BUILTIN_OP_NAMESPACE_;
|
| 57 |
+
static const std::string ATEN_PREFIX_;
|
| 58 |
+
|
| 59 |
+
// This field has value if this ScriptCall represents invocation of a builtin
|
| 60 |
+
// operator.
|
| 61 |
+
std::optional<std::shared_ptr<Operator>> op_;
|
| 62 |
+
// This field has non empty string if this ScriptCall represents invocation of
|
| 63 |
+
// an annotated torchscript function defined by users.
|
| 64 |
+
std::optional<const c10::QualifiedName> qualifiedName_;
|
| 65 |
+
std::vector<at::IValue> stack_;
|
| 66 |
+
const bool isAsyncExecution_;
|
| 67 |
+
};
|
| 68 |
+
|
| 69 |
+
} // namespace rpc
|
| 70 |
+
} // namespace distributed
|
| 71 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_remote_call.h
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/script_call.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
#include <torch/csrc/jit/runtime/operator.h>
|
| 6 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 7 |
+
#include <vector>
|
| 8 |
+
|
| 9 |
+
namespace torch {
|
| 10 |
+
namespace distributed {
|
| 11 |
+
namespace rpc {
|
| 12 |
+
|
| 13 |
+
using torch::jit::Operator;
|
| 14 |
+
|
| 15 |
+
// A ScriptRemoteCall instance represents an invocation of `dist.remote` on a
|
| 16 |
+
// builtin operator. Currently, it does not support using RRef as arguments yet.
|
| 17 |
+
// Besides the operator and a vector of arguments, ScriptRemoteCall also
|
| 18 |
+
// contains the RRefId and the ForkId of the return value RRef.
|
| 19 |
+
class TORCH_API ScriptRemoteCall final : public ScriptCall {
|
| 20 |
+
public:
|
| 21 |
+
// Constructor for builitin operator call.
|
| 22 |
+
ScriptRemoteCall(
|
| 23 |
+
std::shared_ptr<Operator> op,
|
| 24 |
+
std::vector<at::IValue>&& stack,
|
| 25 |
+
const RRefId& retRRefId,
|
| 26 |
+
const ForkId& retForkId);
|
| 27 |
+
|
| 28 |
+
// Constructor for TorchScript function call.
|
| 29 |
+
ScriptRemoteCall(
|
| 30 |
+
const c10::QualifiedName& qualifiedName,
|
| 31 |
+
std::vector<at::IValue>&& stack,
|
| 32 |
+
const RRefId& retRRefId,
|
| 33 |
+
const ForkId& retForkId,
|
| 34 |
+
const bool isAsyncExecution);
|
| 35 |
+
|
| 36 |
+
inline const RRefId& retRRefId() const {
|
| 37 |
+
return retRRefId_;
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
inline const ForkId& retForkId() const {
|
| 41 |
+
return retForkId_;
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
static std::unique_ptr<ScriptRemoteCall> fromIValues(
|
| 45 |
+
std::vector<at::IValue>& ivalues);
|
| 46 |
+
|
| 47 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 48 |
+
static std::unique_ptr<ScriptRemoteCall> fromMessage(const Message& message);
|
| 49 |
+
|
| 50 |
+
private:
|
| 51 |
+
const RRefId retRRefId_;
|
| 52 |
+
const ForkId retForkId_;
|
| 53 |
+
};
|
| 54 |
+
|
| 55 |
+
} // namespace rpc
|
| 56 |
+
} // namespace distributed
|
| 57 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/script_resp.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/message.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 5 |
+
#include <torch/csrc/jit/serialization/pickler.h>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
namespace distributed {
|
| 9 |
+
namespace rpc {
|
| 10 |
+
|
| 11 |
+
// Return value of a builtin operator or a TorchScript function.
|
| 12 |
+
class TORCH_API ScriptResp final : public RpcCommandBase {
|
| 13 |
+
public:
|
| 14 |
+
explicit ScriptResp(at::IValue&& values);
|
| 15 |
+
|
| 16 |
+
const at::IValue& value();
|
| 17 |
+
c10::intrusive_ptr<Message> toMessageImpl() && override;
|
| 18 |
+
static std::unique_ptr<ScriptResp> fromMessage(const Message& message);
|
| 19 |
+
|
| 20 |
+
private:
|
| 21 |
+
const at::IValue value_;
|
| 22 |
+
};
|
| 23 |
+
|
| 24 |
+
} // namespace rpc
|
| 25 |
+
} // namespace distributed
|
| 26 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_agent.h
ADDED
|
@@ -0,0 +1,492 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_TENSORPIPE
|
| 4 |
+
|
| 5 |
+
#include <atomic>
|
| 6 |
+
#include <thread>
|
| 7 |
+
|
| 8 |
+
#include <c10/core/thread_pool.h>
|
| 9 |
+
#include <torch/csrc/distributed/c10d/PrefixStore.hpp>
|
| 10 |
+
#include <torch/csrc/distributed/c10d/Store.hpp>
|
| 11 |
+
#include <torch/csrc/distributed/rpc/rpc_agent.h>
|
| 12 |
+
#include <utility>
|
| 13 |
+
|
| 14 |
+
// Forward-declare the TensorPipe classes we need, to avoid including its
|
| 15 |
+
// headers in PyTorch's ones and thus have it become a public dependency.
|
| 16 |
+
|
| 17 |
+
namespace tensorpipe {
|
| 18 |
+
|
| 19 |
+
class Context;
|
| 20 |
+
class Error;
|
| 21 |
+
class Listener;
|
| 22 |
+
class Message;
|
| 23 |
+
class Pipe;
|
| 24 |
+
|
| 25 |
+
namespace transport {
|
| 26 |
+
class Context;
|
| 27 |
+
} // namespace transport
|
| 28 |
+
|
| 29 |
+
namespace channel {
|
| 30 |
+
class Context;
|
| 31 |
+
} // namespace channel
|
| 32 |
+
|
| 33 |
+
} // namespace tensorpipe
|
| 34 |
+
|
| 35 |
+
namespace torch::distributed::rpc {
|
| 36 |
+
|
| 37 |
+
// These priorities instruct TensorPipe on which transport/channel to pick
|
| 38 |
+
// during handshake. Higher priorities will take precedence over lower ones.
|
| 39 |
+
// The transport with lowest priority will be the one used to bootstrap pipes.
|
| 40 |
+
|
| 41 |
+
constexpr int64_t kShmTransportPriority = 200;
|
| 42 |
+
constexpr int64_t kIbvTransportPriority = 100;
|
| 43 |
+
// The UV transport just uses TCP and should work everywhere, thus keep it last.
|
| 44 |
+
constexpr int64_t kUvTransportPriority = 0;
|
| 45 |
+
|
| 46 |
+
constexpr int64_t kCmaChannelPriority = 1200;
|
| 47 |
+
constexpr int64_t kMultiplexedUvChannelPriority = 1100;
|
| 48 |
+
// The basic channel reuses a transport as a channel, and is thus our fallback.
|
| 49 |
+
constexpr int64_t kBasicChannelPriority = 1000;
|
| 50 |
+
|
| 51 |
+
// CPU channel have higher priority than CUDA channels, since the latter might
|
| 52 |
+
// handle CPU-to-CPU transfers, but will always be less efficient than their
|
| 53 |
+
// CPU-only counterparts.
|
| 54 |
+
constexpr int64_t kCudaIpcChannelPriority = 300;
|
| 55 |
+
constexpr int64_t kCudaGdrChannelPriority = 200;
|
| 56 |
+
constexpr int64_t kCudaXthChannelPriority = 400;
|
| 57 |
+
constexpr int64_t kCudaBasicChannelPriority = 0;
|
| 58 |
+
|
| 59 |
+
using steady_clock_time_point =
|
| 60 |
+
std::chrono::time_point<std::chrono::steady_clock>;
|
| 61 |
+
|
| 62 |
+
struct TORCH_API TransportRegistration {
|
| 63 |
+
std::shared_ptr<tensorpipe::transport::Context> transport;
|
| 64 |
+
int64_t priority;
|
| 65 |
+
std::string address;
|
| 66 |
+
};
|
| 67 |
+
|
| 68 |
+
C10_DECLARE_REGISTRY(TensorPipeTransportRegistry, TransportRegistration);
|
| 69 |
+
|
| 70 |
+
struct TORCH_API ChannelRegistration {
|
| 71 |
+
std::shared_ptr<tensorpipe::channel::Context> channel;
|
| 72 |
+
int64_t priority;
|
| 73 |
+
};
|
| 74 |
+
|
| 75 |
+
C10_DECLARE_REGISTRY(TensorPipeChannelRegistry, ChannelRegistration);
|
| 76 |
+
|
| 77 |
+
constexpr auto kDefaultNumWorkerThreads = 16;
|
| 78 |
+
|
| 79 |
+
struct TORCH_API TensorPipeRpcBackendOptions : public RpcBackendOptions {
|
| 80 |
+
TensorPipeRpcBackendOptions(
|
| 81 |
+
int numWorkerThreads,
|
| 82 |
+
std::optional<std::vector<std::string>> transports,
|
| 83 |
+
std::optional<std::vector<std::string>> channels,
|
| 84 |
+
float rpc_timeout,
|
| 85 |
+
std::string init_method,
|
| 86 |
+
std::unordered_map<std::string, DeviceMap> device_maps = {},
|
| 87 |
+
std::vector<c10::Device> devices = {})
|
| 88 |
+
: RpcBackendOptions(rpc_timeout, std::move(init_method)),
|
| 89 |
+
numWorkerThreads(numWorkerThreads),
|
| 90 |
+
transports(std::move(transports)),
|
| 91 |
+
channels(std::move(channels)),
|
| 92 |
+
deviceMaps(std::move(device_maps)),
|
| 93 |
+
devices(std::move(devices)) {
|
| 94 |
+
TORCH_CHECK(
|
| 95 |
+
numWorkerThreads > 0,
|
| 96 |
+
"num_worker_threads must be positive, got ",
|
| 97 |
+
numWorkerThreads);
|
| 98 |
+
|
| 99 |
+
if (this->transports.has_value()) {
|
| 100 |
+
for (const std::string& transportName : this->transports.value()) {
|
| 101 |
+
TORCH_CHECK(
|
| 102 |
+
TensorPipeTransportRegistry()->Has(transportName),
|
| 103 |
+
"Unknown transport: ",
|
| 104 |
+
transportName);
|
| 105 |
+
}
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
if (this->channels.has_value()) {
|
| 109 |
+
for (const std::string& channelName : this->channels.value()) {
|
| 110 |
+
TORCH_CHECK(
|
| 111 |
+
TensorPipeChannelRegistry()->Has(channelName),
|
| 112 |
+
"Unknown channel: ",
|
| 113 |
+
channelName);
|
| 114 |
+
}
|
| 115 |
+
}
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
void setDeviceMap(const std::string& workerName, const DeviceMap& deviceMap) {
|
| 119 |
+
auto iter = deviceMaps.find(workerName);
|
| 120 |
+
if (iter == deviceMaps.end()) {
|
| 121 |
+
deviceMaps[workerName] = deviceMap;
|
| 122 |
+
} else {
|
| 123 |
+
for (auto& entry : deviceMap) {
|
| 124 |
+
// c10::Device has no default constructor, hence map[device] dosn't work
|
| 125 |
+
// In C++-17 we can use insert_or_assign.
|
| 126 |
+
auto entryIter = iter->second.find(entry.first);
|
| 127 |
+
if (entryIter == iter->second.end()) {
|
| 128 |
+
iter->second.emplace(entry.first, entry.second);
|
| 129 |
+
} else {
|
| 130 |
+
entryIter->second = entry.second;
|
| 131 |
+
}
|
| 132 |
+
}
|
| 133 |
+
}
|
| 134 |
+
}
|
| 135 |
+
|
| 136 |
+
int numWorkerThreads;
|
| 137 |
+
const std::optional<std::vector<std::string>> transports;
|
| 138 |
+
const std::optional<std::vector<std::string>> channels;
|
| 139 |
+
std::unordered_map<std::string, DeviceMap> deviceMaps;
|
| 140 |
+
std::vector<c10::Device> devices;
|
| 141 |
+
};
|
| 142 |
+
|
| 143 |
+
// Struct to track the network source metrics
|
| 144 |
+
struct TORCH_API NetworkSourceInfo {
|
| 145 |
+
worker_id_t srcRank;
|
| 146 |
+
std::vector<uint8_t> srcMachineAddr;
|
| 147 |
+
};
|
| 148 |
+
|
| 149 |
+
// Struct to track aggregated network metrics
|
| 150 |
+
struct TORCH_API AggregatedNetworkData {
|
| 151 |
+
uint64_t numCalls{0};
|
| 152 |
+
uint64_t totalSentBytes{0};
|
| 153 |
+
uint64_t totalRecvBytes{0};
|
| 154 |
+
uint64_t totalErrors{0};
|
| 155 |
+
};
|
| 156 |
+
|
| 157 |
+
// TensorPipeAgent leverages TensorPipe (https://github.com/pytorch/tensorpipe)
|
| 158 |
+
// to transparently move tensors and payloads through the fastest available
|
| 159 |
+
// transport or channel. It acts like a hybrid RPC transport, providing shared
|
| 160 |
+
// memory (linux) and TCP (linux & mac) support. CUDA support is in progress.
|
| 161 |
+
class TORCH_API TensorPipeAgent : public RpcAgent {
|
| 162 |
+
public:
|
| 163 |
+
TensorPipeAgent(
|
| 164 |
+
const c10::intrusive_ptr<::c10d::Store>& store,
|
| 165 |
+
std::string selfName,
|
| 166 |
+
worker_id_t selfId,
|
| 167 |
+
std::optional<int> worldSize,
|
| 168 |
+
TensorPipeRpcBackendOptions opts,
|
| 169 |
+
std::unordered_map<std::string, DeviceMap> reverseDeviceMaps,
|
| 170 |
+
std::vector<c10::Device> devices,
|
| 171 |
+
std::unique_ptr<RequestCallback> cb);
|
| 172 |
+
|
| 173 |
+
TensorPipeAgent(const TensorPipeAgent&) = delete;
|
| 174 |
+
TensorPipeAgent& operator=(const TensorPipeAgent&) = delete;
|
| 175 |
+
|
| 176 |
+
c10::intrusive_ptr<JitFuture> send(
|
| 177 |
+
const WorkerInfo& to,
|
| 178 |
+
c10::intrusive_ptr<Message> message,
|
| 179 |
+
const float rpcTimeoutSeconds = kUnsetRpcTimeout,
|
| 180 |
+
const DeviceMap& deviceMap = {}) override;
|
| 181 |
+
|
| 182 |
+
// join() and sync() would be deprecated -
|
| 183 |
+
// https://github.com/pytorch/pytorch/issues/27647
|
| 184 |
+
void join(bool shutdown = false, float timeout = 0) override;
|
| 185 |
+
void sync() override{};
|
| 186 |
+
void startImpl() override;
|
| 187 |
+
void shutdownImpl() override;
|
| 188 |
+
|
| 189 |
+
~TensorPipeAgent() override;
|
| 190 |
+
|
| 191 |
+
const WorkerInfo& getWorkerInfo(const std::string& workerName) const override;
|
| 192 |
+
const WorkerInfo& getWorkerInfo(worker_id_t workerId) const override;
|
| 193 |
+
std::vector<WorkerInfo> getWorkerInfos() const override;
|
| 194 |
+
void updateGroupMembership(
|
| 195 |
+
const WorkerInfo& workerInfo,
|
| 196 |
+
const std::vector<c10::Device>& devices,
|
| 197 |
+
const std::unordered_map<std::string, DeviceMap>& reverseDeviceMaps,
|
| 198 |
+
bool isJoin);
|
| 199 |
+
|
| 200 |
+
std::unordered_map<std::string, std::string> getMetrics() override;
|
| 201 |
+
|
| 202 |
+
void addGilWaitTime(const std::chrono::microseconds gilWaitTime) override;
|
| 203 |
+
|
| 204 |
+
TensorPipeRpcBackendOptions getBackendOptions() const;
|
| 205 |
+
|
| 206 |
+
const c10::intrusive_ptr<::c10d::Store> getStore() const;
|
| 207 |
+
|
| 208 |
+
DeviceMap getDeviceMap(const WorkerInfo& dest) const override;
|
| 209 |
+
|
| 210 |
+
const std::vector<c10::Device>& getDevices() const override;
|
| 211 |
+
|
| 212 |
+
using NetworkDataDict =
|
| 213 |
+
std::unordered_map<std::string, AggregatedNetworkData>;
|
| 214 |
+
|
| 215 |
+
// Returns metrics tracked by the NetworkDataDict
|
| 216 |
+
NetworkDataDict getNetworkData();
|
| 217 |
+
// Returns NetworkSourceInfo struct
|
| 218 |
+
NetworkSourceInfo getNetworkSourceInfo();
|
| 219 |
+
|
| 220 |
+
static const std::string& guessAddress();
|
| 221 |
+
|
| 222 |
+
// For testing purposes.
|
| 223 |
+
size_t timeoutMapSize();
|
| 224 |
+
size_t numPendingResponses();
|
| 225 |
+
size_t messageIdToTimeoutMapSize();
|
| 226 |
+
|
| 227 |
+
const bool isStaticGroup_;
|
| 228 |
+
|
| 229 |
+
protected:
|
| 230 |
+
// TensorPipe write function that could be used to write response
|
| 231 |
+
// messages by server, and write request messages by client. This
|
| 232 |
+
// is a protected method since it is overwritten by FaultyTensorPipeAgent
|
| 233 |
+
virtual void pipeWrite(
|
| 234 |
+
const std::shared_ptr<tensorpipe::Pipe>&,
|
| 235 |
+
c10::intrusive_ptr<Message> message,
|
| 236 |
+
std::vector<c10::Device>&& devices,
|
| 237 |
+
std::vector<c10::Stream> streams,
|
| 238 |
+
std::function<void(const tensorpipe::Error&)>) noexcept;
|
| 239 |
+
|
| 240 |
+
private:
|
| 241 |
+
// Removes the given messageId with the given expirationTime from the
|
| 242 |
+
// timeoutMap_.
|
| 243 |
+
void removeFromTimeoutMap(uint64_t messageId);
|
| 244 |
+
|
| 245 |
+
// Populates workerIdToInfo_ and workerNameToInfo_ using addressStore_
|
| 246 |
+
void prepareNames(bool isStaticGroup);
|
| 247 |
+
|
| 248 |
+
// Check the static group attribute with the value set in store
|
| 249 |
+
void checkAndSetStaticGroup(const c10::intrusive_ptr<::c10d::Store>& store);
|
| 250 |
+
|
| 251 |
+
const std::string& findWorkerURL(const WorkerInfo& worker) const;
|
| 252 |
+
|
| 253 |
+
// Only use for Dynamic RPC groups, method to have worker leave group
|
| 254 |
+
void leaveGroup();
|
| 255 |
+
|
| 256 |
+
// TensorPipe read function that could be used to read response messages
|
| 257 |
+
// by client, and read request messages by server.
|
| 258 |
+
void pipeRead(
|
| 259 |
+
const std::shared_ptr<tensorpipe::Pipe>&,
|
| 260 |
+
std::function<void(
|
| 261 |
+
const tensorpipe::Error&,
|
| 262 |
+
c10::intrusive_ptr<Message>,
|
| 263 |
+
std::vector<c10::Stream>)>) noexcept;
|
| 264 |
+
|
| 265 |
+
// Callback of listener accept()
|
| 266 |
+
void onListenerAccepted(
|
| 267 |
+
const tensorpipe::Error& error,
|
| 268 |
+
std::shared_ptr<tensorpipe::Pipe>& pipe);
|
| 269 |
+
|
| 270 |
+
// Respond to a call from a peer
|
| 271 |
+
void respond(std::shared_ptr<tensorpipe::Pipe>& pipe);
|
| 272 |
+
|
| 273 |
+
void sendCompletedResponseMessage(
|
| 274 |
+
std::shared_ptr<tensorpipe::Pipe>& pipe,
|
| 275 |
+
JitFuture& futureResponseMessage,
|
| 276 |
+
uint64_t messageId,
|
| 277 |
+
std::vector<c10::Stream> stream);
|
| 278 |
+
|
| 279 |
+
// Collects metrics from successful RPC calls
|
| 280 |
+
void trackNetworkData(
|
| 281 |
+
uint64_t requestSize,
|
| 282 |
+
uint64_t responseSize,
|
| 283 |
+
const std::string& destWorkerName);
|
| 284 |
+
|
| 285 |
+
// Collects metrics from failed RPC calls
|
| 286 |
+
void trackNetworkError(
|
| 287 |
+
uint64_t requestSize,
|
| 288 |
+
const std::string& destWorkerName);
|
| 289 |
+
|
| 290 |
+
inline std::vector<c10::Device> getDevicesForRemote(
|
| 291 |
+
const std::string& remoteName,
|
| 292 |
+
const Message& message) const;
|
| 293 |
+
|
| 294 |
+
// When a request+response completes, we need to mark the future message as
|
| 295 |
+
// complete. However, if its timeout has already expired, it already has an
|
| 296 |
+
// error set. There is no atomic "test-and-set" way to mark a future complete
|
| 297 |
+
// only if it isn't yet. It does exist for errors (setErrorIfNeeded) but, even
|
| 298 |
+
// then, it ends up printing a log message, which may worry the user. To solve
|
| 299 |
+
// both issues we use a separate atomic flag to know the status of the future.
|
| 300 |
+
struct AtomicJitFuture {
|
| 301 |
+
explicit AtomicJitFuture(const std::vector<c10::Device>& devices) {
|
| 302 |
+
jitFuture = c10::make_intrusive<at::ivalue::Future>(
|
| 303 |
+
at::AnyClassType::get(), devices);
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
std::atomic_flag isComplete = ATOMIC_FLAG_INIT;
|
| 307 |
+
c10::intrusive_ptr<JitFuture> jitFuture;
|
| 308 |
+
};
|
| 309 |
+
|
| 310 |
+
// Maintains state per client pipe to track pending response messages and
|
| 311 |
+
// error states. pendingResponseMessage_ should be protected by a mutex since
|
| 312 |
+
// it can be raced with user send() call.
|
| 313 |
+
// TODO: To achieve better performance we can have a pipe pool per
|
| 314 |
+
// client that can be configured using RpcBackendOptions.
|
| 315 |
+
struct ClientPipe {
|
| 316 |
+
explicit ClientPipe(std::shared_ptr<tensorpipe::Pipe> pipe)
|
| 317 |
+
: pipe_(std::move(pipe)) {}
|
| 318 |
+
std::shared_ptr<tensorpipe::Pipe> pipe_;
|
| 319 |
+
mutable std::mutex mutex_;
|
| 320 |
+
bool inError_{false};
|
| 321 |
+
// Map from Message Request ID's to corresponding futures.
|
| 322 |
+
std::unordered_map<uint64_t, std::shared_ptr<AtomicJitFuture>>
|
| 323 |
+
pendingResponseMessage_;
|
| 324 |
+
};
|
| 325 |
+
|
| 326 |
+
const c10::intrusive_ptr<::c10d::Store> store_;
|
| 327 |
+
|
| 328 |
+
const TensorPipeRpcBackendOptions opts_;
|
| 329 |
+
// For dynamic RPC, the reverse device maps are updated whenever a new rank
|
| 330 |
+
// joins or leaves the group
|
| 331 |
+
std::unordered_map<std::string, DeviceMap> reverseDeviceMaps_;
|
| 332 |
+
// Local devices used by this agent. If application didn't specify this
|
| 333 |
+
// field, it will be initialized using corresponding local devices in
|
| 334 |
+
// opts_.deviceMaps and reverseDeviceMaps_;
|
| 335 |
+
std::vector<c10::Device> devices_;
|
| 336 |
+
|
| 337 |
+
ThreadPool threadPool_;
|
| 338 |
+
std::shared_ptr<tensorpipe::Context> context_;
|
| 339 |
+
std::shared_ptr<tensorpipe::Listener> listener_;
|
| 340 |
+
|
| 341 |
+
mutable std::mutex connectedPipesMutex_;
|
| 342 |
+
std::unordered_map<worker_id_t, ClientPipe> connectedPipes_;
|
| 343 |
+
|
| 344 |
+
// Maps keyed on name and id for easy WorkerInfo lookup.
|
| 345 |
+
std::unordered_map<worker_id_t, WorkerInfo> workerIdToInfo_;
|
| 346 |
+
std::unordered_map<std::string, WorkerInfo> workerNameToInfo_;
|
| 347 |
+
std::unordered_map<std::string, std::string> workerNameToURL_;
|
| 348 |
+
|
| 349 |
+
::c10d::PrefixStore rankToNameStore_;
|
| 350 |
+
::c10d::PrefixStore nameToAddressStore_;
|
| 351 |
+
// Store keys that will used to count joined processes and active calls during
|
| 352 |
+
// the shutdown process
|
| 353 |
+
::c10d::PrefixStore shutdownStore_;
|
| 354 |
+
int worldSize_ = 0;
|
| 355 |
+
std::atomic<uint64_t> nextMessageID_{0};
|
| 356 |
+
|
| 357 |
+
// Metadata used for tracking of whether certain RPCs have timed out or not.
|
| 358 |
+
struct TimeoutMessageMetadata {
|
| 359 |
+
TimeoutMessageMetadata(
|
| 360 |
+
uint64_t messageId_,
|
| 361 |
+
std::shared_ptr<AtomicJitFuture> responseFuture_,
|
| 362 |
+
std::chrono::milliseconds timeout_)
|
| 363 |
+
: messageId(messageId_),
|
| 364 |
+
responseFuture(std::move(responseFuture_)),
|
| 365 |
+
timeout(timeout_) {}
|
| 366 |
+
uint64_t messageId;
|
| 367 |
+
std::shared_ptr<AtomicJitFuture> responseFuture;
|
| 368 |
+
std::chrono::milliseconds timeout;
|
| 369 |
+
};
|
| 370 |
+
|
| 371 |
+
// Map to store the expiration times for each message.
|
| 372 |
+
std::map<steady_clock_time_point, std::vector<TimeoutMessageMetadata>>
|
| 373 |
+
timeoutMap_;
|
| 374 |
+
|
| 375 |
+
// Map to store the messageId to expiry time.
|
| 376 |
+
std::unordered_map<uint64_t, steady_clock_time_point> messageIdToTimeout_;
|
| 377 |
+
|
| 378 |
+
// Thread that will poll the timeoutMap_ for timed out messages and mark them
|
| 379 |
+
// with an error accordingly
|
| 380 |
+
std::thread timeoutThread_;
|
| 381 |
+
|
| 382 |
+
// Function run by the timeoutThread_ to check for timed out RPCs
|
| 383 |
+
void pollTimeoutRpcs();
|
| 384 |
+
|
| 385 |
+
// Mutex to guard the timeoutMap_
|
| 386 |
+
std::mutex timeoutMapMutex_;
|
| 387 |
+
|
| 388 |
+
// Condition Variable to signal population of the timeoutMap_
|
| 389 |
+
std::condition_variable timeoutThreadCV_;
|
| 390 |
+
|
| 391 |
+
// Returns the expiration time for an RPC by adding the current time to the
|
| 392 |
+
// passed in timeout.
|
| 393 |
+
inline steady_clock_time_point computeRpcMessageExpiryTime(
|
| 394 |
+
std::chrono::milliseconds timeout) const {
|
| 395 |
+
return std::chrono::time_point_cast<std::chrono::milliseconds>(
|
| 396 |
+
std::chrono::steady_clock::now() + timeout);
|
| 397 |
+
}
|
| 398 |
+
|
| 399 |
+
// Handle error on an outgoing pipe
|
| 400 |
+
void handleClientError(
|
| 401 |
+
ClientPipe& clientPipe,
|
| 402 |
+
const tensorpipe::Error& error);
|
| 403 |
+
|
| 404 |
+
// This is a generic struct for capturing Time-Series Metrics. It keeps a
|
| 405 |
+
// running sum and count of data points (observations), and can return an
|
| 406 |
+
// average of the data points seen so far. This is currently only used for
|
| 407 |
+
// tracking the GIL Wait Time in RPC Agents, but can be used for other metrics
|
| 408 |
+
// as well.
|
| 409 |
+
// Generic tracker for a time-series metric: keeps a running sum and count of
// observations and reports their mean. Per the file comment above, currently
// used for GIL wait time in RPC agents but usable for other metrics as well.
struct TimeSeriesMetricsTracker {
  // Running sum of the data points seen so far
  uint64_t currentSum_;
  // Running count of the data points seen so far
  uint64_t currentCount_;

  // Starts from the given sum/count; both default to zero (an empty series).
  explicit TimeSeriesMetricsTracker(
      uint64_t currentSum = 0,
      uint64_t currentCount = 0);

  // Adds a data point (which is basically one observation for the metric
  // being tracked) to the running sum and count.
  void addData(uint64_t dataPoint);
  // Returns the average of all the data points seen so far.
  float computeAverage() const;
};
|
| 425 |
+
|
| 426 |
+
// Map of Time-Series metrics tracked by the RPC Agent
|
| 427 |
+
std::unordered_map<std::string, TimeSeriesMetricsTracker> timeSeriesMetrics_;
|
| 428 |
+
// Mutex to guard timeSeriesMetrics_
|
| 429 |
+
std::mutex metricsMutex_;
|
| 430 |
+
|
| 431 |
+
// Custom lock guard used to check if the RPC group is dynamic and lock the
|
| 432 |
+
// mutex if so
|
| 433 |
+
// Custom lock guard used to check if the RPC group is dynamic and lock the
// mutex if so. For a static group the membership maps are immutable after
// startup, so no locking is required.
//
// BUG FIX: the original locked when isStaticGroup_ was TRUE, i.e. it took the
// mutex exactly when no protection was needed and left dynamic-group
// membership updates unguarded — the opposite of the documented intent.
struct GroupMembershipLockGuard {
  GroupMembershipLockGuard(std::mutex& mutex, bool isStaticGroup)
      : ref_(mutex), isStaticGroup_(isStaticGroup) {
    // Only dynamic groups mutate membership data concurrently.
    if (!isStaticGroup_) {
      ref_.lock();
    }
  }

  ~GroupMembershipLockGuard() {
    if (!isStaticGroup_) {
      ref_.unlock();
    }
  }

  GroupMembershipLockGuard(const GroupMembershipLockGuard&) = delete;
  GroupMembershipLockGuard& operator=(const GroupMembershipLockGuard&) = delete;

 private:
  std::mutex& ref_;
  bool isStaticGroup_;
};
|
| 453 |
+
// Mutex to guard access to group membership data
|
| 454 |
+
// e.g. updates to (workerIdToInfo_, workerNameToInfo_, workerNameToURL_)
|
| 455 |
+
mutable std::mutex groupMembershipMutex_;
|
| 456 |
+
|
| 457 |
+
// Map to Track Network Data
|
| 458 |
+
NetworkDataDict networkData_;
|
| 459 |
+
// Mutex to guard networkData_
|
| 460 |
+
std::mutex networkDataMutex_;
|
| 461 |
+
|
| 462 |
+
// A mutex and a cv to guard access to the call counts and watch for changes.
|
| 463 |
+
std::mutex callCountMutex_;
|
| 464 |
+
std::condition_variable callCountCV_;
|
| 465 |
+
// Running total of un-processed, un-errored RPC calls sent
|
| 466 |
+
int32_t clientActiveCalls_{0};
|
| 467 |
+
// Running total of un-processed RPC requests received
|
| 468 |
+
int32_t serverActiveCalls_{0};
|
| 469 |
+
// Running total of RPC requests that will be completed asynchronously
|
| 470 |
+
int32_t serverActiveAsyncCalls_{0};
|
| 471 |
+
|
| 472 |
+
// Whether a global graceful shutdown has begun, in which case we'll silence
|
| 473 |
+
// error messages due to remote workers closing their pipes.
|
| 474 |
+
std::atomic<bool> shuttingDown_{false};
|
| 475 |
+
|
| 476 |
+
// Helpers to modify the counts while correctly dealing with the mutex and cv.
|
| 477 |
+
void increaseCallCount(int32_t& count);
|
| 478 |
+
void decreaseCallCount(int32_t& count);
|
| 479 |
+
|
| 480 |
+
// Helpers to set the state of the requests.
|
| 481 |
+
void markFutureAsComplete(
|
| 482 |
+
std::shared_ptr<AtomicJitFuture> atomicFuture,
|
| 483 |
+
c10::intrusive_ptr<Message> message,
|
| 484 |
+
std::vector<c10::Stream> streams);
|
| 485 |
+
void markFutureWithError(
|
| 486 |
+
std::shared_ptr<AtomicJitFuture> atomicFuture,
|
| 487 |
+
std::string errorMsg);
|
| 488 |
+
};
|
| 489 |
+
|
| 490 |
+
} // namespace torch::distributed::rpc
|
| 491 |
+
|
| 492 |
+
#endif // USE_TENSORPIPE
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/tensorpipe_utils.h
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#ifdef USE_TENSORPIPE
|
| 4 |
+
|
| 5 |
+
#include <torch/csrc/distributed/rpc/utils.h>
|
| 6 |
+
|
| 7 |
+
namespace tensorpipe {
|
| 8 |
+
class Message;
|
| 9 |
+
class Allocation;
|
| 10 |
+
class Descriptor;
|
| 11 |
+
} // namespace tensorpipe
|
| 12 |
+
|
| 13 |
+
namespace torch::distributed::rpc {
|
| 14 |
+
|
| 15 |
+
TORCH_API const c10::Stream& getStreamForDevice(
|
| 16 |
+
const std::vector<c10::Stream>& streams,
|
| 17 |
+
const c10::Device& device);
|
| 18 |
+
|
| 19 |
+
// Inspired by c10/core/impl/DeviceGuardImplInterface.h.
|
| 20 |
+
|
| 21 |
+
// Abstract per-device-type hook used by the TensorPipe agent to stage tensor
// storage into outgoing messages and to allocate buffers for incoming ones.
// One converter is registered per DeviceType via the registrar/macro below.
class TensorpipeDeviceTypeConverter {
 public:
  // Ideally we'd want this to also return a tensorpipe::Message::Tensor object
  // but we cannot forward-declare that class (because it's nested), and we
  // cannot include the TensorPipe headers because it's a private dependency.
  // Thus we bend over backwards and entrust this method with appending that
  // object to the `tensors` field of the tensorpipe::Message object we pass.
  // NOTE(review): the returned byte vector is presumably a staging copy that
  // the caller must keep alive until the write completes — confirm at callers.
  virtual std::optional<std::vector<char>> prepareTensorForSending(
      const c10::Storage& storage,
      const std::vector<c10::Stream>& streams,
      tensorpipe::Message& message) const = 0;

  // Same as above: this method cannot return a tensorpipe::Allocation::Tensor,
  // thus it appends it to the `tensors` field of the tensorpipe::Allocation.
  virtual at::DataPtr allocateTensorForReceiving(
      c10::DeviceIndex deviceIndex,
      size_t length,
      const std::vector<c10::Stream>& streams,
      tensorpipe::Allocation& allocation) const = 0;

  // Virtual destructor: instances are held and deleted through base pointers.
  virtual ~TensorpipeDeviceTypeConverter() = default;
};
|
| 43 |
+
|
| 44 |
+
extern TORCH_API std::array<
|
| 45 |
+
std::atomic<const TensorpipeDeviceTypeConverter*>,
|
| 46 |
+
static_cast<size_t>(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)>
|
| 47 |
+
device_type_converter_registry;
|
| 48 |
+
|
| 49 |
+
// Helper whose constructor registers a converter for the given DeviceType in
// device_type_converter_registry; instantiated as a static object by the
// C10_REGISTER_TENSORPIPE_DEVICE_TYPE_CONVERTER macro below.
class TORCH_API TensorpipeDeviceTypeConverterRegistrar {
 public:
  TensorpipeDeviceTypeConverterRegistrar(
      DeviceType,
      const TensorpipeDeviceTypeConverter*);
};
|
| 55 |
+
|
| 56 |
+
#define C10_REGISTER_TENSORPIPE_DEVICE_TYPE_CONVERTER( \
|
| 57 |
+
DevType, TensorpipeDeviceTypeConverter) \
|
| 58 |
+
static ::torch::distributed::rpc::TensorpipeDeviceTypeConverterRegistrar \
|
| 59 |
+
C10_ANONYMOUS_VARIABLE(g_##DeviceType)( \
|
| 60 |
+
::c10::DeviceType::DevType, new TensorpipeDeviceTypeConverter());
|
| 61 |
+
|
| 62 |
+
// Looks up the converter registered for `type` with a lock-free atomic load;
// returns nullptr when no converter has been registered for that device type.
inline const TensorpipeDeviceTypeConverter* getDeviceTypeConverter(
    DeviceType type) {
  return device_type_converter_registry[static_cast<size_t>(type)].load();
}
|
| 66 |
+
|
| 67 |
+
// A struct that holds pointers that keep alive all the memory that will be
|
| 68 |
+
// accessed by TensorPipe during a write operation.
|
| 69 |
+
// Holder for everything TensorPipe reads during an asynchronous write (see
// the comment directly above this struct); keep it alive until the write
// callback fires.
struct TensorpipeWriteBuffers {
  // Allocate on heap so pointers stay valid as we move the holder.
  std::unique_ptr<MessageType> type;
  std::unique_ptr<int64_t> id;
  std::vector<char> payload;
  std::vector<char> pickle;
  // This contains the original tensors and the clones of the sparse tensors.
  std::vector<torch::Tensor> tensors;
  // This contains the copies of the data of the tensors that didn't own their
  // memory, e.g., the ones created from torch::from_blob() with no deleter.
  std::vector<std::vector<char>> copiedTensors;
};
|
| 81 |
+
|
| 82 |
+
// A struct that holds pointers that keep alive all the memory that will be
|
| 83 |
+
// accessed by TensorPipe during a read operation.
|
| 84 |
+
// Holder for the buffers that receive incoming data (see the comment directly
// above this struct); keep it alive until the asynchronous read completes.
struct TensorpipeReadBuffers {
  // Allocate on heap so pointers stay valid as we move the holder.
  std::unique_ptr<MessageType> type;
  std::unique_ptr<int64_t> id;
  std::vector<char> payload;
  std::vector<char> pickle;
  // Owning buffers for the received tensor data, one per tensor.
  std::vector<c10::DataPtr> tensors;
};
|
| 92 |
+
|
| 93 |
+
// Convert an RPC message into a TensorPipe message, plus a holder to all the
|
| 94 |
+
// data that must be kept alive while the write is performed asynchronously.
|
| 95 |
+
TORCH_API std::tuple<tensorpipe::Message, TensorpipeWriteBuffers>
|
| 96 |
+
tensorpipeSerialize(
|
| 97 |
+
const c10::intrusive_ptr<Message>& rpcMessage,
|
| 98 |
+
std::vector<c10::Device> devices,
|
| 99 |
+
const std::vector<c10::Stream>& streams);
|
| 100 |
+
|
| 101 |
+
// Allocate the buffers that will hold the incoming data. They will be managed
|
| 102 |
+
// by the returned holder, which must be kept alive until the asynchronous read
|
| 103 |
+
// has finished. Pointers to these buffers will be stored in the returned
|
| 104 |
+
// tensorpipe::Allocation struct.
|
| 105 |
+
TORCH_API std::pair<tensorpipe::Allocation, TensorpipeReadBuffers>
|
| 106 |
+
tensorpipeAllocate(
|
| 107 |
+
const tensorpipe::Descriptor& tpDescriptor,
|
| 108 |
+
const std::vector<c10::Stream>& streams);
|
| 109 |
+
|
| 110 |
+
// Convert a TensorPipe message back into an RPC message. This requires the data
|
| 111 |
+
// to be available and can thus only be performed once the asynchronous read has
|
| 112 |
+
// completed. The holder can be destroyed once this function returns.
|
| 113 |
+
TORCH_API c10::intrusive_ptr<Message> tensorpipeDeserialize(
|
| 114 |
+
tensorpipe::Descriptor&& tpDescriptor,
|
| 115 |
+
TensorpipeReadBuffers&& holder);
|
| 116 |
+
|
| 117 |
+
} // namespace torch::distributed::rpc
|
| 118 |
+
|
| 119 |
+
#endif // USE_TENSORPIPE
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/torchscript_functions.h
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <torch/csrc/autograd/profiler.h>
|
| 5 |
+
#include <torch/csrc/distributed/autograd/utils.h>
|
| 6 |
+
#include <torch/csrc/distributed/rpc/rref_context.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/script_remote_call.h>
|
| 8 |
+
|
| 9 |
+
namespace torch::distributed::rpc {
|
| 10 |
+
|
| 11 |
+
// This function sends an rpc call to run torchscript function, currently the
|
| 12 |
+
// torchscript function could only be a user defined python function with
|
| 13 |
+
// "@torch.jit.script" annotation. The torchscript function could not be
|
| 14 |
+
// a class constructor, class method, instance method or a script module.
|
| 15 |
+
// dst: destination worker name
|
| 16 |
+
// qualifiedName: torchscript function qualified name string like
|
| 17 |
+
// "moduleName::torchscriptFunctionName", e.g,
|
| 18 |
+
// "dist_autograd_test::my_py_add"
|
| 19 |
+
// stack: a bag of IValue args passed to torchscriptFunctionName
|
| 20 |
+
// It returns c10::intrusive_ptr<ivalue::Future>
|
| 21 |
+
c10::intrusive_ptr<c10::ivalue::Future> TORCH_API rpcTorchscript(
|
| 22 |
+
const std::string& dstWorkerName,
|
| 23 |
+
const c10::QualifiedName& qualifiedName,
|
| 24 |
+
const c10::FunctionSchema& functionSchema,
|
| 25 |
+
std::vector<c10::IValue>& stack,
|
| 26 |
+
const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout,
|
| 27 |
+
const bool isAsyncExecution = false);
|
| 28 |
+
|
| 29 |
+
c10::intrusive_ptr<RRef> TORCH_API remoteTorchscript(
|
| 30 |
+
const std::string& dstWorkerName,
|
| 31 |
+
const c10::QualifiedName& qualifiedName,
|
| 32 |
+
const c10::FunctionSchema& functionSchema,
|
| 33 |
+
std::vector<c10::IValue>& stack,
|
| 34 |
+
const float rpcTimeoutSeconds = torch::distributed::rpc::kUnsetRpcTimeout,
|
| 35 |
+
const bool isAsyncExecution = false);
|
| 36 |
+
|
| 37 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/types.h
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/ivalue.h>
|
| 4 |
+
#include <atomic>
|
| 5 |
+
|
| 6 |
+
namespace torch::distributed::rpc {
|
| 7 |
+
|
| 8 |
+
using worker_id_t = int16_t;
|
| 9 |
+
using local_id_t = int64_t;
|
| 10 |
+
|
| 11 |
+
bool getAllowJitRRefPickle();
|
| 12 |
+
TORCH_API void enableJitRRefPickle();
|
| 13 |
+
TORCH_API void disableJitRRefPickle();
|
| 14 |
+
|
| 15 |
+
struct TORCH_API JitRRefPickleGuard {
|
| 16 |
+
JitRRefPickleGuard();
|
| 17 |
+
~JitRRefPickleGuard();
|
| 18 |
+
};
|
| 19 |
+
|
| 20 |
+
// A (creator worker id, locally-generated id) pair identifying an entity
// (RRef, fork, profiling run — see the aliases below) across the RPC group.
struct TORCH_API GloballyUniqueId final {
  GloballyUniqueId(worker_id_t createdOn, local_id_t localId);
  GloballyUniqueId(const GloballyUniqueId& other) = default;
  // Members are const, so assignment is disallowed.
  GloballyUniqueId& operator=(const GloballyUniqueId& other) = delete;

  bool operator==(const GloballyUniqueId& other) const;
  bool operator!=(const GloballyUniqueId& other) const;

  // (De)serialization to/from an IValue for transport over RPC.
  at::IValue toIValue() const;
  static GloballyUniqueId fromIValue(const at::IValue&);

  // Packs the creator's worker id into the bits above kLocalIdBits.
  // NOTE(review): assumes localId_ < 2^kLocalIdBits so the two fields don't
  // overlap — presumably enforced where ids are minted; confirm.
  struct Hash {
    size_t operator()(const GloballyUniqueId& key) const {
      return (uint64_t(key.createdOn_) << kLocalIdBits) | key.localId_;
    }
  };

  // Number of low bits reserved for the locally-generated id.
  static constexpr int kLocalIdBits = 48;

  const worker_id_t createdOn_; // worker on which this id was created
  const local_id_t localId_; // id unique within that worker
};
|
| 42 |
+
|
| 43 |
+
TORCH_API std::ostream& operator<<(
|
| 44 |
+
std::ostream& os,
|
| 45 |
+
const GloballyUniqueId& globalId);
|
| 46 |
+
|
| 47 |
+
using RRefId = GloballyUniqueId;
|
| 48 |
+
using ForkId = GloballyUniqueId;
|
| 49 |
+
using ProfilingId = GloballyUniqueId;
|
| 50 |
+
|
| 51 |
+
// Serialized form of a Python object: an opaque byte payload plus the tensors
// lifted out of it during serialization (kept separate for efficient
// transport).
struct TORCH_API SerializedPyObj final {
  // Takes ownership of both the payload bytes and the tensor list.
  SerializedPyObj(std::string&& payload, std::vector<at::Tensor>&& tensors)
      : payload_(std::move(payload)), tensors_(std::move(tensors)) {}

  // Flattens into a list of IValues; rvalue-qualified, so it consumes *this.
  std::vector<at::IValue> toIValues() &&;
  // Inverse of toIValues().
  static SerializedPyObj fromIValues(std::vector<at::IValue> value);

  std::string payload_;
  std::vector<at::Tensor> tensors_;
};
|
| 61 |
+
|
| 62 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_call.h
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
#include <torch/csrc/utils/pybind.h>
|
| 6 |
+
|
| 7 |
+
namespace torch::distributed::rpc {
|
| 8 |
+
|
| 9 |
+
// This class converts the content in a PythonCall into py::object. This is a
|
| 10 |
+
// helper class to make sure that all arguments deserialization is done before
|
| 11 |
+
// entering RequestCallbackImpl::processRpc(...), so that the deserialization
|
| 12 |
+
// related logic can be carried out in one spot instead of scattered in multiple
|
| 13 |
+
// places for different message types.
|
| 14 |
+
// NB: The reason for not consolidating class into PythonCall is because
|
| 15 |
+
// PythonCall is a libtorch type which should not depend on Python types.
|
| 16 |
+
// Deserialized counterpart of PythonCall: holds the Python UDF as a live
// py::object (see the explanatory comment above this class).
class TORCH_API UnpickledPythonCall : public RpcCommandBase {
 public:
  UnpickledPythonCall(
      const SerializedPyObj& serializedPyObj,
      bool isAsyncExecution);
  ~UnpickledPythonCall() override;

  // toMessage() method is not implemented, as objects of this class should
  // never be directly converted into a Message object.
  c10::intrusive_ptr<Message> toMessageImpl() && override;
  // The deserialized Python user-defined function.
  const py::object& pythonUdf() const;

  // Whether the UDF should be executed asynchronously on the callee.
  inline bool isAsyncExecution() const {
    return isAsyncExecution_;
  }

 private:
  py::object pythonUdf_;
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const bool isAsyncExecution_;
};
|
| 37 |
+
|
| 38 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/unpickled_python_remote_call.h
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 4 |
+
#include <torch/csrc/distributed/rpc/types.h>
|
| 5 |
+
#include <torch/csrc/distributed/rpc/unpickled_python_call.h>
|
| 6 |
+
#include <torch/csrc/utils/pybind.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::distributed::rpc {
|
| 9 |
+
|
| 10 |
+
// This class converts the content in a PythonRemoteCall into py::object. This
|
| 11 |
+
// is a helper class to make sure that all arguments deserialization is done
|
| 12 |
+
// before entering RequestCallbackImpl::processRpc(...), so that the
|
| 13 |
+
// deserialization related logic can be carried out in one spot instead of
|
| 14 |
+
// scattered in multiple places for different message types.
|
| 15 |
+
// NB: The reason for not consolidating class into PythonRemoteCall is because
|
| 16 |
+
// PythonRemoteCall is a libtorch type which should not depend on Python types.
|
| 17 |
+
// Deserialized counterpart of PythonRemoteCall (see the comment above this
// class): an UnpickledPythonCall plus the RRef/fork ids of the remote result.
class TORCH_API UnpickledPythonRemoteCall final : public UnpickledPythonCall {
 public:
  explicit UnpickledPythonRemoteCall(
      const SerializedPyObj& serializedPyObj,
      const at::IValue& retRRefId,
      const at::IValue& retForkId,
      const bool isAsyncExecution);

  // Id of the RRef that will hold the call's result.
  const RRefId& rrefId() const;
  // Id of the caller-side fork of that RRef.
  const ForkId& forkId() const;

 private:
  RRefId rrefId_;
  ForkId forkId_;
};
|
| 32 |
+
|
| 33 |
+
} // namespace torch::distributed::rpc
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/distributed/rpc/utils.h
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/Device.h>
|
| 4 |
+
#include <c10/core/Event.h>
|
| 5 |
+
#include <c10/core/Stream.h>
|
| 6 |
+
#include <torch/csrc/autograd/profiler.h>
|
| 7 |
+
#include <torch/csrc/distributed/rpc/rpc_command_base.h>
|
| 8 |
+
#include <torch/csrc/jit/serialization/pickle.h>
|
| 9 |
+
#include <torch/csrc/utils/byte_order.h>
|
| 10 |
+
|
| 11 |
+
namespace torch {
|
| 12 |
+
namespace distributed {
|
| 13 |
+
namespace rpc {
|
| 14 |
+
|
| 15 |
+
// Parse error message and return RPCErrorType based on the message.
|
| 16 |
+
TORCH_API RPCErrorType getRPCErrorType(const JitFuture& jitFuture);
|
| 17 |
+
// Create an error string given the error description and error type
|
| 18 |
+
TORCH_API std::string makeRPCError(
|
| 19 |
+
const std::string& rpcErrorStr,
|
| 20 |
+
RPCErrorType errorType);
|
| 21 |
+
|
| 22 |
+
// Given an RPC message received as a request over the wire, deserialize it into
|
| 23 |
+
// the appropriate 'RpcCommandBase' type.
|
| 24 |
+
TORCH_API std::unique_ptr<RpcCommandBase> deserializeRequest(
|
| 25 |
+
const Message& request);
|
| 26 |
+
|
| 27 |
+
// Given an RPC message received as a response over the wire, deserialize it
|
| 28 |
+
// into the appropriate 'RpcCommandBase' type, if the response is
|
| 29 |
+
// FORWARD_AUTOGRAD_RESP type, unwrap it, attach recvBackward() functions
|
| 30 |
+
// to received tensors and set the wrappedMsgType to its wrapped message type.
|
| 31 |
+
TORCH_API std::unique_ptr<RpcCommandBase> deserializeResponse(
|
| 32 |
+
const Message& response,
|
| 33 |
+
MessageType& wrappedMsgType);
|
| 34 |
+
|
| 35 |
+
// Given an RPC message received as a response over the wire, deserialize it
|
| 36 |
+
// into the valid IValue if the message is for a script rpc result,
|
| 37 |
+
// otherwise deserialize it into dummy none ivalue that will never be used.
|
| 38 |
+
// In this deserialization, we also attach recv rpc backward functions if
|
| 39 |
+
// needed.
|
| 40 |
+
IValue deserializeResptoIValueInternal(
|
| 41 |
+
RpcCommandBase& rpc,
|
| 42 |
+
MessageType messageType);
|
| 43 |
+
TORCH_API IValue deserializeRespToIValue(const Message& message);
|
| 44 |
+
|
| 45 |
+
// Note: format is subject to change and intended for RPCs.
|
| 46 |
+
// For saving persistently to disk, use torch::save().
|
| 47 |
+
TORCH_API std::string wireSerialize(
|
| 48 |
+
const std::vector<char>& payload,
|
| 49 |
+
const std::vector<at::Tensor>& tensors);
|
| 50 |
+
|
| 51 |
+
TORCH_API std::pair<std::vector<char>, std::vector<at::Tensor>> wireDeserialize(
|
| 52 |
+
const void* data,
|
| 53 |
+
size_t data_size);
|
| 54 |
+
|
| 55 |
+
// We use vector<char> as the type of blobs because it's what rpc::Message uses
|
| 56 |
+
// for its payload, even though it has the disadvantage that it cannot be
|
| 57 |
+
// allocated with uninitialized memory: it is always zeroed out.
|
| 58 |
+
|
| 59 |
+
// Some Tensors are effectively views of larger Tensors, where only a small
|
| 60 |
+
// subset of the Storage data is referenced. This normally is good and avoids
|
| 61 |
+
// copies when kept locally, but if we naively push the whole Storage over the
|
| 62 |
+
// wire, we'll end up with excess network traffic. This change clones tensors if
|
| 63 |
+
// we'd save at least half the data, and over a minimum hurdle.
|
| 64 |
+
TORCH_API c10::List<at::Tensor> cloneSparseTensors(
|
| 65 |
+
const std::vector<at::Tensor>& tensors);
|
| 66 |
+
|
| 67 |
+
// Combines an original payload and wrapped payload into the original payload.
|
| 68 |
+
// Used to generate the overall payload for the wrapped RPC.
|
| 69 |
+
TORCH_API void writeWrappedPayload(
|
| 70 |
+
std::vector<char>& originalPayload,
|
| 71 |
+
std::vector<char>& additionalPayload);
|
| 72 |
+
|
| 73 |
+
// Reads the additional, wrapped payload from a wrapped RPC off of the input
|
| 74 |
+
// payload. After this, payload will contain the payload of the original,
|
| 75 |
+
// un-wrapped RPC.
|
| 76 |
+
TORCH_API std::vector<at::IValue> readWrappedPayload(
|
| 77 |
+
std::vector<char>& payload,
|
| 78 |
+
const rpc::Message& message);
|
| 79 |
+
|
| 80 |
+
// Takes a list of events from autograd profiler and populates them into
|
| 81 |
+
// profiledEvents to be carried over RPC.
|
| 82 |
+
TORCH_API void populateRemoteProfiledEvents(
|
| 83 |
+
std::vector<torch::autograd::profiler::LegacyEvent>& profiledEvents,
|
| 84 |
+
const torch::autograd::profiler::ProfilerConfig& profilerConfig,
|
| 85 |
+
const std::vector<std::vector<torch::autograd::profiler::LegacyEvent>>&
|
| 86 |
+
eventLists);
|
| 87 |
+
|
| 88 |
+
} // namespace rpc
|
| 89 |
+
} // namespace distributed
|
| 90 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cpp_stacktraces.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/profiler/unwind/unwind.h>
|
| 5 |
+
|
| 6 |
+
namespace torch {
|
| 7 |
+
TORCH_API bool get_cpp_stacktraces_enabled();
|
| 8 |
+
TORCH_API torch::unwind::Mode get_symbolize_mode();
|
| 9 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/cuda_enabled.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
namespace torch::utils {
|
| 4 |
+
|
| 5 |
+
// Reports whether this build was compiled with CUDA support (i.e. whether
// USE_CUDA was defined at compile time). Usable in constant expressions.
inline constexpr bool cuda_enabled() {
#ifdef USE_CUDA
  constexpr bool kBuiltWithCuda = true;
#else
  constexpr bool kBuiltWithCuda = false;
#endif
  return kBuiltWithCuda;
}
|
| 12 |
+
|
| 13 |
+
} // namespace torch::utils
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/device_lazy_init.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/core/TensorOptions.h>
|
| 4 |
+
|
| 5 |
+
// device_lazy_init() is always compiled, even for CPU-only builds.
|
| 6 |
+
|
| 7 |
+
namespace torch::utils {
|
| 8 |
+
|
| 9 |
+
/**
|
| 10 |
+
* This mechanism of lazy initialization is designed for each device backend.
|
| 11 |
+
* Currently, CUDA and XPU follow this design. This function `device_lazy_init`
|
| 12 |
+
* MUST be called before you attempt to access any Type(CUDA or XPU) object
|
| 13 |
+
* from ATen, in any way. It guarantees that the device runtime status is lazily
|
| 14 |
+
* initialized when the first runtime API is requested.
|
| 15 |
+
*
|
| 16 |
+
* Here are some common ways that a device object may be retrieved:
|
| 17 |
+
* - You call getNonVariableType or getNonVariableTypeOpt
|
| 18 |
+
* - You call toBackend() on a Type
|
| 19 |
+
*
|
| 20 |
+
* It's important to do this correctly, because if you forget to add it you'll
|
| 21 |
+
* get an oblique error message seems like "Cannot initialize CUDA without
|
| 22 |
+
* ATen_cuda library" or "Cannot initialize XPU without ATen_xpu library" if you
|
| 23 |
+
* try to use CUDA or XPU functionality from a CPU-only build, which is not good
|
| 24 |
+
* UX.
|
| 25 |
+
*/
|
| 26 |
+
void device_lazy_init(at::DeviceType device_type);
|
| 27 |
+
void set_requires_device_init(at::DeviceType device_type, bool value);
|
| 28 |
+
|
| 29 |
+
// Lazily initializes the backend runtime for `device` when its type is one of
// those supporting lazy init (see device_lazy_init's doc comment above);
// a no-op for all other device types.
inline void maybe_initialize_device(at::Device& device) {
  // Add more devices here to enable lazy initialization.
  if (device.is_cuda() || device.is_xpu() || device.is_privateuseone()) {
    device_lazy_init(device.type());
  }
}
|
| 35 |
+
|
| 36 |
+
inline void maybe_initialize_device(std::optional<at::Device>& device) {
|
| 37 |
+
if (!device.has_value()) {
|
| 38 |
+
return;
|
| 39 |
+
}
|
| 40 |
+
maybe_initialize_device(device.value());
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
inline void maybe_initialize_device(const at::TensorOptions& options) {
|
| 44 |
+
auto device = options.device();
|
| 45 |
+
maybe_initialize_device(device);
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
bool is_device_initialized(at::DeviceType device_type);
|
| 49 |
+
|
| 50 |
+
} // namespace torch::utils
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/init.h
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/utils/pybind.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::throughput_benchmark {
|
| 6 |
+
|
| 7 |
+
void initThroughputBenchmarkBindings(PyObject* module);
|
| 8 |
+
|
| 9 |
+
} // namespace torch::throughput_benchmark
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/invalid_arguments.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
#include <string>
|
| 5 |
+
#include <vector>
|
| 6 |
+
|
| 7 |
+
namespace torch {
|
| 8 |
+
|
| 9 |
+
std::string format_invalid_args(
|
| 10 |
+
PyObject* given_args,
|
| 11 |
+
PyObject* given_kwargs,
|
| 12 |
+
const std::string& function_name,
|
| 13 |
+
const std::vector<std::string>& options);
|
| 14 |
+
|
| 15 |
+
} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/nested.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
#include <torch/csrc/utils/python_arg_parser.h>
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/Tensor.h>
|
| 7 |
+
|
| 8 |
+
namespace torch::utils {
|
| 9 |
+
|
| 10 |
+
at::Tensor nested_tensor_ctor(
|
| 11 |
+
c10::DispatchKey dispatch_key,
|
| 12 |
+
at::ScalarType scalar_type,
|
| 13 |
+
PythonArgs& r);
|
| 14 |
+
|
| 15 |
+
} // namespace torch::utils
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/numpy_stub.h
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
#ifdef USE_NUMPY
|
| 6 |
+
|
| 7 |
+
#if !defined(NO_IMPORT_ARRAY) && !defined(WITH_NUMPY_IMPORT_ARRAY)
|
| 8 |
+
#define NO_IMPORT_ARRAY
|
| 9 |
+
#endif
|
| 10 |
+
|
| 11 |
+
#ifndef PY_ARRAY_UNIQUE_SYMBOL
|
| 12 |
+
#define PY_ARRAY_UNIQUE_SYMBOL __numpy_array_api
|
| 13 |
+
#endif
|
| 14 |
+
|
| 15 |
+
#ifndef NPY_NO_DEPRECATED_API
|
| 16 |
+
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
|
| 17 |
+
#endif
|
| 18 |
+
|
| 19 |
+
#include <numpy/arrayobject.h>
|
| 20 |
+
|
| 21 |
+
#endif // USE_NUMPY
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/object_ptr.h
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/Export.h>
|
| 4 |
+
#include <torch/csrc/python_headers.h>
|
| 5 |
+
#include <utility>
|
| 6 |
+
|
| 7 |
+
template <class T>
|
| 8 |
+
class TORCH_PYTHON_API THPPointer {
|
| 9 |
+
public:
|
| 10 |
+
THPPointer() : ptr(nullptr){};
|
| 11 |
+
explicit THPPointer(T* ptr) noexcept : ptr(ptr){};
|
| 12 |
+
THPPointer(THPPointer&& p) noexcept : ptr(std::exchange(p.ptr, nullptr)) {}
|
| 13 |
+
|
| 14 |
+
~THPPointer() {
|
| 15 |
+
free();
|
| 16 |
+
};
|
| 17 |
+
T* get() {
|
| 18 |
+
return ptr;
|
| 19 |
+
}
|
| 20 |
+
const T* get() const {
|
| 21 |
+
return ptr;
|
| 22 |
+
}
|
| 23 |
+
T* release() {
|
| 24 |
+
T* tmp = ptr;
|
| 25 |
+
ptr = nullptr;
|
| 26 |
+
return tmp;
|
| 27 |
+
}
|
| 28 |
+
operator T*() {
|
| 29 |
+
return ptr;
|
| 30 |
+
}
|
| 31 |
+
THPPointer& operator=(T* new_ptr) noexcept {
|
| 32 |
+
free();
|
| 33 |
+
ptr = new_ptr;
|
| 34 |
+
return *this;
|
| 35 |
+
}
|
| 36 |
+
THPPointer& operator=(THPPointer&& p) noexcept {
|
| 37 |
+
free();
|
| 38 |
+
ptr = p.ptr;
|
| 39 |
+
p.ptr = nullptr;
|
| 40 |
+
return *this;
|
| 41 |
+
}
|
| 42 |
+
T* operator->() {
|
| 43 |
+
return ptr;
|
| 44 |
+
}
|
| 45 |
+
explicit operator bool() const {
|
| 46 |
+
return ptr != nullptr;
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
private:
|
| 50 |
+
void free();
|
| 51 |
+
T* ptr = nullptr;
|
| 52 |
+
};
|
| 53 |
+
|
| 54 |
+
/**
|
| 55 |
+
* An RAII-style, owning pointer to a PyObject. You must protect
|
| 56 |
+
* destruction of this object with the GIL.
|
| 57 |
+
*
|
| 58 |
+
* WARNING: Think twice before putting this as a field in a C++
|
| 59 |
+
* struct. This class does NOT take out the GIL on destruction,
|
| 60 |
+
* so if you will need to ensure that the destructor of your struct
|
| 61 |
+
* is either (a) always invoked when the GIL is taken or (b) takes
|
| 62 |
+
* out the GIL itself. Easiest way to avoid this problem is to
|
| 63 |
+
* not use THPPointer in this situation.
|
| 64 |
+
*/
|
| 65 |
+
using THPObjectPtr = THPPointer<PyObject>;
|
| 66 |
+
using THPCodeObjectPtr = THPPointer<PyCodeObject>;
|
| 67 |
+
using THPFrameObjectPtr = THPPointer<PyFrameObject>;
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/out_types.h
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <ATen/core/Tensor.h>
|
| 4 |
+
|
| 5 |
+
namespace torch::utils {
|
| 6 |
+
|
| 7 |
+
TORCH_API void check_out_type_matches(
|
| 8 |
+
const at::Tensor& result,
|
| 9 |
+
std::optional<at::ScalarType> scalarType,
|
| 10 |
+
bool scalarType_is_none,
|
| 11 |
+
std::optional<at::Layout> layout,
|
| 12 |
+
std::optional<at::Device> device,
|
| 13 |
+
bool device_is_none);
|
| 14 |
+
|
| 15 |
+
}
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pybind.h
ADDED
|
@@ -0,0 +1,418 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
#include <torch/csrc/utils/pythoncapi_compat.h>
|
| 5 |
+
|
| 6 |
+
#include <ATen/core/Tensor.h>
|
| 7 |
+
#include <ATen/core/jit_type_base.h>
|
| 8 |
+
#include <c10/util/irange.h>
|
| 9 |
+
#include <pybind11/pybind11.h>
|
| 10 |
+
#include <pybind11/stl.h>
|
| 11 |
+
|
| 12 |
+
#include <torch/csrc/Device.h>
|
| 13 |
+
#include <torch/csrc/Dtype.h>
|
| 14 |
+
#include <torch/csrc/DynamicTypes.h>
|
| 15 |
+
#include <torch/csrc/Generator.h>
|
| 16 |
+
#include <torch/csrc/MemoryFormat.h>
|
| 17 |
+
#include <torch/csrc/Stream.h>
|
| 18 |
+
#include <torch/csrc/utils/tensor_memoryformats.h>
|
| 19 |
+
|
| 20 |
+
namespace py = pybind11;
|
| 21 |
+
|
| 22 |
+
// This makes intrusive_ptr to be available as a custom pybind11 holder type,
|
| 23 |
+
// see
|
| 24 |
+
// https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#custom-smart-pointers
|
| 25 |
+
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::intrusive_ptr<T>, true);
|
| 26 |
+
|
| 27 |
+
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonOrSharedTypePtr<T>);
|
| 28 |
+
PYBIND11_DECLARE_HOLDER_TYPE(T, c10::SingletonTypePtr<T>, true);
|
| 29 |
+
|
| 30 |
+
namespace pybind11::detail {
|
| 31 |
+
|
| 32 |
+
// torch.Tensor <-> at::Tensor conversions (without unwrapping)
|
| 33 |
+
template <>
|
| 34 |
+
struct TORCH_PYTHON_API type_caster<at::Tensor> {
|
| 35 |
+
public:
|
| 36 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 37 |
+
PYBIND11_TYPE_CASTER(at::Tensor, _("torch.Tensor"));
|
| 38 |
+
|
| 39 |
+
bool load(handle src, bool);
|
| 40 |
+
|
| 41 |
+
static handle cast(
|
| 42 |
+
const at::Tensor& src,
|
| 43 |
+
return_value_policy /* policy */,
|
| 44 |
+
handle /* parent */);
|
| 45 |
+
};
|
| 46 |
+
|
| 47 |
+
// torch._StorageBase <-> at::Storage
|
| 48 |
+
template <>
|
| 49 |
+
struct type_caster<at::Storage> {
|
| 50 |
+
public:
|
| 51 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 52 |
+
PYBIND11_TYPE_CASTER(at::Storage, _("torch.StorageBase"));
|
| 53 |
+
|
| 54 |
+
bool load(handle src, bool) {
|
| 55 |
+
PyObject* obj = src.ptr();
|
| 56 |
+
if (torch::isStorage(obj)) {
|
| 57 |
+
value = torch::createStorage(obj);
|
| 58 |
+
return true;
|
| 59 |
+
}
|
| 60 |
+
return false;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
static handle cast(
|
| 64 |
+
const at::Storage& src,
|
| 65 |
+
return_value_policy /* policy */,
|
| 66 |
+
handle /* parent */) {
|
| 67 |
+
return handle(torch::createPyObject(src));
|
| 68 |
+
}
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
template <>
|
| 72 |
+
struct type_caster<at::Generator> {
|
| 73 |
+
public:
|
| 74 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 75 |
+
PYBIND11_TYPE_CASTER(at::Generator, _("torch.Generator"));
|
| 76 |
+
|
| 77 |
+
bool load(handle src, bool) {
|
| 78 |
+
PyObject* obj = src.ptr();
|
| 79 |
+
if (THPGenerator_Check(obj)) {
|
| 80 |
+
value = reinterpret_cast<THPGenerator*>(obj)->cdata;
|
| 81 |
+
return true;
|
| 82 |
+
}
|
| 83 |
+
return false;
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
static handle cast(
|
| 87 |
+
const at::Generator& src,
|
| 88 |
+
return_value_policy /* policy */,
|
| 89 |
+
handle /* parent */) {
|
| 90 |
+
return handle(THPGenerator_Wrap(src));
|
| 91 |
+
}
|
| 92 |
+
};
|
| 93 |
+
|
| 94 |
+
template <>
|
| 95 |
+
struct TORCH_PYTHON_API type_caster<at::IntArrayRef> {
|
| 96 |
+
public:
|
| 97 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 98 |
+
PYBIND11_TYPE_CASTER(at::IntArrayRef, _("Tuple[int, ...]"));
|
| 99 |
+
|
| 100 |
+
bool load(handle src, bool);
|
| 101 |
+
static handle cast(
|
| 102 |
+
at::IntArrayRef src,
|
| 103 |
+
return_value_policy /* policy */,
|
| 104 |
+
handle /* parent */);
|
| 105 |
+
|
| 106 |
+
private:
|
| 107 |
+
std::vector<int64_t> v_value;
|
| 108 |
+
};
|
| 109 |
+
|
| 110 |
+
template <>
|
| 111 |
+
struct TORCH_PYTHON_API type_caster<at::SymIntArrayRef> {
|
| 112 |
+
public:
|
| 113 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 114 |
+
PYBIND11_TYPE_CASTER(at::SymIntArrayRef, _("List[int]"));
|
| 115 |
+
|
| 116 |
+
bool load(handle src, bool);
|
| 117 |
+
static handle cast(
|
| 118 |
+
at::SymIntArrayRef src,
|
| 119 |
+
return_value_policy /* policy */,
|
| 120 |
+
handle /* parent */);
|
| 121 |
+
|
| 122 |
+
private:
|
| 123 |
+
std::vector<c10::SymInt> v_value;
|
| 124 |
+
};
|
| 125 |
+
|
| 126 |
+
template <>
|
| 127 |
+
struct TORCH_PYTHON_API type_caster<at::ArrayRef<c10::SymNode>> {
|
| 128 |
+
public:
|
| 129 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 130 |
+
PYBIND11_TYPE_CASTER(at::ArrayRef<c10::SymNode>, _("List[SymNode]"));
|
| 131 |
+
|
| 132 |
+
bool load(handle src, bool);
|
| 133 |
+
static handle cast(
|
| 134 |
+
at::ArrayRef<c10::SymNode> src,
|
| 135 |
+
return_value_policy /* policy */,
|
| 136 |
+
handle /* parent */);
|
| 137 |
+
|
| 138 |
+
private:
|
| 139 |
+
std::vector<c10::SymNode> v_value;
|
| 140 |
+
};
|
| 141 |
+
|
| 142 |
+
template <>
|
| 143 |
+
struct type_caster<at::MemoryFormat> {
|
| 144 |
+
public:
|
| 145 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 146 |
+
PYBIND11_TYPE_CASTER(at::MemoryFormat, _("torch.memory_format"));
|
| 147 |
+
|
| 148 |
+
bool load(handle src, bool) {
|
| 149 |
+
PyObject* obj = src.ptr();
|
| 150 |
+
if (THPMemoryFormat_Check(obj)) {
|
| 151 |
+
value = reinterpret_cast<THPMemoryFormat*>(obj)->memory_format;
|
| 152 |
+
return true;
|
| 153 |
+
}
|
| 154 |
+
return false;
|
| 155 |
+
}
|
| 156 |
+
static handle cast(
|
| 157 |
+
at::MemoryFormat src,
|
| 158 |
+
return_value_policy /* policy */,
|
| 159 |
+
handle /* parent */) {
|
| 160 |
+
return handle(Py_NewRef(torch::utils::getTHPMemoryFormat(src)));
|
| 161 |
+
}
|
| 162 |
+
};
|
| 163 |
+
|
| 164 |
+
template <>
|
| 165 |
+
struct type_caster<at::Device> {
|
| 166 |
+
public:
|
| 167 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 168 |
+
PYBIND11_TYPE_CASTER(at::Device, _("torch.device"));
|
| 169 |
+
|
| 170 |
+
// PYBIND11_TYPE_CASTER defines a member field called value. Since at::Device
|
| 171 |
+
// cannot be default-initialized, we provide this constructor to explicitly
|
| 172 |
+
// initialize that field. The value doesn't matter as it will be overwritten
|
| 173 |
+
// after a successful call to load.
|
| 174 |
+
type_caster() : value(c10::kCPU) {}
|
| 175 |
+
|
| 176 |
+
bool load(handle src, bool) {
|
| 177 |
+
PyObject* obj = src.ptr();
|
| 178 |
+
if (THPDevice_Check(obj)) {
|
| 179 |
+
value = reinterpret_cast<THPDevice*>(obj)->device;
|
| 180 |
+
return true;
|
| 181 |
+
}
|
| 182 |
+
return false;
|
| 183 |
+
}
|
| 184 |
+
|
| 185 |
+
static handle cast(
|
| 186 |
+
const at::Device& src,
|
| 187 |
+
return_value_policy /* policy */,
|
| 188 |
+
handle /* parent */) {
|
| 189 |
+
return handle(THPDevice_New(src));
|
| 190 |
+
}
|
| 191 |
+
};
|
| 192 |
+
|
| 193 |
+
template <>
|
| 194 |
+
struct type_caster<at::ScalarType> {
|
| 195 |
+
public:
|
| 196 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 197 |
+
PYBIND11_TYPE_CASTER(at::ScalarType, _("torch.dtype"));
|
| 198 |
+
|
| 199 |
+
// PYBIND11_TYPE_CASTER defines a member field called value. at::ScalarType
|
| 200 |
+
// cannot be default-initialized, we provide this constructor to explicitly
|
| 201 |
+
// initialize that field. The value doesn't matter as it will be overwritten
|
| 202 |
+
// after a successful call to load.
|
| 203 |
+
type_caster() : value(at::kFloat) {}
|
| 204 |
+
|
| 205 |
+
bool load(handle src, bool) {
|
| 206 |
+
PyObject* obj = src.ptr();
|
| 207 |
+
if (THPDtype_Check(obj)) {
|
| 208 |
+
value = reinterpret_cast<THPDtype*>(obj)->scalar_type;
|
| 209 |
+
return true;
|
| 210 |
+
}
|
| 211 |
+
return false;
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
static handle cast(
|
| 215 |
+
const at::ScalarType& src,
|
| 216 |
+
return_value_policy /* policy */,
|
| 217 |
+
handle /* parent */) {
|
| 218 |
+
return Py_NewRef(torch::getTHPDtype(src));
|
| 219 |
+
}
|
| 220 |
+
};
|
| 221 |
+
|
| 222 |
+
template <>
|
| 223 |
+
struct type_caster<c10::Stream> {
|
| 224 |
+
public:
|
| 225 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 226 |
+
PYBIND11_TYPE_CASTER(c10::Stream, _("torch.Stream"));
|
| 227 |
+
|
| 228 |
+
// PYBIND11_TYPE_CASTER defines a member field called value. Since c10::Stream
|
| 229 |
+
// cannot be default-initialized, we provide this constructor to explicitly
|
| 230 |
+
// initialize that field. The value doesn't matter as it will be overwritten
|
| 231 |
+
// after a successful call to load.
|
| 232 |
+
type_caster() : value(c10::Stream::DEFAULT, c10::Device(c10::kCPU, 0)) {}
|
| 233 |
+
|
| 234 |
+
bool load(handle src, bool) {
|
| 235 |
+
PyObject* obj = src.ptr();
|
| 236 |
+
if (THPStream_Check(obj)) {
|
| 237 |
+
value = c10::Stream::unpack3(
|
| 238 |
+
((THPStream*)obj)->stream_id,
|
| 239 |
+
static_cast<c10::DeviceIndex>(((THPStream*)obj)->device_index),
|
| 240 |
+
static_cast<c10::DeviceType>(((THPStream*)obj)->device_type));
|
| 241 |
+
return true;
|
| 242 |
+
}
|
| 243 |
+
return false;
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
static handle cast(
|
| 247 |
+
const c10::Stream& src,
|
| 248 |
+
return_value_policy /* policy */,
|
| 249 |
+
handle /* parent */) {
|
| 250 |
+
return handle(THPStream_Wrap(src));
|
| 251 |
+
}
|
| 252 |
+
};
|
| 253 |
+
|
| 254 |
+
template <>
|
| 255 |
+
struct type_caster<c10::DispatchKey>
|
| 256 |
+
: public type_caster_base<c10::DispatchKey> {
|
| 257 |
+
using base = type_caster_base<c10::DispatchKey>;
|
| 258 |
+
c10::DispatchKey tmp{};
|
| 259 |
+
|
| 260 |
+
public:
|
| 261 |
+
bool load(handle src, bool convert) {
|
| 262 |
+
if (base::load(src, convert)) {
|
| 263 |
+
return true;
|
| 264 |
+
} else if (py::isinstance(
|
| 265 |
+
src, py::module_::import("builtins").attr("str"))) {
|
| 266 |
+
tmp = c10::parseDispatchKey(py::cast<std::string>(src));
|
| 267 |
+
value = &tmp;
|
| 268 |
+
return true;
|
| 269 |
+
}
|
| 270 |
+
return false;
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
static handle cast(
|
| 274 |
+
c10::DispatchKey src,
|
| 275 |
+
return_value_policy policy,
|
| 276 |
+
handle parent) {
|
| 277 |
+
return base::cast(src, policy, parent);
|
| 278 |
+
}
|
| 279 |
+
};
|
| 280 |
+
|
| 281 |
+
template <>
|
| 282 |
+
struct TORCH_PYTHON_API type_caster<c10::Scalar> {
|
| 283 |
+
public:
|
| 284 |
+
PYBIND11_TYPE_CASTER(
|
| 285 |
+
c10::Scalar,
|
| 286 |
+
_("Union[Number, torch.SymInt, torch.SymFloat, torch.SymBool]"));
|
| 287 |
+
bool load(py::handle src, bool);
|
| 288 |
+
|
| 289 |
+
static py::handle cast(
|
| 290 |
+
const c10::Scalar& si,
|
| 291 |
+
return_value_policy /* policy */,
|
| 292 |
+
handle /* parent */);
|
| 293 |
+
};
|
| 294 |
+
|
| 295 |
+
template <>
|
| 296 |
+
struct TORCH_PYTHON_API type_caster<c10::SymInt> {
|
| 297 |
+
public:
|
| 298 |
+
PYBIND11_TYPE_CASTER(c10::SymInt, _("Union[int, torch.SymInt]"));
|
| 299 |
+
bool load(py::handle src, bool);
|
| 300 |
+
|
| 301 |
+
static py::handle cast(
|
| 302 |
+
const c10::SymInt& si,
|
| 303 |
+
return_value_policy /* policy */,
|
| 304 |
+
handle /* parent */);
|
| 305 |
+
};
|
| 306 |
+
|
| 307 |
+
template <>
|
| 308 |
+
struct TORCH_PYTHON_API type_caster<c10::SymFloat> {
|
| 309 |
+
public:
|
| 310 |
+
PYBIND11_TYPE_CASTER(c10::SymFloat, _("float"));
|
| 311 |
+
bool load(py::handle src, bool);
|
| 312 |
+
|
| 313 |
+
static py::handle cast(
|
| 314 |
+
const c10::SymFloat& si,
|
| 315 |
+
return_value_policy /* policy */,
|
| 316 |
+
handle /* parent */);
|
| 317 |
+
};
|
| 318 |
+
|
| 319 |
+
template <>
|
| 320 |
+
struct TORCH_PYTHON_API type_caster<c10::SymBool> {
|
| 321 |
+
public:
|
| 322 |
+
PYBIND11_TYPE_CASTER(c10::SymBool, _("Union[bool, torch.SymBool]"));
|
| 323 |
+
bool load(py::handle src, bool);
|
| 324 |
+
|
| 325 |
+
static py::handle cast(
|
| 326 |
+
const c10::SymBool& si,
|
| 327 |
+
return_value_policy /* policy */,
|
| 328 |
+
handle /* parent */);
|
| 329 |
+
};
|
| 330 |
+
|
| 331 |
+
template <typename T>
|
| 332 |
+
struct type_caster<c10::complex<T>> {
|
| 333 |
+
public:
|
| 334 |
+
// NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes)
|
| 335 |
+
PYBIND11_TYPE_CASTER(c10::complex<T>, _("complex"));
|
| 336 |
+
|
| 337 |
+
bool load(handle src, bool) {
|
| 338 |
+
PyObject* obj = src.ptr();
|
| 339 |
+
|
| 340 |
+
// Refered from `THPUtils_unpackComplexDouble`
|
| 341 |
+
Py_complex py_complex = PyComplex_AsCComplex(obj);
|
| 342 |
+
if (py_complex.real == -1.0 && PyErr_Occurred()) {
|
| 343 |
+
return false;
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
// Python's Complex is always double precision.
|
| 347 |
+
value = c10::complex<double>(py_complex.real, py_complex.imag);
|
| 348 |
+
return true;
|
| 349 |
+
}
|
| 350 |
+
|
| 351 |
+
static handle cast(
|
| 352 |
+
const c10::complex<T>& complex,
|
| 353 |
+
return_value_policy /* policy */,
|
| 354 |
+
handle /* parent */) {
|
| 355 |
+
// Python only knows double precision complex.
|
| 356 |
+
return handle(PyComplex_FromDoubles(complex.real(), complex.imag()));
|
| 357 |
+
}
|
| 358 |
+
};
|
| 359 |
+
|
| 360 |
+
} // namespace pybind11::detail
|
| 361 |
+
|
| 362 |
+
namespace torch::impl {
|
| 363 |
+
|
| 364 |
+
// Use this function if you have a C++ object that is used from both C++
|
| 365 |
+
// and Python contexts, and you need its GIL to be released when you
|
| 366 |
+
// destruct it in the Python context.
|
| 367 |
+
//
|
| 368 |
+
// This function is a valid shared_ptr destructor and can be used to
|
| 369 |
+
// conveniently allocate a shared_ptr to an object whose destructor will be run
|
| 370 |
+
// without the GIL. Pass it as the second argument to shared_ptr, e.g.,
|
| 371 |
+
//
|
| 372 |
+
// shared_ptr<T>(new T(), destroy_without_gil<T>)
|
| 373 |
+
//
|
| 374 |
+
// Attaching the GIL release logic to the holder pointer rather than the
|
| 375 |
+
// actual destructor of T is helpful when T is Python-agnostic and
|
| 376 |
+
// shouldn't refer to the PYthon API.
|
| 377 |
+
//
|
| 378 |
+
// Note there are limitations to the correctness of code that makes use of this.
|
| 379 |
+
// In particular, if a shared_ptr is constructed from C++ code without this
|
| 380 |
+
// destructor and then passed to pybind11, pybind11 will happily take ownership
|
| 381 |
+
// of the shared_ptr (and be willing to destruct it from a context where it is
|
| 382 |
+
// holding the GIL). unique_ptr with a type branded deleter is less prone to
|
| 383 |
+
// this problem, because a stock deleter unique_ptr is not convertible with it.
|
| 384 |
+
// I plan to mitigate this problem by adding DEBUG-only asserts to the true C++
|
| 385 |
+
// destructors that the GIL is not held (using a virtual call to get to the
|
| 386 |
+
// Python interpreter); alternately, we could use a virtual call to simply
|
| 387 |
+
// ensure we release the GIL in the C++ destructor, however, this is a layering
|
| 388 |
+
// violation (why does code that is ostensibly Python agnostic calling into the
|
| 389 |
+
// GIL).
|
| 390 |
+
//
|
| 391 |
+
// Adapted from
|
| 392 |
+
// https://github.com/pybind/pybind11/issues/1446#issuecomment-406341510
|
| 393 |
+
template <typename T>
|
| 394 |
+
inline void destroy_without_gil(T* ptr) {
|
| 395 |
+
// Because the ownership of a shared_ptr is diffuse, it's not possible to
|
| 396 |
+
// necessarily predict whether or not the last reference to an object will
|
| 397 |
+
// be destructed from Python or C++. This means that in the destructor here,
|
| 398 |
+
// we don't necessarily know if we actually have the GIL or not; in fact,
|
| 399 |
+
// we don't even know if the Python interpreter still exists! Thus, we have
|
| 400 |
+
// to test for it before releasing the GIL.
|
| 401 |
+
//
|
| 402 |
+
// PyGILState_Check is hopefully self explanatory. But Py_IsInitialized or
|
| 403 |
+
// _PyIsFinalizing? Both get set at the same time during the Python
|
| 404 |
+
// destruction process:
|
| 405 |
+
// https://github.com/python/cpython/blob/d92513390a1a0da781bb08c284136f4d7abea36d/Python/pylifecycle.c#L1716-L1717
|
| 406 |
+
// so the operant question is whether or not you want to release the GIL after
|
| 407 |
+
// finalization has completed (and there is just no Python interpreter).
|
| 408 |
+
// Clearly there is no need to release GIL in that state, so we want
|
| 409 |
+
// Py_IsInitialized.
|
| 410 |
+
if (Py_IsInitialized() && PyGILState_Check()) {
|
| 411 |
+
pybind11::gil_scoped_release nogil;
|
| 412 |
+
delete ptr;
|
| 413 |
+
} else {
|
| 414 |
+
delete ptr;
|
| 415 |
+
}
|
| 416 |
+
}
|
| 417 |
+
|
| 418 |
+
} // namespace torch::impl
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pycfunction_helpers.h
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <c10/macros/Macros.h>
|
| 4 |
+
|
| 5 |
+
#include <Python.h>
|
| 6 |
+
|
| 7 |
+
inline PyCFunction castPyCFunctionWithKeywords(PyCFunctionWithKeywords func) {
|
| 8 |
+
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wcast-function-type")
|
| 9 |
+
C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED("-Wcast-function-type-strict")
|
| 10 |
+
return reinterpret_cast<PyCFunction>(func);
|
| 11 |
+
C10_DIAGNOSTIC_POP()
|
| 12 |
+
C10_DIAGNOSTIC_POP()
|
| 13 |
+
}
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/pyobject_preservation.h
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
#include <torch/csrc/python_headers.h>
|
| 4 |
+
|
| 5 |
+
// This file contains utilities used for handling PyObject preservation
|
| 6 |
+
|
| 7 |
+
void clear_slots(PyTypeObject* type, PyObject* self);
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_arg_parser.h
ADDED
|
@@ -0,0 +1,1294 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// Parse arguments to Python functions implemented in C++
|
| 4 |
+
// This is similar to PyArg_ParseTupleAndKeywords(), but specifically handles
|
| 5 |
+
// the types relevant to PyTorch and distinguishes between overloaded function
|
| 6 |
+
// signatures.
|
| 7 |
+
//
|
| 8 |
+
// Example:
|
| 9 |
+
//
|
| 10 |
+
// static PythonArgParser parser({
|
| 11 |
+
// "norm(Scalar p, int64_t dim, bool keepdim=False)",
|
| 12 |
+
// "norm(Scalar p=2)",
|
| 13 |
+
// });
|
| 14 |
+
// ParsedArgs<3> parsed_args;
|
| 15 |
+
// auto r = parser.parse(args, kwargs, parsed_args);
|
| 16 |
+
// if (r.idx == 0) {
|
| 17 |
+
// norm(r.scalar(0), r.int64(1), r.bool(0));
|
| 18 |
+
// } else {
|
| 19 |
+
// norm(r.scalar(0));
|
| 20 |
+
// }
|
| 21 |
+
//
|
| 22 |
+
// We auto-generate most uses of PythonArgParser; the generated files
|
| 23 |
+
// are torch/csrc/autograd/generated/python_*.cpp
|
| 24 |
+
//
|
| 25 |
+
// Some gotchas that you should watch out for:
|
| 26 |
+
//
|
| 27 |
+
// - Note [Order of overloads matters]
|
| 28 |
+
// Order of overloads matters. A set of input arguments may
|
| 29 |
+
// bind to multiple argument specs; we will always pick the
|
| 30 |
+
// first one in PythonArgParser. However, when you are writing
|
| 31 |
+
// overloads in, e.g., native_functions.yaml, you don't have to
|
| 32 |
+
// worry about what order you write them, because the code
|
| 33 |
+
// generation logic always gives the overloads a canonical
|
| 34 |
+
// order, where Tensor overloads come first, before Scalar overloads.
|
| 35 |
+
// This logic is in sort_declarations in
|
| 36 |
+
// tools/autograd/gen_python_functions.py
|
| 37 |
+
//
|
| 38 |
+
// - Zero-dim tensors (e.g., torch.tensor(2)) bind to both
|
| 39 |
+
// Scalar and Tensor, UNLESS they require grad (in which case
|
| 40 |
+
// they only bind to Tensor).
|
| 41 |
+
|
| 42 |
+
#include <pybind11/pytypes.h>
|
| 43 |
+
#include <torch/csrc/python_headers.h>
|
| 44 |
+
|
| 45 |
+
#include <torch/csrc/Device.h>
|
| 46 |
+
#include <torch/csrc/Dtype.h>
|
| 47 |
+
#include <torch/csrc/DynamicTypes.h>
|
| 48 |
+
#include <torch/csrc/Exceptions.h>
|
| 49 |
+
#include <torch/csrc/Export.h>
|
| 50 |
+
#include <torch/csrc/Generator.h>
|
| 51 |
+
#include <torch/csrc/Layout.h>
|
| 52 |
+
#include <torch/csrc/MemoryFormat.h>
|
| 53 |
+
#include <torch/csrc/QScheme.h>
|
| 54 |
+
#include <torch/csrc/Stream.h>
|
| 55 |
+
#include <torch/csrc/autograd/python_variable.h>
|
| 56 |
+
#include <torch/csrc/autograd/variable.h>
|
| 57 |
+
#include <torch/csrc/dynamo/eval_frame.h>
|
| 58 |
+
#include <torch/csrc/jit/frontend/tracer.h>
|
| 59 |
+
#include <torch/csrc/python_dimname.h>
|
| 60 |
+
#include <torch/csrc/tensor/python_tensor.h>
|
| 61 |
+
#include <torch/csrc/utils/disable_torch_function.h>
|
| 62 |
+
#include <torch/csrc/utils/object_ptr.h>
|
| 63 |
+
#include <torch/csrc/utils/pybind.h>
|
| 64 |
+
#include <torch/csrc/utils/python_numbers.h>
|
| 65 |
+
#include <torch/csrc/utils/python_strings.h>
|
| 66 |
+
#include <torch/csrc/utils/python_symnode.h>
|
| 67 |
+
#include <torch/csrc/utils/six.h>
|
| 68 |
+
|
| 69 |
+
#include <ATen/DeviceAccelerator.h>
|
| 70 |
+
#include <ATen/PythonTorchFunctionTLS.h>
|
| 71 |
+
#include <ATen/core/Tensor.h>
|
| 72 |
+
#include <c10/util/Exception.h>
|
| 73 |
+
#include <c10/util/irange.h>
|
| 74 |
+
|
| 75 |
+
#include <c10/core/SymFloat.h>
|
| 76 |
+
#include <c10/core/SymNodeImpl.h>
|
| 77 |
+
|
| 78 |
+
#include <c10/core/DispatchKeySet.h>
|
| 79 |
+
#include <array>
|
| 80 |
+
#include <cstddef>
|
| 81 |
+
#include <string>
|
| 82 |
+
#include <vector>
|
| 83 |
+
|
| 84 |
+
inline bool THPUtils_checkScalar(PyObject* obj) {
|
| 85 |
+
#ifdef USE_NUMPY
|
| 86 |
+
if (torch::utils::is_numpy_scalar(obj)) {
|
| 87 |
+
return true;
|
| 88 |
+
}
|
| 89 |
+
#endif
|
| 90 |
+
return PyFloat_Check(obj) || PyLong_Check(obj) || PyComplex_Check(obj) ||
|
| 91 |
+
torch::is_symint(py::handle(obj)) ||
|
| 92 |
+
torch::is_symfloat(py::handle(obj)) || torch::is_symbool(py::handle(obj));
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
namespace torch {
|
| 96 |
+
|
| 97 |
+
bool should_allow_numbers_as_tensors(const std::string& name);
|
| 98 |
+
|
| 99 |
+
enum class ParameterType {
|
| 100 |
+
TENSOR,
|
| 101 |
+
SCALAR,
|
| 102 |
+
INT64,
|
| 103 |
+
SYM_INT,
|
| 104 |
+
DOUBLE,
|
| 105 |
+
COMPLEX,
|
| 106 |
+
TENSOR_LIST,
|
| 107 |
+
INT_LIST,
|
| 108 |
+
GENERATOR,
|
| 109 |
+
BOOL,
|
| 110 |
+
STORAGE,
|
| 111 |
+
PYOBJECT,
|
| 112 |
+
SCALARTYPE,
|
| 113 |
+
LAYOUT,
|
| 114 |
+
MEMORY_FORMAT,
|
| 115 |
+
DEVICE,
|
| 116 |
+
STREAM,
|
| 117 |
+
STRING,
|
| 118 |
+
DIMNAME,
|
| 119 |
+
DIMNAME_LIST,
|
| 120 |
+
QSCHEME,
|
| 121 |
+
FLOAT_LIST,
|
| 122 |
+
SCALAR_LIST,
|
| 123 |
+
SYM_INT_LIST,
|
| 124 |
+
DISPATCH_KEY_SET
|
| 125 |
+
};
|
| 126 |
+
|
| 127 |
+
struct FunctionParameter;
|
| 128 |
+
struct FunctionSignature;
|
| 129 |
+
struct PythonArgs;
|
| 130 |
+
|
| 131 |
+
// Contains bound Python arguments in declaration order
|
| 132 |
+
template <int N>
|
| 133 |
+
struct ParsedArgs {
|
| 134 |
+
ParsedArgs() : args() {}
|
| 135 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
|
| 136 |
+
PyObject* args[N];
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
// A PythonArgParser contains a list of valid signatures. Instances are
|
| 140 |
+
// typically global variables and should be immutable.
|
| 141 |
+
struct PYBIND11_EXPORT PythonArgParser {
|
| 142 |
+
explicit PythonArgParser(
|
| 143 |
+
const std::vector<std::string>& fmts,
|
| 144 |
+
bool traceable = false);
|
| 145 |
+
|
| 146 |
+
// meant only for `torch` functions.
|
| 147 |
+
template <int N>
|
| 148 |
+
inline PythonArgs parse(
|
| 149 |
+
PyObject* self,
|
| 150 |
+
PyObject* args,
|
| 151 |
+
PyObject* kwargs,
|
| 152 |
+
ParsedArgs<N>& dst);
|
| 153 |
+
|
| 154 |
+
template <int N>
|
| 155 |
+
inline PythonArgs parse(PyObject* args, PyObject* kwargs, ParsedArgs<N>& dst);
|
| 156 |
+
|
| 157 |
+
inline PythonArgs parse(PyObject* self, ParsedArgs<0>& dst);
|
| 158 |
+
|
| 159 |
+
// Formatted strings of non-hidden signatures
|
| 160 |
+
std::vector<std::string> get_signatures() const;
|
| 161 |
+
|
| 162 |
+
private:
|
| 163 |
+
[[noreturn]] void print_error(
|
| 164 |
+
PyObject* self,
|
| 165 |
+
PyObject* args,
|
| 166 |
+
PyObject* kwargs,
|
| 167 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
|
| 168 |
+
PyObject* parsed_args[]);
|
| 169 |
+
void check_deprecated(const FunctionSignature& signature);
|
| 170 |
+
PythonArgs raw_parse(
|
| 171 |
+
PyObject* self,
|
| 172 |
+
PyObject* args,
|
| 173 |
+
PyObject* kwargs,
|
| 174 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
|
| 175 |
+
PyObject* parsed_args[]);
|
| 176 |
+
|
| 177 |
+
std::vector<FunctionSignature> signatures_;
|
| 178 |
+
std::string function_name;
|
| 179 |
+
size_t max_args;
|
| 180 |
+
bool traceable;
|
| 181 |
+
};
|
| 182 |
+
|
| 183 |
+
// FunctionSignature represents a single valid signature for a Python function.
|
| 184 |
+
// It is immutable once constructed. The contained data can be concurrently
|
| 185 |
+
// accessed by multiple calls.
|
| 186 |
+
struct FunctionSignature {
|
| 187 |
+
explicit FunctionSignature(const std::string& fmt, int index);
|
| 188 |
+
|
| 189 |
+
bool parse(
|
| 190 |
+
PyObject* self,
|
| 191 |
+
PyObject* args,
|
| 192 |
+
PyObject* kwargs,
|
| 193 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
|
| 194 |
+
PyObject* dst[],
|
| 195 |
+
std::vector<PyObject*>& overloaded_args,
|
| 196 |
+
bool raise_exception);
|
| 197 |
+
|
| 198 |
+
std::string toString() const;
|
| 199 |
+
|
| 200 |
+
std::string name;
|
| 201 |
+
std::vector<FunctionParameter> params;
|
| 202 |
+
size_t min_args;
|
| 203 |
+
size_t max_args;
|
| 204 |
+
size_t max_pos_args;
|
| 205 |
+
int index;
|
| 206 |
+
bool hidden;
|
| 207 |
+
bool deprecated;
|
| 208 |
+
};
|
| 209 |
+
|
| 210 |
+
// PythonArgs contains bound Python arguments for an actual invocation
|
| 211 |
+
// along with references to the matched signature.
|
| 212 |
+
struct PythonArgs {
|
| 213 |
+
PythonArgs(
|
| 214 |
+
bool traceable,
|
| 215 |
+
const FunctionSignature& signature,
|
| 216 |
+
PyObject** args,
|
| 217 |
+
std::vector<PyObject*> overloaded_args)
|
| 218 |
+
: idx(signature.index),
|
| 219 |
+
traceable(traceable),
|
| 220 |
+
signature(signature),
|
| 221 |
+
args(args),
|
| 222 |
+
overloaded_args(std::move(overloaded_args)) {}
|
| 223 |
+
|
| 224 |
+
int idx;
|
| 225 |
+
bool traceable;
|
| 226 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
|
| 227 |
+
const FunctionSignature& signature;
|
| 228 |
+
PyObject** args;
|
| 229 |
+
std::vector<PyObject*> overloaded_args; // NOTE: borrowed references
|
| 230 |
+
|
| 231 |
+
inline bool has_torch_function();
|
| 232 |
+
inline std::string get_func_name();
|
| 233 |
+
inline at::Tensor tensor(int i);
|
| 234 |
+
inline std::optional<at::Tensor> optionalTensor(int i);
|
| 235 |
+
inline at::Scalar scalar(int i);
|
| 236 |
+
inline at::Scalar scalarWithDefault(int i, const at::Scalar& default_scalar);
|
| 237 |
+
inline std::vector<at::Scalar> scalarlist(int i);
|
| 238 |
+
inline std::vector<at::Tensor> tensorlist(int i);
|
| 239 |
+
inline torch::List<std::optional<at::Tensor>> list_of_optional_tensors(int i);
|
| 240 |
+
template <int N>
|
| 241 |
+
inline std::array<at::Tensor, N> tensorlist_n(int i);
|
| 242 |
+
inline std::vector<int64_t> intlist(int i);
|
| 243 |
+
inline std::vector<c10::SymInt> symintlist(int i);
|
| 244 |
+
inline c10::OptionalArray<int64_t> intlistOptional(int i);
|
| 245 |
+
inline c10::OptionalArray<c10::SymInt> symintlistOptional(int i);
|
| 246 |
+
inline std::vector<int64_t> intlistWithDefault(
|
| 247 |
+
int i,
|
| 248 |
+
std::vector<int64_t> default_intlist);
|
| 249 |
+
inline std::optional<at::Generator> generator(int i);
|
| 250 |
+
inline at::Storage storage(int i);
|
| 251 |
+
inline at::Storage storage(
|
| 252 |
+
int i,
|
| 253 |
+
at::ScalarType& storage_scalar_type,
|
| 254 |
+
bool& is_typed_storage);
|
| 255 |
+
inline c10::Stream stream(int i);
|
| 256 |
+
inline at::ScalarType scalartype(int i);
|
| 257 |
+
inline at::ScalarType scalartypeWithDefault(
|
| 258 |
+
int i,
|
| 259 |
+
at::ScalarType default_scalartype);
|
| 260 |
+
inline std::optional<at::ScalarType> scalartypeOptional(int i);
|
| 261 |
+
inline std::optional<at::Scalar> scalarOptional(int i);
|
| 262 |
+
inline std::optional<int64_t> toInt64Optional(int i);
|
| 263 |
+
inline std::optional<c10::SymInt> toSymIntOptional(int i);
|
| 264 |
+
inline std::optional<bool> toBoolOptional(int i);
|
| 265 |
+
inline std::optional<double> toDoubleOptional(int i);
|
| 266 |
+
inline c10::OptionalArray<double> doublelistOptional(int i);
|
| 267 |
+
inline std::vector<double> doublelist(int i);
|
| 268 |
+
inline std::vector<double> getDoublelist(int i);
|
| 269 |
+
inline at::Layout layout(int i);
|
| 270 |
+
inline at::Layout layoutWithDefault(int i, at::Layout default_layout);
|
| 271 |
+
inline std::optional<at::Layout> layoutOptional(int i);
|
| 272 |
+
inline at::Device device(int i);
|
| 273 |
+
inline at::Device deviceWithDefault(int i, const at::Device& default_device);
|
| 274 |
+
inline std::optional<at::Device> deviceOptional(int i);
|
| 275 |
+
inline at::Dimname dimname(int i);
|
| 276 |
+
inline std::vector<at::Dimname> dimnamelist(int i);
|
| 277 |
+
inline std::optional<std::vector<at::Dimname>> toDimnameListOptional(int i);
|
| 278 |
+
inline at::MemoryFormat memoryformat(int i);
|
| 279 |
+
inline std::optional<at::MemoryFormat> memoryformatOptional(int i);
|
| 280 |
+
inline at::QScheme toQScheme(int i);
|
| 281 |
+
inline std::string string(int i);
|
| 282 |
+
inline std::string stringWithDefault(int i, const std::string& default_str);
|
| 283 |
+
inline std::optional<std::string> stringOptional(int i);
|
| 284 |
+
inline c10::string_view stringView(int i);
|
| 285 |
+
inline c10::string_view stringViewWithDefault(
|
| 286 |
+
int i,
|
| 287 |
+
const c10::string_view default_str);
|
| 288 |
+
inline std::optional<c10::string_view> stringViewOptional(int i);
|
| 289 |
+
inline PyObject* pyobject(int i);
|
| 290 |
+
inline int64_t toInt64(int i);
|
| 291 |
+
inline c10::SymInt toSymInt(int i);
|
| 292 |
+
inline c10::SymBool toSymBool(int i);
|
| 293 |
+
inline int64_t toInt64WithDefault(int i, int64_t default_int);
|
| 294 |
+
inline double toDouble(int i);
|
| 295 |
+
inline double toDoubleWithDefault(int i, double default_double);
|
| 296 |
+
inline c10::complex<double> toComplex(int i);
|
| 297 |
+
inline c10::complex<double> toComplexWithDefault(
|
| 298 |
+
int i,
|
| 299 |
+
c10::complex<double> default_complex);
|
| 300 |
+
inline bool toBool(int i);
|
| 301 |
+
inline bool toBoolWithDefault(int i, bool default_bool);
|
| 302 |
+
inline bool isNone(int i);
|
| 303 |
+
inline std::optional<c10::DispatchKeySet> toDispatchKeySetOptional(int i);
|
| 304 |
+
|
| 305 |
+
private:
|
| 306 |
+
at::Tensor tensor_slow(int i);
|
| 307 |
+
at::Scalar scalar_slow(int i);
|
| 308 |
+
at::Scalar scalar_slow(PyObject* arg);
|
| 309 |
+
};
|
| 310 |
+
|
| 311 |
+
// FunctionParameter is a single formal parameter of a Python function.
|
| 312 |
+
// It is immutable once constructed.
|
| 313 |
+
struct FunctionParameter {
|
| 314 |
+
FunctionParameter(const std::string& fmt, bool keyword_only);
|
| 315 |
+
|
| 316 |
+
bool check(
|
| 317 |
+
PyObject* obj,
|
| 318 |
+
std::vector<PyObject*>& overloaded_args,
|
| 319 |
+
int argnum,
|
| 320 |
+
int64_t* failed_idx = nullptr);
|
| 321 |
+
|
| 322 |
+
void set_default_str(const std::string& str);
|
| 323 |
+
std::string type_name() const;
|
| 324 |
+
|
| 325 |
+
ParameterType type_;
|
| 326 |
+
bool optional;
|
| 327 |
+
bool allow_none;
|
| 328 |
+
bool keyword_only;
|
| 329 |
+
bool allow_numbers_as_tensors = false;
|
| 330 |
+
int size;
|
| 331 |
+
std::string name;
|
| 332 |
+
// having this as a raw PyObject * will presumably leak it, but these are only
|
| 333 |
+
// held by static objects anyway, and Py_Finalize can already be called when
|
| 334 |
+
// this is destructed.
|
| 335 |
+
PyObject* python_name;
|
| 336 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
|
| 337 |
+
at::SmallVector<PyObject*, 5> numpy_python_names;
|
| 338 |
+
at::Scalar default_scalar;
|
| 339 |
+
std::vector<int64_t> default_intlist;
|
| 340 |
+
std::string default_string;
|
| 341 |
+
union {
|
| 342 |
+
bool default_bool;
|
| 343 |
+
int64_t default_int;
|
| 344 |
+
double default_double;
|
| 345 |
+
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
|
| 346 |
+
double default_complex[2]; // see Scalar
|
| 347 |
+
at::ScalarType default_scalartype;
|
| 348 |
+
at::Layout default_layout;
|
| 349 |
+
};
|
| 350 |
+
std::string default_value;
|
| 351 |
+
};
|
| 352 |
+
|
| 353 |
+
template <int N>
|
| 354 |
+
inline PythonArgs PythonArgParser::parse(
|
| 355 |
+
PyObject* self,
|
| 356 |
+
PyObject* args,
|
| 357 |
+
PyObject* kwargs,
|
| 358 |
+
ParsedArgs<N>& dst) {
|
| 359 |
+
TORCH_CHECK_VALUE(
|
| 360 |
+
N >= max_args,
|
| 361 |
+
"PythonArgParser: dst ParsedArgs buffer does not have enough capacity, expected ",
|
| 362 |
+
max_args,
|
| 363 |
+
" (got ",
|
| 364 |
+
N,
|
| 365 |
+
")");
|
| 366 |
+
return raw_parse(self, args, kwargs, dst.args);
|
| 367 |
+
}
|
| 368 |
+
|
| 369 |
+
template <int N>
|
| 370 |
+
inline PythonArgs PythonArgParser::parse(
|
| 371 |
+
PyObject* args,
|
| 372 |
+
PyObject* kwargs,
|
| 373 |
+
ParsedArgs<N>& dst) {
|
| 374 |
+
return parse(nullptr, args, kwargs, dst);
|
| 375 |
+
}
|
| 376 |
+
|
| 377 |
+
inline PythonArgs PythonArgParser::parse(PyObject* self, ParsedArgs<0>& dst) {
|
| 378 |
+
return parse(self, nullptr, nullptr, dst);
|
| 379 |
+
}
|
| 380 |
+
|
| 381 |
+
inline bool PythonArgs::has_torch_function() {
|
| 382 |
+
return !overloaded_args.empty() || at::impl::torch_function_mode_enabled();
|
| 383 |
+
}
|
| 384 |
+
|
| 385 |
+
inline std::string PythonArgs::get_func_name() {
|
| 386 |
+
return signature.name;
|
| 387 |
+
}
|
| 388 |
+
|
| 389 |
+
// TODO: this can return MaybeOwned
|
| 390 |
+
inline at::Tensor PythonArgs::tensor(int i) {
|
| 391 |
+
if (args[i] && THPVariable_CheckExact(args[i])) {
|
| 392 |
+
return THPVariable_Unpack(args[i]);
|
| 393 |
+
}
|
| 394 |
+
return tensor_slow(i);
|
| 395 |
+
}
|
| 396 |
+
|
| 397 |
+
inline std::optional<at::Tensor> PythonArgs::optionalTensor(int i) {
|
| 398 |
+
at::Tensor t = tensor(i);
|
| 399 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 400 |
+
if (t.defined()) {
|
| 401 |
+
return t;
|
| 402 |
+
} else {
|
| 403 |
+
return std::nullopt;
|
| 404 |
+
}
|
| 405 |
+
}
|
| 406 |
+
|
| 407 |
+
inline at::Scalar PythonArgs::scalar(int i) {
|
| 408 |
+
if (!args[i])
|
| 409 |
+
return signature.params[i].default_scalar;
|
| 410 |
+
return scalar_slow(i);
|
| 411 |
+
}
|
| 412 |
+
|
| 413 |
+
inline std::vector<at::Scalar> PythonArgs::scalarlist(int i) {
|
| 414 |
+
if (!args[i])
|
| 415 |
+
return std::vector<at::Scalar>();
|
| 416 |
+
auto tuple = six::isTuple(args[i]);
|
| 417 |
+
THPObjectPtr arg = six::maybeAsTuple(args[i]);
|
| 418 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 419 |
+
auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
|
| 420 |
+
std::vector<at::Scalar> res(size);
|
| 421 |
+
for (const auto idx : c10::irange(size)) {
|
| 422 |
+
PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
|
| 423 |
+
: PyList_GET_ITEM(arg.get(), idx);
|
| 424 |
+
res[idx] = scalar_slow(obj);
|
| 425 |
+
}
|
| 426 |
+
return res;
|
| 427 |
+
}
|
| 428 |
+
|
| 429 |
+
inline at::Scalar PythonArgs::scalarWithDefault(
|
| 430 |
+
int i,
|
| 431 |
+
const at::Scalar& default_scalar) {
|
| 432 |
+
if (!args[i])
|
| 433 |
+
return default_scalar;
|
| 434 |
+
return scalar_slow(i);
|
| 435 |
+
}
|
| 436 |
+
|
| 437 |
+
inline std::optional<at::Scalar> PythonArgs::scalarOptional(int i) {
|
| 438 |
+
if (!args[i])
|
| 439 |
+
return std::nullopt;
|
| 440 |
+
return scalar_slow(i);
|
| 441 |
+
}
|
| 442 |
+
|
| 443 |
+
inline std::vector<at::Tensor> PythonArgs::tensorlist(int i) {
|
| 444 |
+
if (!args[i])
|
| 445 |
+
return std::vector<at::Tensor>();
|
| 446 |
+
auto tuple = six::isTuple(args[i]);
|
| 447 |
+
THPObjectPtr arg = six::maybeAsTuple(args[i]);
|
| 448 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 449 |
+
auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
|
| 450 |
+
std::vector<at::Tensor> res(size);
|
| 451 |
+
for (const auto idx : c10::irange(size)) {
|
| 452 |
+
PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
|
| 453 |
+
: PyList_GET_ITEM(arg.get(), idx);
|
| 454 |
+
// This is checked by the argument parser so it's safe to cast without
|
| 455 |
+
// checking if this is a tensor first
|
| 456 |
+
res[idx] = THPVariable_Unpack(obj);
|
| 457 |
+
}
|
| 458 |
+
return res;
|
| 459 |
+
}
|
| 460 |
+
|
| 461 |
+
inline torch::List<std::optional<at::Tensor>> PythonArgs::
|
| 462 |
+
list_of_optional_tensors(int i) {
|
| 463 |
+
if (!args[i])
|
| 464 |
+
return torch::List<std::optional<at::Tensor>>();
|
| 465 |
+
auto tuple = six::isTuple(args[i]);
|
| 466 |
+
THPObjectPtr arg = six::maybeAsTuple(args[i]);
|
| 467 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 468 |
+
auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
|
| 469 |
+
torch::List<std::optional<at::Tensor>> res;
|
| 470 |
+
res.reserve(size);
|
| 471 |
+
for (const auto idx : c10::irange(size)) {
|
| 472 |
+
PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
|
| 473 |
+
: PyList_GET_ITEM(arg.get(), idx);
|
| 474 |
+
// This is checked by the argument parser so it's safe to cast without
|
| 475 |
+
// checking if this is a tensor first
|
| 476 |
+
res.push_back(THPVariable_Unpack(obj));
|
| 477 |
+
}
|
| 478 |
+
return res;
|
| 479 |
+
}
|
| 480 |
+
|
| 481 |
+
template <int N>
|
| 482 |
+
inline std::array<at::Tensor, N> PythonArgs::tensorlist_n(int i) {
|
| 483 |
+
auto res = std::array<at::Tensor, N>();
|
| 484 |
+
if (!args[i])
|
| 485 |
+
return res;
|
| 486 |
+
auto tuple = six::isTuple(args[i]);
|
| 487 |
+
THPObjectPtr arg = six::maybeAsTuple(args[i]);
|
| 488 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 489 |
+
auto size = tuple ? PyTuple_GET_SIZE(arg.get()) : PyList_GET_SIZE(arg.get());
|
| 490 |
+
if (size != N) {
|
| 491 |
+
throw TypeError("expected tuple of %d elements but got %d", N, (int)size);
|
| 492 |
+
}
|
| 493 |
+
for (const auto idx : c10::irange(size)) {
|
| 494 |
+
PyObject* obj = tuple ? PyTuple_GET_ITEM(arg.get(), idx)
|
| 495 |
+
: PyList_GET_ITEM(arg.get(), idx);
|
| 496 |
+
// This is checked by the argument parser so it's safe to cast without
|
| 497 |
+
// checking if this is a tensor first
|
| 498 |
+
res[idx] = THPVariable_Unpack(obj);
|
| 499 |
+
}
|
| 500 |
+
return res;
|
| 501 |
+
}
|
| 502 |
+
|
| 503 |
+
inline std::vector<int64_t> PythonArgs::intlist(int i) {
|
| 504 |
+
return intlistWithDefault(i, signature.params[i].default_intlist);
|
| 505 |
+
}
|
| 506 |
+
|
| 507 |
+
inline PyObject* toPyObject(const c10::SymInt& symint) {
|
| 508 |
+
if (symint.is_symbolic()) {
|
| 509 |
+
auto r = py::cast(symint).release().ptr();
|
| 510 |
+
TORCH_INTERNAL_ASSERT(r);
|
| 511 |
+
return r;
|
| 512 |
+
} else {
|
| 513 |
+
auto m = symint.maybe_as_int();
|
| 514 |
+
return THPUtils_packInt64(*m);
|
| 515 |
+
}
|
| 516 |
+
}
|
| 517 |
+
|
| 518 |
+
inline void throw_intlist_exception(
|
| 519 |
+
const torch::PythonArgs* args,
|
| 520 |
+
size_t i,
|
| 521 |
+
PyObject* obj,
|
| 522 |
+
size_t idx,
|
| 523 |
+
const std::exception& e = python_error()) {
|
| 524 |
+
std::string error = strlen(e.what())
|
| 525 |
+
? e.what()
|
| 526 |
+
: std::string("type must be ") + args->signature.params[i].type_name() +
|
| 527 |
+
",but got " + Py_TYPE(obj)->tp_name;
|
| 528 |
+
throw TypeError(
|
| 529 |
+
"%s(): argument '%s' failed to unpack the object at pos %zu with error \"%s\"",
|
| 530 |
+
args->signature.name.c_str(),
|
| 531 |
+
args->signature.params[i].name.c_str(),
|
| 532 |
+
idx + 1,
|
| 533 |
+
error.c_str());
|
| 534 |
+
}
|
| 535 |
+
|
| 536 |
+
inline std::vector<c10::SymInt> PythonArgs::symintlist(int i) {
|
| 537 |
+
if (!args[i]) {
|
| 538 |
+
return c10::fmap(signature.params[i].default_intlist, [](int64_t di) {
|
| 539 |
+
return c10::SymInt(di);
|
| 540 |
+
});
|
| 541 |
+
}
|
| 542 |
+
|
| 543 |
+
const auto size1 = signature.params[i].size;
|
| 544 |
+
if (size1 > 0 && THPUtils_checkLong(args[i])) {
|
| 545 |
+
return std::vector<c10::SymInt>(
|
| 546 |
+
size1, c10::SymInt(THPUtils_unpackLong(args[i])));
|
| 547 |
+
}
|
| 548 |
+
|
| 549 |
+
if (size1 > 0 && torch::is_symint(py::handle(args[i]))) {
|
| 550 |
+
auto si = py::handle(args[i]).cast<c10::SymInt>();
|
| 551 |
+
return std::vector<c10::SymInt>(size1, si);
|
| 552 |
+
}
|
| 553 |
+
|
| 554 |
+
PyObject* arg = args[i];
|
| 555 |
+
auto tuple = PyTuple_Check(arg);
|
| 556 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 557 |
+
const auto size2 = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
|
| 558 |
+
std::vector<c10::SymInt> res;
|
| 559 |
+
res.reserve(size2);
|
| 560 |
+
for (const auto idx : c10::irange(size2)) {
|
| 561 |
+
PyObject* obj =
|
| 562 |
+
tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
|
| 563 |
+
|
| 564 |
+
// Elements of torch.Size are tensors during tracing, and we need to
|
| 565 |
+
// record extra information before they are turned into an IntArrayRef
|
| 566 |
+
if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) {
|
| 567 |
+
auto& var = THPVariable_Unpack(obj);
|
| 568 |
+
jit::tracer::ArgumentStash::stashIntArrayRefElem(
|
| 569 |
+
signature.params[i].name, size2, idx, var);
|
| 570 |
+
try {
|
| 571 |
+
res.emplace_back(var.item<int64_t>());
|
| 572 |
+
continue;
|
| 573 |
+
} catch (std::exception& e) {
|
| 574 |
+
throw_intlist_exception(this, i, obj, idx, e);
|
| 575 |
+
}
|
| 576 |
+
continue;
|
| 577 |
+
} else {
|
| 578 |
+
// convert tensor to scalar outside of try / catch,
|
| 579 |
+
// so that Tensor subclass exceptions will not be caught.
|
| 580 |
+
if (THPUtils_checkLongExact(obj)) {
|
| 581 |
+
// Fast path for plain numbers
|
| 582 |
+
try {
|
| 583 |
+
res.emplace_back(THPUtils_unpackLong(obj));
|
| 584 |
+
} catch (std::exception& e) {
|
| 585 |
+
throw_intlist_exception(this, i, obj, idx, e);
|
| 586 |
+
}
|
| 587 |
+
} else if (THPVariable_Check(obj)) {
|
| 588 |
+
auto& var = THPVariable_Unpack(obj);
|
| 589 |
+
if (var.numel() != 1 ||
|
| 590 |
+
!at::isIntegralType(
|
| 591 |
+
var.dtype().toScalarType(), /*include_bool*/ true)) {
|
| 592 |
+
throw_intlist_exception(this, i, obj, idx);
|
| 593 |
+
}
|
| 594 |
+
auto scalar = var.item();
|
| 595 |
+
TORCH_CHECK(scalar.isIntegral(/*include bool*/ false));
|
| 596 |
+
res.push_back(scalar.toSymInt());
|
| 597 |
+
} else {
|
| 598 |
+
try {
|
| 599 |
+
if (is_symint(py::handle(obj))) {
|
| 600 |
+
res.push_back(py::handle(obj).cast<c10::SymInt>());
|
| 601 |
+
} else {
|
| 602 |
+
res.emplace_back(THPUtils_unpackIndex(obj));
|
| 603 |
+
}
|
| 604 |
+
} catch (std::exception& e) {
|
| 605 |
+
throw_intlist_exception(this, i, obj, idx, e);
|
| 606 |
+
}
|
| 607 |
+
}
|
| 608 |
+
}
|
| 609 |
+
}
|
| 610 |
+
|
| 611 |
+
return res;
|
| 612 |
+
}
|
| 613 |
+
|
| 614 |
+
inline std::vector<int64_t> PythonArgs::intlistWithDefault(
|
| 615 |
+
int i,
|
| 616 |
+
std::vector<int64_t> default_intlist) {
|
| 617 |
+
if (!args[i])
|
| 618 |
+
return default_intlist;
|
| 619 |
+
PyObject* arg = args[i];
|
| 620 |
+
const auto size1 = signature.params[i].size;
|
| 621 |
+
if (size1 > 0 && THPUtils_checkLong(arg)) {
|
| 622 |
+
return std::vector<int64_t>(size1, THPUtils_unpackLong(arg));
|
| 623 |
+
}
|
| 624 |
+
if (size1 > 0 && torch::is_symint(py::handle(arg))) {
|
| 625 |
+
return std::vector<int64_t>(
|
| 626 |
+
size1,
|
| 627 |
+
py::handle(arg).cast<c10::SymInt>().guard_int(__FILE__, __LINE__));
|
| 628 |
+
}
|
| 629 |
+
auto tuple = PyTuple_Check(arg);
|
| 630 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 631 |
+
const auto size2 = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
|
| 632 |
+
std::vector<int64_t> res(size2);
|
| 633 |
+
for (const auto idx : c10::irange(size2)) {
|
| 634 |
+
PyObject* obj =
|
| 635 |
+
tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
|
| 636 |
+
// Elements of torch.Size are tensors during tracing, and we need to
|
| 637 |
+
// record extra information before they are turned into an IntArrayRef
|
| 638 |
+
if (traceable && jit::tracer::isTracing() && THPVariable_Check(obj)) {
|
| 639 |
+
auto& var = THPVariable_Unpack(obj);
|
| 640 |
+
jit::tracer::ArgumentStash::stashIntArrayRefElem(
|
| 641 |
+
signature.params[i].name, size2, idx, var);
|
| 642 |
+
try {
|
| 643 |
+
res[idx] = var.item<int64_t>();
|
| 644 |
+
continue;
|
| 645 |
+
} catch (std::exception& e) {
|
| 646 |
+
throw_intlist_exception(this, i, obj, idx, e);
|
| 647 |
+
}
|
| 648 |
+
} else {
|
| 649 |
+
// convert tensor to scalar outside of try / catch,
|
| 650 |
+
// so that Tensor subclass exceptions will not be caught.
|
| 651 |
+
if (THPUtils_checkLongExact(obj)) {
|
| 652 |
+
// Fast path for plain numbers
|
| 653 |
+
try {
|
| 654 |
+
res[idx] = THPUtils_unpackLong(obj);
|
| 655 |
+
} catch (std::exception& e) {
|
| 656 |
+
throw_intlist_exception(this, i, obj, idx, e);
|
| 657 |
+
}
|
| 658 |
+
} else if (torch::is_symint(py::handle(obj))) {
|
| 659 |
+
res[idx] = py::cast<c10::SymInt>(py::handle(obj))
|
| 660 |
+
.guard_int(__FILE__, __LINE__);
|
| 661 |
+
} else if (THPVariable_Check(obj)) {
|
| 662 |
+
auto& var = THPVariable_Unpack(obj);
|
| 663 |
+
if (var.numel() != 1 ||
|
| 664 |
+
!at::isIntegralType(
|
| 665 |
+
var.dtype().toScalarType(), /*include_bool*/ true)) {
|
| 666 |
+
throw_intlist_exception(this, i, obj, idx);
|
| 667 |
+
}
|
| 668 |
+
res[idx] = var.item<int64_t>();
|
| 669 |
+
} else {
|
| 670 |
+
try {
|
| 671 |
+
res[idx] = THPUtils_unpackIndex(obj);
|
| 672 |
+
} catch (std::exception& e) {
|
| 673 |
+
throw_intlist_exception(this, i, obj, idx, e);
|
| 674 |
+
}
|
| 675 |
+
}
|
| 676 |
+
}
|
| 677 |
+
}
|
| 678 |
+
return res;
|
| 679 |
+
}
|
| 680 |
+
|
| 681 |
+
inline c10::OptionalArray<int64_t> PythonArgs::intlistOptional(int i) {
|
| 682 |
+
if (!args[i]) {
|
| 683 |
+
return {};
|
| 684 |
+
}
|
| 685 |
+
return intlist(i);
|
| 686 |
+
}
|
| 687 |
+
|
| 688 |
+
inline c10::OptionalArray<c10::SymInt> PythonArgs::symintlistOptional(int i) {
|
| 689 |
+
if (!args[i]) {
|
| 690 |
+
return {};
|
| 691 |
+
}
|
| 692 |
+
return symintlist(i);
|
| 693 |
+
}
|
| 694 |
+
|
| 695 |
+
inline std::vector<double> PythonArgs::getDoublelist(int i) {
|
| 696 |
+
PyObject* arg = args[i];
|
| 697 |
+
auto tuple = PyTuple_Check(arg);
|
| 698 |
+
// NOLINTNEXTLINE(bugprone-branch-clone)
|
| 699 |
+
auto size = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
|
| 700 |
+
std::vector<double> res(size);
|
| 701 |
+
for (const auto idx : c10::irange(size)) {
|
| 702 |
+
PyObject* obj =
|
| 703 |
+
tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
|
| 704 |
+
try {
|
| 705 |
+
res[idx] = THPUtils_unpackDouble(obj);
|
| 706 |
+
} catch (const std::exception&) {
|
| 707 |
+
throw TypeError(
|
| 708 |
+
"%s(): argument '%s' must be %s, but found element of type %s at pos %zu",
|
| 709 |
+
signature.name.c_str(),
|
| 710 |
+
signature.params[i].name.c_str(),
|
| 711 |
+
signature.params[i].type_name().c_str(),
|
| 712 |
+
Py_TYPE(obj)->tp_name,
|
| 713 |
+
idx + 1);
|
| 714 |
+
}
|
| 715 |
+
}
|
| 716 |
+
return res;
|
| 717 |
+
}
|
| 718 |
+
|
| 719 |
+
inline c10::OptionalArray<double> PythonArgs::doublelistOptional(int i) {
|
| 720 |
+
if (!args[i]) {
|
| 721 |
+
return {};
|
| 722 |
+
}
|
| 723 |
+
return this->getDoublelist(i);
|
| 724 |
+
}
|
| 725 |
+
|
| 726 |
+
inline std::vector<double> PythonArgs::doublelist(int i) {
|
| 727 |
+
if (!args[i]) {
|
| 728 |
+
return {};
|
| 729 |
+
}
|
| 730 |
+
return this->getDoublelist(i);
|
| 731 |
+
}
|
| 732 |
+
|
| 733 |
+
inline std::optional<c10::DispatchKeySet> PythonArgs::toDispatchKeySetOptional(
|
| 734 |
+
int i) {
|
| 735 |
+
if (!args[i]) {
|
| 736 |
+
return {};
|
| 737 |
+
}
|
| 738 |
+
return py::cast<c10::DispatchKeySet>(py::handle(args[i]));
|
| 739 |
+
}
|
| 740 |
+
|
| 741 |
+
inline at::ScalarType PythonArgs::scalartypeWithDefault(
|
| 742 |
+
int i,
|
| 743 |
+
at::ScalarType default_scalartype) {
|
| 744 |
+
if (!args[i])
|
| 745 |
+
return default_scalartype;
|
| 746 |
+
return scalartype(i);
|
| 747 |
+
}
|
| 748 |
+
|
| 749 |
+
inline at::ScalarType toScalarType(PyObject* obj) {
|
| 750 |
+
if (obj == (PyObject*)&PyFloat_Type) {
|
| 751 |
+
return at::ScalarType::Double;
|
| 752 |
+
}
|
| 753 |
+
if (obj == (PyObject*)&PyBool_Type) {
|
| 754 |
+
return at::ScalarType::Bool;
|
| 755 |
+
}
|
| 756 |
+
if (obj == (PyObject*)&PyLong_Type) {
|
| 757 |
+
return at::ScalarType::Long;
|
| 758 |
+
}
|
| 759 |
+
if (obj == (PyObject*)&PyComplex_Type) {
|
| 760 |
+
return at::ScalarType::ComplexDouble;
|
| 761 |
+
}
|
| 762 |
+
return reinterpret_cast<THPDtype*>(obj)->scalar_type;
|
| 763 |
+
}
|
| 764 |
+
|
| 765 |
+
inline at::ScalarType PythonArgs::scalartype(int i) {
|
| 766 |
+
if (!args[i]) {
|
| 767 |
+
auto scalartype = signature.params[i].default_scalartype;
|
| 768 |
+
return (scalartype == at::ScalarType::Undefined)
|
| 769 |
+
? torch::tensors::get_default_scalar_type()
|
| 770 |
+
: scalartype;
|
| 771 |
+
}
|
| 772 |
+
PyObject* obj = args[i];
|
| 773 |
+
return toScalarType(obj);
|
| 774 |
+
}
|
| 775 |
+
|
| 776 |
+
inline std::optional<at::ScalarType> PythonArgs::scalartypeOptional(int i) {
|
| 777 |
+
if (!args[i])
|
| 778 |
+
return std::nullopt;
|
| 779 |
+
return scalartype(i);
|
| 780 |
+
}
|
| 781 |
+
|
| 782 |
+
inline at::Layout toLayout(PyObject* obj) {
|
| 783 |
+
const auto layout = reinterpret_cast<THPLayout*>(obj);
|
| 784 |
+
return layout->layout;
|
| 785 |
+
}
|
| 786 |
+
|
| 787 |
+
inline at::Layout PythonArgs::layout(int i) {
|
| 788 |
+
if (!args[i])
|
| 789 |
+
return signature.params[i].default_layout;
|
| 790 |
+
return toLayout(args[i]);
|
| 791 |
+
}
|
| 792 |
+
|
| 793 |
+
inline at::Layout PythonArgs::layoutWithDefault(
|
| 794 |
+
int i,
|
| 795 |
+
at::Layout default_layout) {
|
| 796 |
+
if (!args[i])
|
| 797 |
+
return default_layout;
|
| 798 |
+
return layout(i);
|
| 799 |
+
}
|
| 800 |
+
|
| 801 |
+
inline std::optional<at::Layout> PythonArgs::layoutOptional(int i) {
|
| 802 |
+
if (!args[i])
|
| 803 |
+
return std::nullopt;
|
| 804 |
+
return layout(i);
|
| 805 |
+
}
|
| 806 |
+
|
| 807 |
+
// Builds a Device on the current accelerator from a bare integer index
// (e.g. `device=1`). Requires a non-negative index, and requires that an
// accelerator is present: getAccelerator(true) asserts availability before
// .value() is taken.
inline at::Device deviceFromLong(int64_t device_index) {
  TORCH_CHECK(device_index >= 0, "Device index must not be negative");
  return at::Device(
      at::getAccelerator(true).value(),
      static_cast<c10::DeviceIndex>(device_index));
}

// Converts a Python object to at::Device, accepting (in order):
//  - a torch.device object,
//  - a plain int index (interpreted on the current accelerator),
//  - a SymInt index (guarded to a concrete int),
//  - a device string such as "cuda:0" (the fallthrough case; raises from
//    THPUtils_unpackString if obj is not a string).
inline at::Device toDevice(PyObject* obj) {
  if (THPDevice_Check(obj)) {
    const auto device = reinterpret_cast<THPDevice*>(obj);
    return device->device;
  }
  if (THPUtils_checkLong(obj)) {
    return deviceFromLong(THPUtils_unpackLong(obj));
  }
  if (torch::is_symint(py::handle(obj))) {
    auto device_index =
        py::cast<c10::SymInt>(py::handle(obj)).guard_int(__FILE__, __LINE__);
    return deviceFromLong(device_index);
  }
  const std::string& device_str = THPUtils_unpackString(obj);
  return at::Device(device_str);
}

// Returns the device parsed for argument i, or the global default device
// (torch.set_default_device) when the argument was not supplied.
inline at::Device PythonArgs::device(int i) {
  if (!args[i]) {
    return torch::tensors::get_default_device();
  }
  return toDevice(args[i]);
}

// Like device(i), but a missing argument yields the caller-supplied default
// instead of the global default device.
inline at::Device PythonArgs::deviceWithDefault(
    int i,
    const at::Device& default_device) {
  if (!args[i])
    return default_device;
  return device(i);
}

// Like device(i), but a missing argument yields std::nullopt instead of any
// default.
inline std::optional<at::Device> PythonArgs::deviceOptional(int i) {
  if (!args[i])
    return std::nullopt;
  return device(i);
}
|
| 851 |
+
|
| 852 |
+
// Parses argument i as a single dimension name. The argument is required:
// missing dimnames are a programming error, not a user error.
inline at::Dimname PythonArgs::dimname(int i) {
  TORCH_INTERNAL_ASSERT(args[i] != nullptr);
  return THPDimname_parse(args[i]);
}

// Parses a Python tuple or list of dimension names into a vector.
// NOTE(review): assumes arg is a tuple or list — the GET_SIZE/GET_ITEM
// macros are unchecked; presumably the arg parser guarantees this.
inline std::vector<at::Dimname> parseDimnameList(PyObject* arg) {
  auto tuple = PyTuple_Check(arg);
  // NOLINTNEXTLINE(bugprone-branch-clone)
  auto size = tuple ? PyTuple_GET_SIZE(arg) : PyList_GET_SIZE(arg);
  std::vector<at::Dimname> res;
  res.reserve(size);
  for (const auto idx : c10::irange(size)) {
    PyObject* obj =
        tuple ? PyTuple_GET_ITEM(arg, idx) : PyList_GET_ITEM(arg, idx);
    res.push_back(THPDimname_parse(obj));
  }
  return res;
}

// Like dimnamelist(i), but a missing argument yields std::nullopt.
inline std::optional<std::vector<at::Dimname>> PythonArgs::
    toDimnameListOptional(int i) {
  if (!args[i])
    return std::nullopt;
  return parseDimnameList(args[i]);
}

// Parses argument i as a list of dimension names. When the schema declares
// size == 1, a bare dimname is accepted and wrapped in a one-element vector
// (mirrors how int[1]-style parameters accept a scalar).
inline std::vector<at::Dimname> PythonArgs::dimnamelist(int i) {
  TORCH_INTERNAL_ASSERT(args[i]);
  PyObject* arg = args[i];
  auto size = signature.params[i].size;
  TORCH_INTERNAL_ASSERT(size == 0 || size == 1);
  if (size == 1 && THPUtils_checkDimname(arg)) {
    return {THPDimname_parse(arg)};
  }
  return parseDimnameList(arg);
}
|
| 888 |
+
|
| 889 |
+
// Returns the memory format parsed for argument i. A missing argument
// defaults to Contiguous; a supplied argument must be a torch.memory_format
// instance (user-facing TORCH_CHECK, not an internal assert).
inline at::MemoryFormat PythonArgs::memoryformat(int i) {
  if (!args[i])
    return at::MemoryFormat::Contiguous;
  TORCH_CHECK(
      THPMemoryFormat_Check(args[i]),
      "memory_format arg must be an instance of the torch.memory_format");
  const auto memory_format = reinterpret_cast<THPMemoryFormat*>(args[i]);
  return memory_format->memory_format;
}

// Like memoryformat(i), but a missing argument yields std::nullopt instead
// of defaulting to Contiguous.
inline std::optional<at::MemoryFormat> PythonArgs::memoryformatOptional(int i) {
  if (!args[i])
    return std::nullopt;
  return memoryformat(i);
}
|
| 904 |
+
|
| 905 |
+
// Returns the quantization scheme parsed for argument i. A missing argument
// defaults to kPerTensorAffine; a supplied argument must be a torch.qscheme
// instance (user-facing TORCH_CHECK).
inline at::QScheme PythonArgs::toQScheme(int i) {
  if (!args[i])
    return at::kPerTensorAffine;
  TORCH_CHECK(
      THPQScheme_Check(args[i]),
      "qscheme arg must be an instance of the torch.qscheme");
  const auto qscheme = reinterpret_cast<THPQScheme*>(args[i]);
  return qscheme->qscheme;
}
|
| 914 |
+
|
| 915 |
+
// Returns argument i as an owned std::string, using the schema's default
// string when the argument was not supplied.
inline std::string PythonArgs::string(int i) {
  return stringWithDefault(i, signature.params[i].default_string);
}

// Like string(i), but with a caller-supplied default for a missing argument.
inline std::string PythonArgs::stringWithDefault(
    int i,
    const std::string& default_str) {
  if (!args[i])
    return default_str;
  return THPUtils_unpackString(args[i]);
}

// Like string(i), but a missing argument yields std::nullopt.
inline std::optional<std::string> PythonArgs::stringOptional(int i) {
  if (!args[i])
    return std::nullopt;
  return THPUtils_unpackString(args[i]);
}

// Non-owning view variants. The returned string_view borrows the Python
// object's buffer, so it is only valid while args[i] stays alive.
inline c10::string_view PythonArgs::stringView(int i) {
  return stringViewWithDefault(i, signature.params[i].default_string);
}

// Like stringView(i), but with a caller-supplied default for a missing
// argument. The default is taken by value (string_view is a cheap view).
inline c10::string_view PythonArgs::stringViewWithDefault(
    int i,
    const c10::string_view default_str) {
  if (!args[i])
    return default_str;
  return THPUtils_unpackStringView(args[i]);
}

// Like stringView(i), but a missing argument yields std::nullopt.
inline std::optional<c10::string_view> PythonArgs::stringViewOptional(int i) {
  if (!args[i])
    return std::nullopt;
  return THPUtils_unpackStringView(args[i]);
}
|
| 950 |
+
|
| 951 |
+
// Returns argument i as a concrete int64_t.
// Missing -> the schema's default int. While JIT-tracing a traceable op, a
// Tensor-valued arg is stashed with the tracer first (side effect) so the
// trace records it as a dynamic value. A SymInt argument is guarded to a
// concrete int at this call site (__FILE__/__LINE__ become the guard's
// provenance).
inline int64_t PythonArgs::toInt64(int i) {
  if (!args[i])
    return signature.params[i].default_int;
  if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) {
    auto& var = THPVariable_Unpack(args[i]);
    jit::tracer::ArgumentStash::stashValue(
        signature.params[i].name, idx, var, c10::IntType::get());
  }
  if (torch::is_symint(py::handle(args[i]))) {
    return py::cast<c10::SymInt>(py::handle(args[i]))
        .guard_int(__FILE__, __LINE__);
  }
  return THPUtils_unpackLong(args[i]);
}

// Returns argument i as a SymInt, preserving symbolic-ness: unlike toInt64,
// no guard is inserted. The tracer-stash side effect matches toInt64.
// NOTE(review): py::cast here presumably also accepts plain Python ints via
// SymInt's conversion — confirm against the pybind caster.
inline c10::SymInt PythonArgs::toSymInt(int i) {
  if (!args[i]) {
    return c10::SymInt(signature.params[i].default_int);
  }

  if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) {
    auto& var = THPVariable_Unpack(args[i]);
    jit::tracer::ArgumentStash::stashValue(
        signature.params[i].name, idx, var, c10::IntType::get());
  }

  return py::cast<c10::SymInt>(py::handle(args[i]));
}

// Returns argument i as a SymBool, preserving symbolic-ness (no guard).
// Missing -> SymBool wrapping the schema's default bool. Tensor args are
// stashed with the tracer as Bool-typed values when tracing.
inline c10::SymBool PythonArgs::toSymBool(int i) {
  if (!args[i]) {
    return c10::SymBool(signature.params[i].default_bool);
  }
  if (traceable && jit::tracer::isTracing() && THPVariable_Check(args[i])) {
    auto& var = THPVariable_Unpack(args[i]);
    jit::tracer::ArgumentStash::stashValue(
        signature.params[i].name, idx, var, c10::BoolType::get());
  }

  return py::cast<c10::SymBool>(py::handle(args[i]));
}
|
| 992 |
+
|
| 993 |
+
inline int64_t PythonArgs::toInt64WithDefault(int i, int64_t default_int) {
|
| 994 |
+
if (!args[i])
|
| 995 |
+
return default_int;
|
| 996 |
+
return toInt64(i);
|
| 997 |
+
}
|
| 998 |
+
|
| 999 |
+
inline std::optional<int64_t> PythonArgs::toInt64Optional(int i) {
|
| 1000 |
+
if (!args[i])
|
| 1001 |
+
return std::nullopt;
|
| 1002 |
+
return toInt64(i);
|
| 1003 |
+
}
|
| 1004 |
+
|
| 1005 |
+
inline std::optional<c10::SymInt> PythonArgs::toSymIntOptional(int i) {
|
| 1006 |
+
if (!args[i])
|
| 1007 |
+
return std::nullopt;
|
| 1008 |
+
return toSymInt(i);
|
| 1009 |
+
}
|
| 1010 |
+
|
| 1011 |
+
inline std::optional<bool> PythonArgs::toBoolOptional(int i) {
|
| 1012 |
+
if (!args[i]) {
|
| 1013 |
+
return std::nullopt;
|
| 1014 |
+
}
|
| 1015 |
+
return toBool(i);
|
| 1016 |
+
}
|
| 1017 |
+
|
| 1018 |
+
inline std::optional<double> PythonArgs::toDoubleOptional(int i) {
|
| 1019 |
+
if (!args[i]) {
|
| 1020 |
+
return std::nullopt;
|
| 1021 |
+
}
|
| 1022 |
+
return toDouble(i);
|
| 1023 |
+
}
|
| 1024 |
+
|
| 1025 |
+
// Returns argument i as a concrete double.
// Missing -> the schema's default double. SymFloat and SymInt arguments are
// guarded to concrete values (a SymInt is guarded to int first, then widened
// to double).
inline double PythonArgs::toDouble(int i) {
  if (!args[i])
    return signature.params[i].default_double;
  if (torch::is_symfloat(py::handle(args[i]))) {
    return py::cast<c10::SymFloat>(py::handle(args[i]))
        .guard_float(__FILE__, __LINE__);
  }
  if (torch::is_symint(py::handle(args[i]))) {
    return static_cast<double>(py::cast<c10::SymInt>(py::handle(args[i]))
                                   .guard_int(__FILE__, __LINE__));
  }
  return THPUtils_unpackDouble(args[i]);
}

// Returns argument i as a concrete bool.
// Missing -> the schema's default bool. A SymBool is guarded to a concrete
// value; otherwise the object is compared against Py_True by identity —
// NOTE(review): presumably the arg parser only admits real bools here, since
// any non-True object (including truthy ones) would yield false.
inline bool PythonArgs::toBool(int i) {
  if (!args[i])
    return signature.params[i].default_bool;
  if (torch::is_symbool(py::handle(args[i]))) {
    return py::cast<c10::SymBool>(py::handle(args[i]))
        .guard_bool(__FILE__, __LINE__);
  }
  return args[i] == Py_True;
}
|
| 1048 |
+
|
| 1049 |
+
// Like toDouble(i), but a missing argument yields the caller-supplied
// default instead of the schema default.
inline double PythonArgs::toDoubleWithDefault(int i, double default_double) {
  if (!args[i])
    return default_double;
  return toDouble(i);
}

// Returns argument i as a complex<double>. The schema's default is stored as
// a raw byte buffer (default_complex), reinterpreted here as a
// c10::complex<double> — NOTE(review): relies on the buffer being suitably
// aligned and laid out; confirm against FunctionParameter's definition.
inline c10::complex<double> PythonArgs::toComplex(int i) {
  if (!args[i])
    return *(reinterpret_cast<const c10::complex<double>*>(
        signature.params[i].default_complex));
  return THPUtils_unpackComplexDouble(args[i]);
}

// Like toComplex(i), but a missing argument yields the caller-supplied
// default.
inline c10::complex<double> PythonArgs::toComplexWithDefault(
    int i,
    c10::complex<double> default_value) {
  if (!args[i])
    return default_value;
  return toComplex(i);
}

// Like toBool(i), but a missing argument yields the caller-supplied default.
inline bool PythonArgs::toBoolWithDefault(int i, bool default_bool) {
  if (!args[i])
    return default_bool;
  return toBool(i);
}
|
| 1075 |
+
|
| 1076 |
+
// True when argument i was not supplied. NOTE(review): the name suggests the
// parser also stores nullptr for an explicit Python None — confirm against
// the parsing code.
inline bool PythonArgs::isNone(int i) {
  return args[i] == nullptr;
}

// Returns the Generator for argument i, or std::nullopt when absent.
// No type check here — presumably the arg parser guarantees a THPGenerator.
inline std::optional<at::Generator> PythonArgs::generator(int i) {
  if (!args[i])
    return std::nullopt;
  return reinterpret_cast<THPGenerator*>(args[i])->cdata;
}

// Returns the Storage for argument i; a missing argument yields an empty
// (default-constructed) Storage.
inline at::Storage PythonArgs::storage(int i) {
  if (!args[i])
    return at::Storage();
  return createStorage(args[i]);
}

// Overload that additionally reports, via out-params, the scalar type of a
// typed storage and whether the argument was a TypedStorage at all. For a
// missing argument both out-params are set to their "not typed" values
// (Undefined / false) before the empty Storage is returned.
inline at::Storage PythonArgs::storage(
    int i,
    at::ScalarType& storage_scalar_type,
    bool& is_typed_storage) {
  at::Storage storage;
  if (!args[i]) {
    storage = at::Storage();
    is_typed_storage = false;
    storage_scalar_type = at::ScalarType::Undefined;
  } else {
    std::tie(storage, storage_scalar_type, is_typed_storage) =
        createStorageGetType(args[i]);
  }
  return storage;
}
|
| 1107 |
+
|
| 1108 |
+
// Returns the c10::Stream for argument i.
// Missing -> the default stream on a placeholder CPU device (index -1).
// A supplied argument must be a torch.Stream (TypeError otherwise); its
// (stream_id, device_index, device_type) triple is repacked into a
// c10::Stream via unpack3.
inline c10::Stream PythonArgs::stream(int i) {
  if (!args[i])
    return c10::Stream(
        c10::Stream::Default::DEFAULT, c10::Device(c10::DeviceType::CPU, -1));
  if (!THPStream_Check(args[i])) {
    throw TypeError(
        "expected Stream object. Got '%s'", Py_TYPE(args[i])->tp_name);
  }
  return c10::Stream::unpack3(
      ((THPStream*)args[i])->stream_id,
      static_cast<c10::DeviceIndex>(((THPStream*)args[i])->device_index),
      static_cast<c10::DeviceType>(((THPStream*)args[i])->device_type));
}
|
| 1121 |
+
|
| 1122 |
+
// Raw access to argument i as a Python object. An absent argument is
// reported as Py_None rather than nullptr, so the result can be handed
// straight to CPython APIs. The reference is borrowed — no refcount change.
inline PyObject* PythonArgs::pyobject(int i) {
  PyObject* const obj = args[i];
  return obj != nullptr ? obj : Py_None;
}
|
| 1127 |
+
|
| 1128 |
+
/*
 *
 * Handle __torch_function__ overrides if we know that there are overloaded
 * arguments. All objects stored in r.overloaded_args must have a
 * __torch_function__ implementation and the arguments must be ordered in order
 * of precedence. Precedence goes from left to right in the order of the
 * signature of the function the overloaded arguments were passed to, except
 * subclasses are always considered before superclasses.
 *
 * If the result of calling __torch_function__ is NotImplemented, the
 * next implementation in the precedence order is called. If all
 * arguments return NotImplemented from their __torch_function__
 * implementation, a TypeError is raised in Python.
 *
 * Assumes overloaded_args has at least one entry. All entries must have
 * a __torch_function__ attribute that resolves to a callable that
 * accepts a torch API function, a tuple of arguments, and a dict of
 * keyword arguments for the torch API function.
 *
 * It is sufficient to call PythonArgs::has_torch_function before
 * calling this function to verify that there are valid arguments
 * present. If that is not done then special care must be taken to
 * ensure there are arguments that are overloaded with
 * __torch_function__.
 *
 * See torch._overrides.handle_torch_function for the equivalent
 * code in the pure-python implementation.
 *
 * 'r' is a parsed PythonArgs instance, returned from
 * PythonArgParser::parse.
 *
 * 'args' is a reference to the python tuple of arguments to the torch
 * API function.
 *
 * 'kwargs' is a reference to the python dict of keyword arguments to
 * the torch API function.
 *
 * 'torch_api' is a reference to a python torch API namespace.
 *
 * 'torch_api_function' is the reference to the original torch method, usually,
 * we can use torch_api and func_name to get torch_api_function. In some cases,
 * e.g., torch custom op, we create the function in C++, if we still use
 * torch_api and func_name to fetch original api, a cyclic call will happen.
 *
 * 'overloaded_args' is the args which have overloaded __torch_function__.
 *
 * 'func_name' is the name of the original torch method.
 *
 * TODO: we could use different names for the following 'handle_torch_function'
 * instead of overloading.
 *
 */
// Used for Tensor methods with arguments.
auto handle_torch_function(
    PythonArgs& r,
    PyObject* self,
    PyObject* args,
    PyObject* kwargs,
    PyObject* torch_api,
    const char* module_name,
    const char* func_name_override = nullptr) -> PyObject*;

// Used for functions which need to parse python args.
auto handle_torch_function(
    PythonArgs& r,
    PyObject* args,
    PyObject* kwargs,
    PyObject* torch_api,
    const char* module_name,
    const char* func_name_override = nullptr) -> PyObject*;

// Used for functions that have no argument parsing.
auto handle_torch_function(
    PyObject* self,
    const std::string& func_name,
    PyObject* args = nullptr,
    PyObject* kwargs = nullptr,
    PyObject* torch_api = THPVariableClass,
    const std::string& module_name = "torch.Tensor") -> PyObject*;

// Used for functions created in C++, e.g., C++ custom op, which doesn't use
// PythonArgParser to get overloaded_args.
enum class TorchFunctionName { TorchFunction, TorchDispatch };

auto TORCH_PYTHON_API handle_torch_function_no_python_arg_parser(
    at::ArrayRef<PyObject*> overloaded_args,
    PyObject* args,
    PyObject* kwargs,
    const char* func_name,
    PyObject* torch_api_function,
    const char* module_name,
    TorchFunctionName torch_function_name = TorchFunctionName::TorchFunction)
    -> PyObject*;

// Used for getters of Tensor properties
auto handle_torch_function_getter(
    THPVariable* self,
    const std::string& property_name) -> PyObject*;

// Used for setters of Tensor properties.
auto handle_torch_function_setter(
    THPVariable* self,
    const std::string& property_name,
    PyObject* value) -> int;

// Used for __getitem__ and __setitem__
auto handle_torch_function_indexing(
    PyObject* self,
    PyObject* index,
    PyObject* val = nullptr) -> PyObject*;

/*
 * Check if the input obj is Tensor type, including its subclass, or overloaded
 * type. If the type defines __torch_function__, it also returns true.
 * Otherwise returns false. If the class is not torch.Tensor, and it defines
 * __torch_function__, we append obj to overloaded_args.
 *
 * 'obj': the input argument to be checked
 * 'overloaded_args': the vector to append the overloaded args.
 */
bool is_tensor_and_append_overloaded(
    PyObject* obj,
    std::vector<PyObject*>* overloaded_args);

/*
 * Check if the input obj is Tensor List or Tensor Tuple type. First check
 * whether obj is Tuple or List type, if true, iterate over each element and
 * check whether it is Tensor type, including its subclass or overloaded type.
 * At the same time, the overloaded arg is appended to the overloaded_args.
 *
 * 'obj': the input argument to be checked
 * 'overloaded_args': the vector to append the overloaded args.
 * 'argnum': the number of total arguments of the function being checked.
 * 'throw_error': whether throw error if any element in the list or tuple is
 * not tensor type or overloaded.
 */
bool is_tensor_list_and_append_overloaded(
    PyObject* obj,
    std::vector<PyObject*>* overloaded_args,
    size_t argnum,
    bool throw_error);

/* Given an argument that is definitely a tensor and is definitely overloaded,
 * append it to the overloaded arguments list. Use this instead of
 * is_tensor_and_append_overloaded in situations where you have a PyObject
 * and you know it definitely is a Tensor and it is definitely overloaded.
 *
 * 'overloaded_args': the vector to append the overloaded args
 * 'obj': the input tensor that is overloaded
 */
void append_overloaded_tensor(
    std::vector<PyObject*>* overloaded_args,
    PyObject* obj);

/* Given an argument that is definitely a type and is definitely overloaded,
 * append it to the overloaded arguments list. Use this only with
 * __torch_dispatch__, where we operate on classes that have a
 * __torch_dispatch__ classmethod.
 *
 * 'overloaded_args': the vector to append the overloaded type
 * 'obj': the input class that has a __torch_dispatch__ classmethod.
 */
void append_overloaded_type(
    std::vector<PyObject*>* overloaded_args,
    PyObject* obj);

} // namespace torch
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_compat.h
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#ifndef PYTHON_COMPAT
#define PYTHON_COMPAT

// PyTorch-only CPython compatibility shims, layered on top of the vendored
// pythoncapi_compat header.

#include <torch/csrc/utils/pythoncapi_compat.h>

#ifdef __cplusplus
extern "C" {
#endif

// PyTorch-only compat functions

// Version-test macros. Parenthesized so they expand safely inside larger
// preprocessor expressions (e.g. `#if !IS_PYTHON_3_11_PLUS` would otherwise
// parse as `(!PY_VERSION_HEX) >= 0x030B00C1`).
#define IS_PYTHON_3_11_PLUS (PY_VERSION_HEX >= 0x030B00C1)
#define IS_PYTHON_3_12_PLUS (PY_VERSION_HEX >= 0x030C0000)
#define IS_PYTHON_3_13_PLUS (PY_VERSION_HEX >= 0x030D0000)
#define IS_PYTHON_3_14_PLUS (PY_VERSION_HEX >= 0x030E0000)

// Number of cell variables of a code object, across Python versions.
PYCAPI_COMPAT_STATIC_INLINE(int)
PyCode_GetNCellvars(PyCodeObject* code) {
  // gh-26364 added co_ncellvars to Python 3.11.0rc1
#if IS_PYTHON_3_11_PLUS
  return code->co_ncellvars;
#else
  return PyTuple_GET_SIZE(code->co_cellvars);
#endif
}

// Number of free variables of a code object, across Python versions.
PYCAPI_COMPAT_STATIC_INLINE(int)
PyCode_GetNFreevars(PyCodeObject* code) {
  // gh-26364 added co_nfreevars to Python 3.11.0rc1
#if IS_PYTHON_3_11_PLUS
  return code->co_nfreevars;
#else
  return PyTuple_GET_SIZE(code->co_freevars);
#endif
}

// Provided by CPython but getting the header for them is very hard
extern void _PyWeakref_ClearRef(PyWeakReference* self);

#ifdef __cplusplus
}
#endif
#endif // PYTHON_COMPAT
|
vllm/lib/python3.10/site-packages/torch/include/torch/csrc/utils/python_dispatch.h
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Python bindings for the c10 dispatcher (torch._C._dispatch).
#pragma once

#include <pybind11/pybind11.h>
#include <torch/csrc/utils/pybind.h>

namespace torch::impl::dispatch {

// Registers the dispatcher bindings on the given Python module.
void initDispatchBindings(PyObject* module);

// Trampoline invoked by the dispatcher for operators whose kernels are
// implemented in Python; with_keyset/with_op select which extra context is
// forwarded to the Python callable.
void python_op_registration_trampoline_impl(
    const c10::OperatorHandle& op,
    c10::DispatchKey key,
    c10::DispatchKeySet keyset,
    torch::jit::Stack* stack,
    bool with_keyset,
    bool with_op);

} // namespace torch::impl::dispatch
|